21#include "llvm/ADT/DenseMap.h"
22#include "llvm/IR/DataLayout.h"
23#include "llvm/IR/Intrinsics.h"
35 CharUnits AtomicAlign;
42 AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
43 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45 assert(!lvalue.isGlobalReg());
47 if (lvalue.isSimple()) {
48 AtomicTy = lvalue.getType();
49 if (
auto *ATy = AtomicTy->
getAs<AtomicType>())
50 ValueTy = ATy->getValueType();
57 TypeInfo ValueTI =
C.getTypeInfo(ValueTy);
58 ValueSizeInBits = ValueTI.
Width;
59 ValueAlignInBits = ValueTI.
Align;
61 TypeInfo AtomicTI =
C.getTypeInfo(AtomicTy);
62 AtomicSizeInBits = AtomicTI.
Width;
63 AtomicAlignInBits = AtomicTI.
Align;
65 assert(ValueSizeInBits <= AtomicSizeInBits);
66 assert(ValueAlignInBits <= AtomicAlignInBits);
68 AtomicAlign =
C.toCharUnitsFromBits(AtomicAlignInBits);
69 ValueAlign =
C.toCharUnitsFromBits(ValueAlignInBits);
70 if (lvalue.getAlignment().isZero())
71 lvalue.setAlignment(AtomicAlign);
74 }
else if (lvalue.isBitField()) {
75 ValueTy = lvalue.getType();
76 ValueSizeInBits =
C.getTypeSize(ValueTy);
77 auto &OrigBFI = lvalue.getBitFieldInfo();
78 auto Offset = OrigBFI.Offset %
C.toBits(lvalue.getAlignment());
79 AtomicSizeInBits =
C.toBits(
80 C.toCharUnitsFromBits(Offset + OrigBFI.Size +
C.getCharWidth() - 1)
81 .alignTo(lvalue.getAlignment()));
82 llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
84 (
C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
85 lvalue.getAlignment();
86 llvm::Value *StoragePtr = CGF.
Builder.CreateConstGEP1_64(
87 CGF.
Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
94 llvm::Type *StorageTy = CGF.
Builder.getIntNTy(AtomicSizeInBits);
95 LVal = LValue::MakeBitfield(
96 Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
97 lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
98 AtomicTy =
C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
102 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
103 AtomicTy =
C.getConstantArrayType(
C.CharTy, Size,
nullptr,
104 ArraySizeModifier::Normal,
107 AtomicAlign = ValueAlign = lvalue.getAlignment();
108 }
else if (lvalue.isVectorElt()) {
109 ValueTy = lvalue.getType()->
castAs<VectorType>()->getElementType();
110 ValueSizeInBits =
C.getTypeSize(ValueTy);
111 AtomicTy = lvalue.getType();
112 AtomicSizeInBits =
C.getTypeSize(AtomicTy);
113 AtomicAlign = ValueAlign = lvalue.getAlignment();
116 assert(lvalue.isExtVectorElt());
117 ValueTy = lvalue.getType();
118 ValueSizeInBits =
C.getTypeSize(ValueTy);
121 lvalue.getExtVectorAddress().getElementType())
123 AtomicSizeInBits =
C.getTypeSize(AtomicTy);
124 AtomicAlign = ValueAlign = lvalue.getAlignment();
127 UseLibcall = !
C.getTargetInfo().hasBuiltinAtomic(
128 AtomicSizeInBits,
C.toBits(lvalue.getAlignment()));
/// Accessor: the atomic storage type computed in the constructor
/// (the lvalue's _Atomic type, or a padded integer/array type
/// synthesized for bit-field/oversized cases).
131 QualType getAtomicType()
const {
return AtomicTy; }
/// Accessor: the underlying value type (the _Atomic's value type for
/// simple lvalues; otherwise the lvalue's own type).
132 QualType getValueType()
const {
return ValueTy; }
/// Accessor: alignment (in CharUnits) of the atomic storage, as
/// derived from the type info / lvalue alignment in the constructor.
133 CharUnits getAtomicAlignment()
const {
return AtomicAlign; }
/// Accessor: width in bits of the atomic storage (may exceed the
/// value width when padding is present; see hasPadding()).
134 uint64_t getAtomicSizeInBits()
const {
return AtomicSizeInBits; }
/// Accessor: width in bits of the logical value held in the atomic
/// storage; asserted elsewhere to be <= getAtomicSizeInBits().
135 uint64_t getValueSizeInBits()
const {
return ValueSizeInBits; }
/// Accessor: whether this operation must be lowered to an __atomic_*
/// library call; set in the constructor from
/// TargetInfo::hasBuiltinAtomic(size, alignment).
137 bool shouldUseLibcall()
const {
return UseLibcall; }
/// Accessor: the (possibly adjusted) lvalue describing the atomic
/// storage — for bit-fields this is the widened storage lvalue built
/// in the constructor, not the caller's original lvalue.
138 const LValue &getAtomicLValue()
const {
return LVal; }
139 llvm::Value *getAtomicPointer()
const {
141 return LVal.emitRawPointer(CGF);
142 else if (LVal.isBitField())
143 return LVal.getRawBitFieldPointer(CGF);
144 else if (LVal.isVectorElt())
145 return LVal.getRawVectorPointer(CGF);
146 assert(LVal.isExtVectorElt());
147 return LVal.getRawExtVectorPointer(CGF);
149 Address getAtomicAddress()
const {
152 ElTy = LVal.getAddress().getElementType();
153 else if (LVal.isBitField())
154 ElTy = LVal.getBitFieldAddress().getElementType();
155 else if (LVal.isVectorElt())
156 ElTy = LVal.getVectorAddress().getElementType();
158 ElTy = LVal.getExtVectorAddress().getElementType();
159 return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
162 Address getAtomicAddressAsAtomicIntPointer()
const {
163 return castToAtomicIntPointer(getAtomicAddress());
172 bool hasPadding()
const {
173 return (ValueSizeInBits != AtomicSizeInBits);
176 bool emitMemSetZeroIfNecessary()
const;
178 llvm::Value *getAtomicSizeValue()
const {
185 Address castToAtomicIntPointer(Address
Addr)
const;
190 Address convertToAtomicIntPointer(Address
Addr)
const;
193 RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
194 SourceLocation loc,
bool AsValue)
const;
196 llvm::Value *getScalarRValValueOrNull(RValue RVal)
const;
199 llvm::Value *convertRValueToInt(RValue RVal,
bool CmpXchg =
false)
const;
201 RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
202 SourceLocation Loc,
bool AsValue,
203 bool CmpXchg =
false)
const;
206 void emitCopyIntoMemory(RValue rvalue)
const;
209 LValue projectValue()
const {
210 assert(LVal.isSimple());
211 Address addr = getAtomicAddress();
215 return LValue::MakeAddr(addr, getValueType(), CGF.
getContext(),
216 LVal.getBaseInfo(), LVal.getTBAAInfo());
221 RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
222 bool AsValue, llvm::AtomicOrdering AO,
233 std::pair<RValue, llvm::Value *>
234 EmitAtomicCompareExchange(RValue Expected, RValue Desired,
236 llvm::AtomicOrdering::SequentiallyConsistent,
237 llvm::AtomicOrdering Failure =
238 llvm::AtomicOrdering::SequentiallyConsistent,
239 bool IsWeak =
false);
244 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
245 const llvm::function_ref<RValue(RValue)> &UpdateOp,
249 void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
253 Address materializeRValue(RValue rvalue)
const;
256 Address CreateTempAlloca()
const;
258 bool requiresMemSetZero(llvm::Type *
type)
const;
262 void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
263 llvm::AtomicOrdering AO,
bool IsVolatile);
265 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO,
bool IsVolatile,
266 bool CmpXchg =
false);
268 llvm::Value *EmitAtomicCompareExchangeLibcall(
269 llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
271 llvm::AtomicOrdering::SequentiallyConsistent,
272 llvm::AtomicOrdering Failure =
273 llvm::AtomicOrdering::SequentiallyConsistent);
275 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
276 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
278 llvm::AtomicOrdering::SequentiallyConsistent,
279 llvm::AtomicOrdering Failure =
280 llvm::AtomicOrdering::SequentiallyConsistent,
281 bool IsWeak =
false);
284 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
285 const llvm::function_ref<RValue(RValue)> &UpdateOp,
288 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
289 const llvm::function_ref<RValue(RValue)> &UpdateOp,
292 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
295 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
300Address AtomicInfo::CreateTempAlloca()
const {
302 (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
304 getAtomicAlignment(),
307 if (LVal.isBitField())
309 TempAlloca, getAtomicAddress().
getType(),
310 getAtomicAddress().getElementType());
322 fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
323 fnAttrB.addAttribute(llvm::Attribute::WillReturn);
324 llvm::AttributeList fnAttrs = llvm::AttributeList::get(
325 CGF.
getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
327 llvm::FunctionCallee fn =
335 uint64_t expectedSize) {
342bool AtomicInfo::requiresMemSetZero(llvm::Type *
type)
const {
344 if (hasPadding())
return true;
347 switch (getEvaluationKind()) {
354 AtomicSizeInBits / 2);
360 llvm_unreachable(
"bad evaluation kind");
363bool AtomicInfo::emitMemSetZeroIfNecessary()
const {
364 assert(LVal.isSimple());
365 Address addr = LVal.getAddress();
372 LVal.getAlignment().getAsAlign());
379 uint64_t Size, llvm::AtomicOrdering SuccessOrder,
380 llvm::AtomicOrdering FailureOrder,
381 llvm::SyncScope::ID
Scope) {
389 Pair->setWeak(IsWeak);
394 llvm::Value *Old = CGF.
Builder.CreateExtractValue(Pair, 0);
395 llvm::Value *
Cmp = CGF.
Builder.CreateExtractValue(Pair, 1);
399 llvm::BasicBlock *StoreExpectedBB =
404 llvm::BasicBlock *ContinueBB =
409 CGF.
Builder.CreateCondBr(
Cmp, ContinueBB, StoreExpectedBB);
411 CGF.
Builder.SetInsertPoint(StoreExpectedBB);
415 uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(
ExpectedType);
417 if (ExpectedSizeInBytes == Size) {
423 llvm::Type *OldType = Old->getType();
439 CGF.
Builder.CreateBr(ContinueBB);
441 CGF.
Builder.SetInsertPoint(ContinueBB);
452 llvm::Value *FailureOrderVal, uint64_t Size,
453 llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID
Scope) {
454 llvm::AtomicOrdering FailureOrder;
455 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
456 auto FOS = FO->getSExtValue();
457 if (!llvm::isValidAtomicOrderingCABI(FOS))
458 FailureOrder = llvm::AtomicOrdering::Monotonic;
460 switch ((llvm::AtomicOrderingCABI)FOS) {
461 case llvm::AtomicOrderingCABI::relaxed:
464 case llvm::AtomicOrderingCABI::release:
465 case llvm::AtomicOrderingCABI::acq_rel:
466 FailureOrder = llvm::AtomicOrdering::Monotonic;
468 case llvm::AtomicOrderingCABI::consume:
469 case llvm::AtomicOrderingCABI::acquire:
470 FailureOrder = llvm::AtomicOrdering::Acquire;
472 case llvm::AtomicOrderingCABI::seq_cst:
473 FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
481 Size, SuccessOrder, FailureOrder,
Scope);
494 llvm::SwitchInst *SI = CGF.
Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
496 SI->addCase(CGF.
Builder.getInt32((
int)llvm::AtomicOrderingCABI::consume),
498 SI->addCase(CGF.
Builder.getInt32((
int)llvm::AtomicOrderingCABI::acquire),
500 SI->addCase(CGF.
Builder.getInt32((
int)llvm::AtomicOrderingCABI::seq_cst),
504 CGF.
Builder.SetInsertPoint(MonotonicBB);
506 SuccessOrder, llvm::AtomicOrdering::Monotonic,
Scope);
509 CGF.
Builder.SetInsertPoint(AcquireBB);
511 SuccessOrder, llvm::AtomicOrdering::Acquire,
Scope);
514 CGF.
Builder.SetInsertPoint(SeqCstBB);
516 SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
520 CGF.
Builder.SetInsertPoint(ContBB);
530 const bool IsFP = OldVal->getType()->isFloatingPointTy();
533 llvm::Intrinsic::ID IID = (Op == AtomicExpr::AO__atomic_max_fetch ||
534 Op == AtomicExpr::AO__scoped_atomic_max_fetch)
535 ? llvm::Intrinsic::maxnum
536 : llvm::Intrinsic::minnum;
538 return Builder.CreateBinaryIntrinsic(IID, OldVal, RHS, llvm::FMFSource(),
542 llvm::CmpInst::Predicate Pred;
545 llvm_unreachable(
"Unexpected min/max operation");
546 case AtomicExpr::AO__atomic_max_fetch:
547 case AtomicExpr::AO__scoped_atomic_max_fetch:
548 Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
550 case AtomicExpr::AO__atomic_min_fetch:
551 case AtomicExpr::AO__scoped_atomic_min_fetch:
552 Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
555 llvm::Value *
Cmp = Builder.CreateICmp(Pred, OldVal, RHS,
"tst");
556 return Builder.CreateSelect(
Cmp, OldVal, RHS,
"newval");
561 Address ExpectedResult, llvm::Value *IsWeak,
562 llvm::Value *FailureOrder, uint64_t Size,
563 llvm::AtomicOrdering Order,
564 llvm::SyncScope::ID
Scope) {
565 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
566 bool PostOpMinMax =
false;
569 switch (E->
getOp()) {
570 case AtomicExpr::AO__c11_atomic_init:
571 case AtomicExpr::AO__opencl_atomic_init:
572 llvm_unreachable(
"Already handled!");
574 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
575 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
576 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
578 ExpectedResult, FailureOrder, Size, Order,
581 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
582 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
583 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
585 ExpectedResult, FailureOrder, Size, Order,
588 case AtomicExpr::AO__atomic_compare_exchange:
589 case AtomicExpr::AO__atomic_compare_exchange_n:
590 case AtomicExpr::AO__scoped_atomic_compare_exchange:
591 case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
592 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
594 Val1, Val2, ExpectedResult, FailureOrder,
598 llvm::BasicBlock *StrongBB =
601 llvm::BasicBlock *ContBB =
604 llvm::SwitchInst *SI = CGF.
Builder.CreateSwitch(IsWeak, WeakBB);
605 SI->addCase(CGF.
Builder.getInt1(
false), StrongBB);
607 CGF.
Builder.SetInsertPoint(StrongBB);
609 ExpectedResult, FailureOrder, Size, Order,
613 CGF.
Builder.SetInsertPoint(WeakBB);
615 ExpectedResult, FailureOrder, Size, Order,
619 CGF.
Builder.SetInsertPoint(ContBB);
623 case AtomicExpr::AO__c11_atomic_load:
624 case AtomicExpr::AO__opencl_atomic_load:
625 case AtomicExpr::AO__hip_atomic_load:
626 case AtomicExpr::AO__atomic_load_n:
627 case AtomicExpr::AO__atomic_load:
628 case AtomicExpr::AO__scoped_atomic_load_n:
629 case AtomicExpr::AO__scoped_atomic_load: {
631 Load->setAtomic(Order,
Scope);
639 case AtomicExpr::AO__c11_atomic_store:
640 case AtomicExpr::AO__opencl_atomic_store:
641 case AtomicExpr::AO__hip_atomic_store:
642 case AtomicExpr::AO__atomic_store:
643 case AtomicExpr::AO__atomic_store_n:
644 case AtomicExpr::AO__scoped_atomic_store:
645 case AtomicExpr::AO__scoped_atomic_store_n: {
648 Store->setAtomic(Order,
Scope);
654 case AtomicExpr::AO__c11_atomic_exchange:
655 case AtomicExpr::AO__hip_atomic_exchange:
656 case AtomicExpr::AO__opencl_atomic_exchange:
657 case AtomicExpr::AO__atomic_exchange_n:
658 case AtomicExpr::AO__atomic_exchange:
659 case AtomicExpr::AO__scoped_atomic_exchange_n:
660 case AtomicExpr::AO__scoped_atomic_exchange:
661 Op = llvm::AtomicRMWInst::Xchg;
664 case AtomicExpr::AO__atomic_add_fetch:
665 case AtomicExpr::AO__scoped_atomic_add_fetch:
667 : llvm::Instruction::Add;
669 case AtomicExpr::AO__c11_atomic_fetch_add:
670 case AtomicExpr::AO__hip_atomic_fetch_add:
671 case AtomicExpr::AO__opencl_atomic_fetch_add:
672 case AtomicExpr::AO__atomic_fetch_add:
673 case AtomicExpr::AO__scoped_atomic_fetch_add:
675 : llvm::AtomicRMWInst::Add;
678 case AtomicExpr::AO__atomic_sub_fetch:
679 case AtomicExpr::AO__scoped_atomic_sub_fetch:
681 : llvm::Instruction::Sub;
683 case AtomicExpr::AO__c11_atomic_fetch_sub:
684 case AtomicExpr::AO__hip_atomic_fetch_sub:
685 case AtomicExpr::AO__opencl_atomic_fetch_sub:
686 case AtomicExpr::AO__atomic_fetch_sub:
687 case AtomicExpr::AO__scoped_atomic_fetch_sub:
689 : llvm::AtomicRMWInst::Sub;
692 case AtomicExpr::AO__atomic_min_fetch:
693 case AtomicExpr::AO__scoped_atomic_min_fetch:
696 case AtomicExpr::AO__c11_atomic_fetch_min:
697 case AtomicExpr::AO__hip_atomic_fetch_min:
698 case AtomicExpr::AO__opencl_atomic_fetch_min:
699 case AtomicExpr::AO__atomic_fetch_min:
700 case AtomicExpr::AO__scoped_atomic_fetch_min:
702 ? llvm::AtomicRMWInst::FMin
704 ? llvm::AtomicRMWInst::Min
705 : llvm::AtomicRMWInst::UMin);
708 case AtomicExpr::AO__atomic_max_fetch:
709 case AtomicExpr::AO__scoped_atomic_max_fetch:
712 case AtomicExpr::AO__c11_atomic_fetch_max:
713 case AtomicExpr::AO__hip_atomic_fetch_max:
714 case AtomicExpr::AO__opencl_atomic_fetch_max:
715 case AtomicExpr::AO__atomic_fetch_max:
716 case AtomicExpr::AO__scoped_atomic_fetch_max:
718 ? llvm::AtomicRMWInst::FMax
720 ? llvm::AtomicRMWInst::Max
721 : llvm::AtomicRMWInst::UMax);
724 case AtomicExpr::AO__atomic_and_fetch:
725 case AtomicExpr::AO__scoped_atomic_and_fetch:
726 PostOp = llvm::Instruction::And;
728 case AtomicExpr::AO__c11_atomic_fetch_and:
729 case AtomicExpr::AO__hip_atomic_fetch_and:
730 case AtomicExpr::AO__opencl_atomic_fetch_and:
731 case AtomicExpr::AO__atomic_fetch_and:
732 case AtomicExpr::AO__scoped_atomic_fetch_and:
733 Op = llvm::AtomicRMWInst::And;
736 case AtomicExpr::AO__atomic_or_fetch:
737 case AtomicExpr::AO__scoped_atomic_or_fetch:
738 PostOp = llvm::Instruction::Or;
740 case AtomicExpr::AO__c11_atomic_fetch_or:
741 case AtomicExpr::AO__hip_atomic_fetch_or:
742 case AtomicExpr::AO__opencl_atomic_fetch_or:
743 case AtomicExpr::AO__atomic_fetch_or:
744 case AtomicExpr::AO__scoped_atomic_fetch_or:
745 Op = llvm::AtomicRMWInst::Or;
748 case AtomicExpr::AO__atomic_xor_fetch:
749 case AtomicExpr::AO__scoped_atomic_xor_fetch:
750 PostOp = llvm::Instruction::Xor;
752 case AtomicExpr::AO__c11_atomic_fetch_xor:
753 case AtomicExpr::AO__hip_atomic_fetch_xor:
754 case AtomicExpr::AO__opencl_atomic_fetch_xor:
755 case AtomicExpr::AO__atomic_fetch_xor:
756 case AtomicExpr::AO__scoped_atomic_fetch_xor:
757 Op = llvm::AtomicRMWInst::Xor;
760 case AtomicExpr::AO__atomic_nand_fetch:
761 case AtomicExpr::AO__scoped_atomic_nand_fetch:
762 PostOp = llvm::Instruction::And;
764 case AtomicExpr::AO__c11_atomic_fetch_nand:
765 case AtomicExpr::AO__atomic_fetch_nand:
766 case AtomicExpr::AO__scoped_atomic_fetch_nand:
767 Op = llvm::AtomicRMWInst::Nand;
770 case AtomicExpr::AO__atomic_fetch_uinc:
771 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
772 Op = llvm::AtomicRMWInst::UIncWrap;
774 case AtomicExpr::AO__atomic_fetch_udec:
775 case AtomicExpr::AO__scoped_atomic_fetch_udec:
776 Op = llvm::AtomicRMWInst::UDecWrap;
779 case AtomicExpr::AO__atomic_test_and_set: {
780 llvm::AtomicRMWInst *RMWI =
791 case AtomicExpr::AO__atomic_clear: {
792 llvm::StoreInst *Store =
794 Store->setAtomic(Order,
Scope);
802 llvm::AtomicRMWInst *RMWI =
808 llvm::Value *Result = RMWI;
814 Result = CGF.
Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
816 if (E->
getOp() == AtomicExpr::AO__atomic_nand_fetch ||
817 E->
getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
818 Result = CGF.
Builder.CreateNot(Result);
839 if (ValTy->isFloatingPointTy())
840 return ValTy->isX86_FP80Ty() || CmpXchg;
841 return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
846 Address OriginalVal1, llvm::Value *IsWeak,
847 llvm::Value *FailureOrder, uint64_t Size,
848 llvm::AtomicOrdering Order, llvm::Value *
Scope) {
849 auto ScopeModel =
Expr->getScopeModel();
854 llvm::SyncScope::ID SS;
864 SS = llvm::SyncScope::System;
866 FailureOrder, Size, Order, SS);
871 if (
auto SC = dyn_cast<llvm::ConstantInt>(
Scope)) {
876 FailureOrder, Size, Order, SCID);
882 auto Scopes = ScopeModel->getRuntimeValues();
883 llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
884 for (
auto S : Scopes)
887 llvm::BasicBlock *ContBB =
890 auto *SC = Builder.CreateIntCast(
Scope, Builder.getInt32Ty(),
false);
893 auto FallBack = ScopeModel->getFallBackValue();
894 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
895 for (
auto S : Scopes) {
898 SI->addCase(Builder.getInt32(S), B);
900 Builder.SetInsertPoint(B);
902 FailureOrder, Size, Order,
906 Builder.CreateBr(ContBB);
909 Builder.SetInsertPoint(ContBB);
918 MemTy = AT->getValueType();
919 llvm::Value *IsWeak =
nullptr, *OrderFail =
nullptr;
926 if (E->
getOp() == AtomicExpr::AO__c11_atomic_init ||
927 E->
getOp() == AtomicExpr::AO__opencl_atomic_init) {
933 auto TInfo =
getContext().getTypeInfoInChars(AtomicTy);
934 uint64_t Size = TInfo.Width.getQuantity();
935 unsigned MaxInlineWidthInBits =
getTarget().getMaxAtomicInlineWidth();
938 getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
941 bool Oversized =
getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
944 << (int)TInfo.Width.getQuantity()
949 << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.
getQuantity();
956 switch (E->
getOp()) {
957 case AtomicExpr::AO__c11_atomic_init:
958 case AtomicExpr::AO__opencl_atomic_init:
959 llvm_unreachable(
"Already handled above with EmitAtomicInit!");
961 case AtomicExpr::AO__atomic_load_n:
962 case AtomicExpr::AO__scoped_atomic_load_n:
963 case AtomicExpr::AO__c11_atomic_load:
964 case AtomicExpr::AO__opencl_atomic_load:
965 case AtomicExpr::AO__hip_atomic_load:
966 case AtomicExpr::AO__atomic_test_and_set:
967 case AtomicExpr::AO__atomic_clear:
970 case AtomicExpr::AO__atomic_load:
971 case AtomicExpr::AO__scoped_atomic_load:
975 case AtomicExpr::AO__atomic_store:
976 case AtomicExpr::AO__scoped_atomic_store:
980 case AtomicExpr::AO__atomic_exchange:
981 case AtomicExpr::AO__scoped_atomic_exchange:
986 case AtomicExpr::AO__atomic_compare_exchange:
987 case AtomicExpr::AO__atomic_compare_exchange_n:
988 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
989 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
990 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
991 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
992 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
993 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
994 case AtomicExpr::AO__scoped_atomic_compare_exchange:
995 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
997 if (E->
getOp() == AtomicExpr::AO__atomic_compare_exchange ||
998 E->
getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
1003 if (E->
getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
1004 E->
getOp() == AtomicExpr::AO__atomic_compare_exchange ||
1005 E->
getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
1006 E->
getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
1010 case AtomicExpr::AO__c11_atomic_fetch_add:
1011 case AtomicExpr::AO__c11_atomic_fetch_sub:
1012 case AtomicExpr::AO__hip_atomic_fetch_add:
1013 case AtomicExpr::AO__hip_atomic_fetch_sub:
1014 case AtomicExpr::AO__opencl_atomic_fetch_add:
1015 case AtomicExpr::AO__opencl_atomic_fetch_sub:
1025 Val1Scalar =
Builder.CreateMul(Val1Scalar,
CGM.getSize(PointeeIncAmt));
1032 case AtomicExpr::AO__atomic_fetch_add:
1033 case AtomicExpr::AO__atomic_fetch_max:
1034 case AtomicExpr::AO__atomic_fetch_min:
1035 case AtomicExpr::AO__atomic_fetch_sub:
1036 case AtomicExpr::AO__atomic_add_fetch:
1037 case AtomicExpr::AO__atomic_max_fetch:
1038 case AtomicExpr::AO__atomic_min_fetch:
1039 case AtomicExpr::AO__atomic_sub_fetch:
1040 case AtomicExpr::AO__c11_atomic_fetch_max:
1041 case AtomicExpr::AO__c11_atomic_fetch_min:
1042 case AtomicExpr::AO__opencl_atomic_fetch_max:
1043 case AtomicExpr::AO__opencl_atomic_fetch_min:
1044 case AtomicExpr::AO__hip_atomic_fetch_max:
1045 case AtomicExpr::AO__hip_atomic_fetch_min:
1046 case AtomicExpr::AO__scoped_atomic_fetch_add:
1047 case AtomicExpr::AO__scoped_atomic_fetch_max:
1048 case AtomicExpr::AO__scoped_atomic_fetch_min:
1049 case AtomicExpr::AO__scoped_atomic_fetch_sub:
1050 case AtomicExpr::AO__scoped_atomic_add_fetch:
1051 case AtomicExpr::AO__scoped_atomic_max_fetch:
1052 case AtomicExpr::AO__scoped_atomic_min_fetch:
1053 case AtomicExpr::AO__scoped_atomic_sub_fetch:
1056 case AtomicExpr::AO__atomic_fetch_and:
1057 case AtomicExpr::AO__atomic_fetch_nand:
1058 case AtomicExpr::AO__atomic_fetch_or:
1059 case AtomicExpr::AO__atomic_fetch_xor:
1060 case AtomicExpr::AO__atomic_fetch_uinc:
1061 case AtomicExpr::AO__atomic_fetch_udec:
1062 case AtomicExpr::AO__atomic_and_fetch:
1063 case AtomicExpr::AO__atomic_nand_fetch:
1064 case AtomicExpr::AO__atomic_or_fetch:
1065 case AtomicExpr::AO__atomic_xor_fetch:
1066 case AtomicExpr::AO__atomic_store_n:
1067 case AtomicExpr::AO__atomic_exchange_n:
1068 case AtomicExpr::AO__c11_atomic_fetch_and:
1069 case AtomicExpr::AO__c11_atomic_fetch_nand:
1070 case AtomicExpr::AO__c11_atomic_fetch_or:
1071 case AtomicExpr::AO__c11_atomic_fetch_xor:
1072 case AtomicExpr::AO__c11_atomic_store:
1073 case AtomicExpr::AO__c11_atomic_exchange:
1074 case AtomicExpr::AO__hip_atomic_fetch_and:
1075 case AtomicExpr::AO__hip_atomic_fetch_or:
1076 case AtomicExpr::AO__hip_atomic_fetch_xor:
1077 case AtomicExpr::AO__hip_atomic_store:
1078 case AtomicExpr::AO__hip_atomic_exchange:
1079 case AtomicExpr::AO__opencl_atomic_fetch_and:
1080 case AtomicExpr::AO__opencl_atomic_fetch_or:
1081 case AtomicExpr::AO__opencl_atomic_fetch_xor:
1082 case AtomicExpr::AO__opencl_atomic_store:
1083 case AtomicExpr::AO__opencl_atomic_exchange:
1084 case AtomicExpr::AO__scoped_atomic_fetch_and:
1085 case AtomicExpr::AO__scoped_atomic_fetch_nand:
1086 case AtomicExpr::AO__scoped_atomic_fetch_or:
1087 case AtomicExpr::AO__scoped_atomic_fetch_xor:
1088 case AtomicExpr::AO__scoped_atomic_and_fetch:
1089 case AtomicExpr::AO__scoped_atomic_nand_fetch:
1090 case AtomicExpr::AO__scoped_atomic_or_fetch:
1091 case AtomicExpr::AO__scoped_atomic_xor_fetch:
1092 case AtomicExpr::AO__scoped_atomic_store_n:
1093 case AtomicExpr::AO__scoped_atomic_exchange_n:
1094 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
1095 case AtomicExpr::AO__scoped_atomic_fetch_udec:
1101 bool ShouldCastToIntPtrTy =
1108 AtomicInfo Atomics(*
this, AtomicVal);
1111 if (ShouldCastToIntPtrTy) {
1112 Ptr = Atomics.castToAtomicIntPointer(Ptr);
1114 Val1 = Atomics.convertToAtomicIntPointer(Val1);
1116 Val2 = Atomics.convertToAtomicIntPointer(Val2);
1119 if (ShouldCastToIntPtrTy)
1120 Dest = Atomics.castToAtomicIntPointer(Dest);
1124 Dest = Atomics.CreateTempAlloca();
1125 if (ShouldCastToIntPtrTy)
1126 Dest = Atomics.castToAtomicIntPointer(Dest);
1129 bool PowerOf2Size = (Size & (Size - 1)) == 0;
1130 bool UseLibcall = !PowerOf2Size || (Size > 16);
1150 auto CastToGenericAddrSpace = [&](llvm::Value *
V,
QualType PT) {
1157 auto *DestType = llvm::PointerType::get(
getLLVMContext(), DestAS);
1167 std::string LibCallName;
1169 bool HaveRetTy =
false;
1170 switch (E->
getOp()) {
1171 case AtomicExpr::AO__c11_atomic_init:
1172 case AtomicExpr::AO__opencl_atomic_init:
1173 llvm_unreachable(
"Already handled!");
1180 case AtomicExpr::AO__atomic_compare_exchange:
1181 case AtomicExpr::AO__atomic_compare_exchange_n:
1182 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1183 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1184 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1185 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1186 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1187 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1188 case AtomicExpr::AO__scoped_atomic_compare_exchange:
1189 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
1190 LibCallName =
"__atomic_compare_exchange";
1204 case AtomicExpr::AO__atomic_exchange:
1205 case AtomicExpr::AO__atomic_exchange_n:
1206 case AtomicExpr::AO__c11_atomic_exchange:
1207 case AtomicExpr::AO__hip_atomic_exchange:
1208 case AtomicExpr::AO__opencl_atomic_exchange:
1209 case AtomicExpr::AO__scoped_atomic_exchange:
1210 case AtomicExpr::AO__scoped_atomic_exchange_n:
1211 LibCallName =
"__atomic_exchange";
1217 case AtomicExpr::AO__atomic_store:
1218 case AtomicExpr::AO__atomic_store_n:
1219 case AtomicExpr::AO__c11_atomic_store:
1220 case AtomicExpr::AO__hip_atomic_store:
1221 case AtomicExpr::AO__opencl_atomic_store:
1222 case AtomicExpr::AO__scoped_atomic_store:
1223 case AtomicExpr::AO__scoped_atomic_store_n:
1224 LibCallName =
"__atomic_store";
1232 case AtomicExpr::AO__atomic_load:
1233 case AtomicExpr::AO__atomic_load_n:
1234 case AtomicExpr::AO__c11_atomic_load:
1235 case AtomicExpr::AO__hip_atomic_load:
1236 case AtomicExpr::AO__opencl_atomic_load:
1237 case AtomicExpr::AO__scoped_atomic_load:
1238 case AtomicExpr::AO__scoped_atomic_load_n:
1239 LibCallName =
"__atomic_load";
1241 case AtomicExpr::AO__atomic_add_fetch:
1242 case AtomicExpr::AO__scoped_atomic_add_fetch:
1243 case AtomicExpr::AO__atomic_fetch_add:
1244 case AtomicExpr::AO__c11_atomic_fetch_add:
1245 case AtomicExpr::AO__hip_atomic_fetch_add:
1246 case AtomicExpr::AO__opencl_atomic_fetch_add:
1247 case AtomicExpr::AO__scoped_atomic_fetch_add:
1248 case AtomicExpr::AO__atomic_and_fetch:
1249 case AtomicExpr::AO__scoped_atomic_and_fetch:
1250 case AtomicExpr::AO__atomic_fetch_and:
1251 case AtomicExpr::AO__c11_atomic_fetch_and:
1252 case AtomicExpr::AO__hip_atomic_fetch_and:
1253 case AtomicExpr::AO__opencl_atomic_fetch_and:
1254 case AtomicExpr::AO__scoped_atomic_fetch_and:
1255 case AtomicExpr::AO__atomic_or_fetch:
1256 case AtomicExpr::AO__scoped_atomic_or_fetch:
1257 case AtomicExpr::AO__atomic_fetch_or:
1258 case AtomicExpr::AO__c11_atomic_fetch_or:
1259 case AtomicExpr::AO__hip_atomic_fetch_or:
1260 case AtomicExpr::AO__opencl_atomic_fetch_or:
1261 case AtomicExpr::AO__scoped_atomic_fetch_or:
1262 case AtomicExpr::AO__atomic_sub_fetch:
1263 case AtomicExpr::AO__scoped_atomic_sub_fetch:
1264 case AtomicExpr::AO__atomic_fetch_sub:
1265 case AtomicExpr::AO__c11_atomic_fetch_sub:
1266 case AtomicExpr::AO__hip_atomic_fetch_sub:
1267 case AtomicExpr::AO__opencl_atomic_fetch_sub:
1268 case AtomicExpr::AO__scoped_atomic_fetch_sub:
1269 case AtomicExpr::AO__atomic_xor_fetch:
1270 case AtomicExpr::AO__scoped_atomic_xor_fetch:
1271 case AtomicExpr::AO__atomic_fetch_xor:
1272 case AtomicExpr::AO__c11_atomic_fetch_xor:
1273 case AtomicExpr::AO__hip_atomic_fetch_xor:
1274 case AtomicExpr::AO__opencl_atomic_fetch_xor:
1275 case AtomicExpr::AO__scoped_atomic_fetch_xor:
1276 case AtomicExpr::AO__atomic_nand_fetch:
1277 case AtomicExpr::AO__atomic_fetch_nand:
1278 case AtomicExpr::AO__c11_atomic_fetch_nand:
1279 case AtomicExpr::AO__scoped_atomic_fetch_nand:
1280 case AtomicExpr::AO__scoped_atomic_nand_fetch:
1281 case AtomicExpr::AO__atomic_min_fetch:
1282 case AtomicExpr::AO__atomic_fetch_min:
1283 case AtomicExpr::AO__c11_atomic_fetch_min:
1284 case AtomicExpr::AO__hip_atomic_fetch_min:
1285 case AtomicExpr::AO__opencl_atomic_fetch_min:
1286 case AtomicExpr::AO__scoped_atomic_fetch_min:
1287 case AtomicExpr::AO__scoped_atomic_min_fetch:
1288 case AtomicExpr::AO__atomic_max_fetch:
1289 case AtomicExpr::AO__atomic_fetch_max:
1290 case AtomicExpr::AO__c11_atomic_fetch_max:
1291 case AtomicExpr::AO__hip_atomic_fetch_max:
1292 case AtomicExpr::AO__opencl_atomic_fetch_max:
1293 case AtomicExpr::AO__scoped_atomic_fetch_max:
1294 case AtomicExpr::AO__scoped_atomic_max_fetch:
1295 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
1296 case AtomicExpr::AO__scoped_atomic_fetch_udec:
1297 case AtomicExpr::AO__atomic_test_and_set:
1298 case AtomicExpr::AO__atomic_clear:
1299 case AtomicExpr::AO__atomic_fetch_uinc:
1300 case AtomicExpr::AO__atomic_fetch_udec:
1301 llvm_unreachable(
"Integral atomic operations always become atomicrmw!");
1306 std::string(
"__opencl") + StringRef(LibCallName).drop_front(1).str();
1334 bool IsStore = E->
getOp() == AtomicExpr::AO__c11_atomic_store ||
1335 E->
getOp() == AtomicExpr::AO__opencl_atomic_store ||
1336 E->
getOp() == AtomicExpr::AO__hip_atomic_store ||
1337 E->
getOp() == AtomicExpr::AO__atomic_store ||
1338 E->
getOp() == AtomicExpr::AO__atomic_store_n ||
1339 E->
getOp() == AtomicExpr::AO__scoped_atomic_store ||
1340 E->
getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
1341 E->
getOp() == AtomicExpr::AO__atomic_clear;
1342 bool IsLoad = E->
getOp() == AtomicExpr::AO__c11_atomic_load ||
1343 E->
getOp() == AtomicExpr::AO__opencl_atomic_load ||
1344 E->
getOp() == AtomicExpr::AO__hip_atomic_load ||
1345 E->
getOp() == AtomicExpr::AO__atomic_load ||
1346 E->
getOp() == AtomicExpr::AO__atomic_load_n ||
1347 E->
getOp() == AtomicExpr::AO__scoped_atomic_load ||
1348 E->
getOp() == AtomicExpr::AO__scoped_atomic_load_n;
1354 if (llvm::isValidAtomicOrderingCABI(ord))
1355 switch ((llvm::AtomicOrderingCABI)ord) {
1356 case llvm::AtomicOrderingCABI::relaxed:
1357 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1358 OrderFail, Size, llvm::AtomicOrdering::Monotonic,
Scope);
1360 case llvm::AtomicOrderingCABI::consume:
1361 case llvm::AtomicOrderingCABI::acquire:
1364 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1365 OrderFail, Size, llvm::AtomicOrdering::Acquire,
Scope);
1367 case llvm::AtomicOrderingCABI::release:
1370 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1371 OrderFail, Size, llvm::AtomicOrdering::Release,
Scope);
1373 case llvm::AtomicOrderingCABI::acq_rel:
1374 if (IsLoad || IsStore)
1376 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1377 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
1380 case llvm::AtomicOrderingCABI::seq_cst:
1381 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1383 llvm::AtomicOrdering::SequentiallyConsistent,
Scope);
1396 llvm::BasicBlock *MonotonicBB =
nullptr, *AcquireBB =
nullptr,
1397 *ReleaseBB =
nullptr, *AcqRelBB =
nullptr,
1398 *SeqCstBB =
nullptr;
1404 if (!IsLoad && !IsStore)
1413 Order =
Builder.CreateIntCast(Order,
Builder.getInt32Ty(),
false);
1414 llvm::SwitchInst *SI =
Builder.CreateSwitch(Order, MonotonicBB);
1417 Builder.SetInsertPoint(MonotonicBB);
1418 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
1419 Size, llvm::AtomicOrdering::Monotonic,
Scope);
1422 Builder.SetInsertPoint(AcquireBB);
1423 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1424 OrderFail, Size, llvm::AtomicOrdering::Acquire,
Scope);
1426 SI->addCase(
Builder.getInt32((
int)llvm::AtomicOrderingCABI::consume),
1428 SI->addCase(
Builder.getInt32((
int)llvm::AtomicOrderingCABI::acquire),
1432 Builder.SetInsertPoint(ReleaseBB);
1433 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1434 OrderFail, Size, llvm::AtomicOrdering::Release,
Scope);
1436 SI->addCase(
Builder.getInt32((
int)llvm::AtomicOrderingCABI::release),
1439 if (!IsLoad && !IsStore) {
1440 Builder.SetInsertPoint(AcqRelBB);
1441 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1442 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
Scope);
1444 SI->addCase(
Builder.getInt32((
int)llvm::AtomicOrderingCABI::acq_rel),
1447 Builder.SetInsertPoint(SeqCstBB);
1448 EmitAtomicOp(*
this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
1449 Size, llvm::AtomicOrdering::SequentiallyConsistent,
Scope);
1451 SI->addCase(
Builder.getInt32((
int)llvm::AtomicOrderingCABI::seq_cst),
1455 Builder.SetInsertPoint(ContBB);
1459 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1465 llvm::IntegerType *ty =
1471 llvm::Type *Ty =
Addr.getElementType();
1473 if (SourceSizeInBits != AtomicSizeInBits) {
1474 Address Tmp = CreateTempAlloca();
1481 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1485 return castToAtomicIntPointer(
Addr);
1488RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1489 AggValueSlot resultSlot,
1491 bool asValue)
const {
1492 if (LVal.isSimple()) {
1507 if (LVal.isBitField())
1509 LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1510 LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1511 if (LVal.isVectorElt())
1513 LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1514 LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1515 assert(LVal.isExtVectorElt());
1517 addr, LVal.getExtVectorElts(), LVal.getType(),
1518 LVal.getBaseInfo(), TBAAAccessInfo()));
1521RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
1522 AggValueSlot ResultSlot,
1523 SourceLocation Loc,
bool AsValue,
1524 bool CmpXchg)
const {
1526 assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
1527 Val->getType()->isIEEELikeFPTy()) &&
1528 "Expected integer, pointer or floating point value when converting "
1531 (((!LVal.isBitField() ||
1532 LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1535 auto *ValTy = AsValue
1537 : getAtomicAddress().getElementType();
1539 assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
1540 "Different integer types.");
1543 if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
1550 bool TempIsVolatile =
false;
1556 Temp = CreateTempAlloca();
1560 Address CastTemp = castToAtomicIntPointer(Temp);
1563 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1566void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1567 llvm::AtomicOrdering AO,
bool) {
1579llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1580 bool IsVolatile,
bool CmpXchg) {
1582 Address
Addr = getAtomicAddress();
1584 Addr = castToAtomicIntPointer(
Addr);
1586 Load->setAtomic(AO);
1590 Load->setVolatile(
true);
1599 if (!
CGM.getLangOpts().MSVolatile)
return false;
1600 AtomicInfo AI(*
this, LV);
1603 bool AtomicIsInline = !AI.shouldUseLibcall();
1608 return IsVolatile && AtomicIsInline;
1613 llvm::AtomicOrdering AO;
1616 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1618 AO = llvm::AtomicOrdering::Acquire;
1625 bool AsValue, llvm::AtomicOrdering AO,
1628 if (shouldUseLibcall()) {
1630 if (LVal.isSimple() && !ResultSlot.
isIgnored()) {
1634 TempAddr = CreateTempAlloca();
1636 EmitAtomicLoadLibcall(TempAddr.
emitRawPointer(CGF), AO, IsVolatile);
1640 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1644 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1652 return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1658 llvm::AtomicOrdering AO,
bool IsVolatile,
1660 AtomicInfo Atomics(*
this, src);
1661 return Atomics.EmitAtomicLoad(resultSlot, loc,
true, AO,
1667void AtomicInfo::emitCopyIntoMemory(
RValue rvalue)
const {
1668 assert(LVal.isSimple());
1677 LVal.isVolatileQualified();
1686 emitMemSetZeroIfNecessary();
1689 LValue TempLVal = projectValue();
1702Address AtomicInfo::materializeRValue(RValue rvalue)
const {
1709 LValue TempLV = CGF.
MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1710 AtomicInfo Atomics(CGF, TempLV);
1711 Atomics.emitCopyIntoMemory(rvalue);
1712 return TempLV.getAddress();
1715llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal)
const {
1716 if (RVal.
isScalar() && (!hasPadding() || !LVal.isSimple()))
1721llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal,
bool CmpXchg)
const {
1724 if (llvm::Value *
Value = getScalarRValValueOrNull(RVal)) {
1728 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1730 LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1731 if (llvm::BitCastInst::isBitCastable(
Value->getType(), InputIntTy))
1737 Address
Addr = materializeRValue(RVal);
1740 Addr = castToAtomicIntPointer(
Addr);
1744std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1745 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1746 llvm::AtomicOrdering
Success, llvm::AtomicOrdering Failure,
bool IsWeak) {
1748 Address
Addr = getAtomicAddressAsAtomicIntPointer();
1752 Inst->setVolatile(LVal.isVolatileQualified());
1753 Inst->setWeak(IsWeak);
1756 auto *PreviousVal = CGF.
Builder.CreateExtractValue(Inst, 0);
1757 auto *SuccessFailureVal = CGF.
Builder.CreateExtractValue(Inst, 1);
1758 return std::make_pair(PreviousVal, SuccessFailureVal);
1762AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1763 llvm::Value *DesiredAddr,
1765 llvm::AtomicOrdering Failure) {
1774 llvm::ConstantInt::get(CGF.
IntTy, (
int)llvm::toCABI(
Success))),
1777 llvm::ConstantInt::get(CGF.
IntTy, (
int)llvm::toCABI(Failure))),
1782 return SuccessFailureRVal.getScalarVal();
1785std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1786 RValue Expected, RValue Desired, llvm::AtomicOrdering
Success,
1787 llvm::AtomicOrdering Failure,
bool IsWeak) {
1789 if (shouldUseLibcall()) {
1791 Address ExpectedAddr = materializeRValue(Expected);
1793 llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
1794 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
1796 return std::make_pair(
1798 SourceLocation(),
false),
1804 auto *ExpectedVal = convertRValueToInt(Expected,
true);
1805 auto *DesiredVal = convertRValueToInt(Desired,
true);
1806 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal,
Success,
1808 return std::make_pair(
1810 SourceLocation(),
false,
1820 LValue AtomicLVal = Atomics.getAtomicLValue();
1822 if (AtomicLVal.isSimple()) {
1824 DesiredLVal = CGF.
MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1827 Address Ptr = Atomics.materializeRValue(OldRVal);
1829 if (AtomicLVal.isBitField()) {
1831 LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1832 AtomicLVal.getType(),
1833 AtomicLVal.getBaseInfo(),
1834 AtomicLVal.getTBAAInfo());
1836 LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1837 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1838 AtomicLVal.getTBAAInfo());
1839 }
else if (AtomicLVal.isVectorElt()) {
1840 UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1841 AtomicLVal.getType(),
1842 AtomicLVal.getBaseInfo(),
1843 AtomicLVal.getTBAAInfo());
1844 DesiredLVal = LValue::MakeVectorElt(
1845 DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1846 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1848 assert(AtomicLVal.isExtVectorElt());
1849 UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1850 AtomicLVal.getType(),
1851 AtomicLVal.getBaseInfo(),
1852 AtomicLVal.getTBAAInfo());
1853 DesiredLVal = LValue::MakeExtVectorElt(
1854 DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1855 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1860 RValue NewRVal = UpdateOp(UpRVal);
1870void AtomicInfo::EmitAtomicUpdateLibcall(
1871 llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
1873 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1875 Address ExpectedAddr = CreateTempAlloca();
1877 EmitAtomicLoadLibcall(ExpectedAddr.
emitRawPointer(CGF), AO, IsVolatile);
1881 Address DesiredAddr = CreateTempAlloca();
1882 if ((LVal.isBitField() && BFI.
Size != ValueSizeInBits) ||
1883 requiresMemSetZero(getAtomicAddress().getElementType())) {
1887 auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1889 SourceLocation(),
false);
1894 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1895 CGF.
Builder.CreateCondBr(Res, ExitBB, ContBB);
1899void AtomicInfo::EmitAtomicUpdateOp(
1900 llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
1902 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1905 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile,
true);
1909 auto *CurBB = CGF.
Builder.GetInsertBlock();
1911 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(OldVal->getType(),
1913 PHI->addIncoming(OldVal, CurBB);
1914 Address NewAtomicAddr = CreateTempAlloca();
1915 Address NewAtomicIntAddr =
1917 ? castToAtomicIntPointer(NewAtomicAddr)
1920 if ((LVal.isBitField() && BFI.
Size != ValueSizeInBits) ||
1921 requiresMemSetZero(getAtomicAddress().getElementType())) {
1925 SourceLocation(),
false,
1930 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1931 PHI->addIncoming(Res.first, CGF.
Builder.GetInsertBlock());
1932 CGF.
Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1938 LValue AtomicLVal = Atomics.getAtomicLValue();
1941 if (AtomicLVal.isBitField()) {
1943 LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1944 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1945 AtomicLVal.getTBAAInfo());
1946 }
else if (AtomicLVal.isVectorElt()) {
1948 LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1949 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1950 AtomicLVal.getTBAAInfo());
1952 assert(AtomicLVal.isExtVectorElt());
1953 DesiredLVal = LValue::MakeExtVectorElt(
1954 DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1955 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1962void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1963 RValue UpdateRVal,
bool IsVolatile) {
1964 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1966 Address ExpectedAddr = CreateTempAlloca();
1968 EmitAtomicLoadLibcall(ExpectedAddr.
emitRawPointer(CGF), AO, IsVolatile);
1972 Address DesiredAddr = CreateTempAlloca();
1973 if ((LVal.isBitField() && BFI.
Size != ValueSizeInBits) ||
1974 requiresMemSetZero(getAtomicAddress().getElementType())) {
1982 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1983 CGF.
Builder.CreateCondBr(Res, ExitBB, ContBB);
1987void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1989 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1992 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile,
true);
1996 auto *CurBB = CGF.
Builder.GetInsertBlock();
1998 llvm::PHINode *PHI = CGF.
Builder.CreatePHI(OldVal->getType(),
2000 PHI->addIncoming(OldVal, CurBB);
2001 Address NewAtomicAddr = CreateTempAlloca();
2002 Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
2003 if ((LVal.isBitField() && BFI.
Size != ValueSizeInBits) ||
2004 requiresMemSetZero(getAtomicAddress().getElementType())) {
2010 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
2011 PHI->addIncoming(Res.first, CGF.
Builder.GetInsertBlock());
2012 CGF.
Builder.CreateCondBr(Res.second, ExitBB, ContBB);
2016void AtomicInfo::EmitAtomicUpdate(
2017 llvm::AtomicOrdering AO,
const llvm::function_ref<RValue(RValue)> &UpdateOp,
2019 if (shouldUseLibcall()) {
2020 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
2022 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
2026void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
2028 if (shouldUseLibcall()) {
2029 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
2031 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
2038 llvm::AtomicOrdering AO;
2040 AO = llvm::AtomicOrdering::SequentiallyConsistent;
2042 AO = llvm::AtomicOrdering::Release;
2054 llvm::AtomicOrdering AO,
bool IsVolatile,
2062 AtomicInfo atomics(*
this, dest);
2063 LValue LVal = atomics.getAtomicLValue();
2068 atomics.emitCopyIntoMemory(rvalue);
2073 if (atomics.shouldUseLibcall()) {
2075 Address srcAddr = atomics.materializeRValue(rvalue);
2092 llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);
2096 if (llvm::Value *
Value = atomics.getScalarRValValueOrNull(rvalue))
2098 Addr = atomics.castToAtomicIntPointer(
Addr);
2099 ValToStore =
Builder.CreateIntCast(ValToStore,
Addr.getElementType(),
2102 llvm::StoreInst *store =
Builder.CreateStore(ValToStore,
Addr);
2104 if (AO == llvm::AtomicOrdering::Acquire)
2105 AO = llvm::AtomicOrdering::Monotonic;
2106 else if (AO == llvm::AtomicOrdering::AcquireRelease)
2107 AO = llvm::AtomicOrdering::Release;
2110 store->setAtomic(AO);
2114 store->setVolatile(
true);
2120 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2127 llvm::AtomicOrdering
Success, llvm::AtomicOrdering Failure,
bool IsWeak,
2132 Expected.getAggregateAddress().getElementType() ==
2137 AtomicInfo Atomics(*
this, Obj);
2139 return Atomics.EmitAtomicCompareExchange(
Expected, Desired,
Success, Failure,
2143llvm::AtomicRMWInst *
2145 llvm::Value *Val, llvm::AtomicOrdering Order,
2146 llvm::SyncScope::ID SSID,
2148 llvm::AtomicRMWInst *RMW =
2149 Builder.CreateAtomicRMW(Op,
Addr, Val, Order, SSID);
2155 LValue LVal, llvm::AtomicOrdering AO,
2156 const llvm::function_ref<
RValue(
RValue)> &UpdateOp,
bool IsVolatile) {
2157 AtomicInfo Atomics(*
this, LVal);
2158 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2162 AtomicInfo atomics(*
this, dest);
2164 switch (atomics.getEvaluationKind()) {
2180 bool Zeroed =
false;
2182 Zeroed = atomics.emitMemSetZeroIfNecessary();
2183 dest = atomics.projectValue();
2197 llvm_unreachable(
"bad evaluation kind");
Defines the clang::ASTContext interface.
static llvm::Value * EmitPostAtomicMinMax(CGBuilderTy &Builder, AtomicExpr::AtomicOp Op, bool IsSigned, llvm::Value *OldVal, llvm::Value *RHS)
Duplicate the atomic min/max operation in conventional IR for the builtin variants that return the ne...
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref< RValue(RValue)> &UpdateOp, Address DesiredAddr)
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E)
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args)
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, Address ExpectedResult, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, llvm::AtomicOrdering Order, llvm::SyncScope::ID Scope)
static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo, TBAAAccessInfo *TBAAInfo, KnownNonNull_t IsKnownNonNull, CodeGenFunction &CGF)
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg)
Return true if.
static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak, Address dest, Address ptr, Address val1, Address val2, uint64_t size, cir::MemOrder successOrder, cir::MemOrder failureOrder, cir::SyncScopeKind scope)
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak, Address dest, Address ptr, Address val1, Address val2, Expr *failureOrderExpr, uint64_t size, cir::MemOrder successOrder, cir::SyncScopeKind scope)
static QualType getPointeeType(const MemRegion *R)
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size.
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
static std::unique_ptr< AtomicScopeModel > getScopeModel(AtomicOp Op)
Get atomic scope model for the atomic op code.
QualType getValueType() const
SourceLocation getBeginLoc() const LLVM_READONLY
Expr * getOrderFail() const
CharUnits - This is an opaque type for sizes expressed in character units.
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
bool isMultipleOf(CharUnits N) const
Test whether this is a multiple of the other value.
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
CharUnits getAlignment() const
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address getAddress() const
static AggValueSlot forLValue(const LValue &LV, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
A scoped helper to set the current source atom group for CGDebugInfo::addInstToCurrentSourceAtom.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty, llvm::Type *ElementTy, const llvm::Twine &Name="")
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
CGFunctionInfo - Class to encapsulate the information about a function definition.
CallArgList - Type for representing both the value and type of arguments in a call.
void add(RValue rvalue, QualType type)
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
void EmitAtomicInit(Expr *E, LValue lvalue)
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void addInstToCurrentSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
See CGDebugInfo::addInstToCurrentSourceAtom.
const LangOptions & getLangOpts() const
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
void maybeAttachRangeForLoad(llvm::LoadInst *Load, QualType Ty, SourceLocation Loc)
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
const TargetInfo & getTarget() const
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
CGDebugInfo * getDebugInfo()
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
const TargetCodeGenInfo & getTargetHooks() const
ASTContext & getContext() const
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
llvm::AtomicRMWInst * emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Order=llvm::AtomicOrdering::SequentiallyConsistent, llvm::SyncScope::ID SSID=llvm::SyncScope::System, const AtomicExpr *AE=nullptr)
Emit an atomicrmw instruction, and applying relevant metadata when applicable.
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
llvm::Type * ConvertTypeForMem(QualType T)
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
RValue EmitAtomicExpr(AtomicExpr *E)
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
RValue EmitLoadOfExtVectorElementLValue(LValue V)
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
llvm::LLVMContext & getLLVMContext()
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
This class organizes the cross-function state that is used while generating LLVM code.
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
const LangOptions & getLangOpts() const
CodeGenTypes & getTypes()
const llvm::DataLayout & getDataLayout() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
llvm::LLVMContext & getLLVMContext()
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
LValue - This represents an lvalue references.
bool isVolatileQualified() const
Address getAddress() const
TBAAAccessInfo getTBAAInfo() const
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
static RValue get(llvm::Value *V)
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Address getAggregateAddress() const
getAggregateAddr() - Return the Value* of the address of the aggregate.
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
bool isVolatileQualified() const
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering, llvm::LLVMContext &Ctx) const
Get the syncscope used in LLVM IR as a SyncScope ID.
virtual void setTargetAtomicMetadata(CodeGenFunction &CGF, llvm::Instruction &AtomicInst, const AtomicExpr *Expr=nullptr) const
Allow the target to apply other metadata to an atomic instruction.
Concrete class used by the front-end to report problems and issues.
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
PointerType - C99 6.7.5.1 - Pointer Declarators.
A (possibly-)qualified type.
bool isNull() const
Return true if this QualType doesn't point to a type yet.
LangAS getAddressSpace() const
Return the address space of this type.
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Scope - A scope is a transient data structure that is used while parsing the program.
Encodes a location in the source.
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
bool isPointerType() const
const T * castAs() const
Member-template castAs<specific type>.
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
bool isAtomicType() const
bool isFloatingType() const
const T * getAs() const
Member-template getAs<specific type>'.
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
bool Load(InterpState &S, CodePtr OpPC)
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
@ Success
Annotation was successful.
llvm::Expected< QualType > ExpectedType
llvm::StringRef getAsString(SyncScope S)
U cast(CodeGen::Address addr)
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
unsigned Size
The total size of the bit-field, in bits.
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
llvm::PointerType * VoidPtrTy
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
llvm::IntegerType * SizeTy
llvm::IntegerType * IntTy
int
llvm::PointerType * DefaultPtrTy