#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
namespace {
class AtomicInfo {
  CodeGenFunction &CGF;
  QualType AtomicTy;
  QualType ValueTy;
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  CharUnits AtomicAlign;
  CharUnits ValueAlign;
  TypeEvaluationKind EvaluationKind;
  bool UseLibcall;
  LValue LVal;
  CGBitFieldInfo BFI;

public:
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    assert(!lvalue.isGlobalReg());
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      LVal = lvalue;
74 }
else if (lvalue.isBitField()) {
75 ValueTy = lvalue.getType();
76 ValueSizeInBits =
C.getTypeSize(ValueTy);
77 auto &OrigBFI = lvalue.getBitFieldInfo();
78 auto Offset = OrigBFI.Offset %
C.toBits(lvalue.getAlignment());
79 AtomicSizeInBits =
C.toBits(
80 C.toCharUnitsFromBits(Offset + OrigBFI.Size +
C.getCharWidth() - 1)
81 .alignTo(lvalue.getAlignment()));
82 llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
84 (
C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
85 lvalue.getAlignment();
86 llvm::Value *StoragePtr = CGF.
Builder.CreateConstGEP1_64(
87 CGF.
Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
94 llvm::Type *StorageTy = CGF.
Builder.getIntNTy(AtomicSizeInBits);
95 LVal = LValue::MakeBitfield(
96 Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
97 lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
98 AtomicTy =
C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
102 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
103 AtomicTy =
C.getConstantArrayType(
C.CharTy, Size,
nullptr,
104 ArraySizeModifier::Normal,
107 AtomicAlign = ValueAlign = lvalue.getAlignment();
108 }
    } else if (lvalue.isVectorElt()) {
      ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = lvalue.getType();
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    } else {
      assert(lvalue.isExtVectorElt());
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), cast<llvm::FixedVectorType>(
                                lvalue.getExtVectorAddress().getElementType())
                                ->getNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
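
  // EDITOR'S NOTE (illustrative, not part of the original file): the
  // constructor ends by choosing between inline atomic instructions and
  // libatomic calls. On a typical 64-bit target, roughly:
  //
  //   _Atomic int counter;        // 4 bytes, naturally aligned:
  //                               // hasBuiltinAtomic() is true, so loads
  //                               // and RMW ops become native instructions.
  //   struct Big { char b[32]; };
  //   _Atomic struct Big blob;    // 32 bytes exceeds the target's maximum
  //                               // inline atomic width, so UseLibcall is
  //                               // set and accesses go through __atomic_*.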
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {
    if (LVal.isSimple())
      return LVal.emitRawPointer(CGF);
    else if (LVal.isBitField())
      return LVal.getRawBitFieldPointer(CGF);
    else if (LVal.isVectorElt())
      return LVal.getRawVectorPointer(CGF);
    assert(LVal.isExtVectorElt());
    return LVal.getRawExtVectorPointer(CGF);
  }
  Address getAtomicAddress() const {
    llvm::Type *ElTy;
    if (LVal.isSimple())
      ElTy = LVal.getAddress().getElementType();
    else if (LVal.isBitField())
      ElTy = LVal.getBitFieldAddress().getElementType();
    else if (LVal.isVectorElt())
      ElTy = LVal.getVectorAddress().getElementType();
    else
      ElTy = LVal.getExtVectorAddress().getElementType();
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return castToAtomicIntPointer(getAtomicAddress());
  }

  /// Is the atomic size larger than the underlying value type?
  bool hasPadding() const { return (ValueSizeInBits != AtomicSizeInBits); }
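
  // EDITOR'S NOTE (illustrative, not part of the original file): padding
  // arises whenever the atomic width is rounded up from the value width.
  // On x86-64, for example, `_Atomic long double` keeps an 80-bit value in
  // a 128-bit, 16-byte-aligned slot, so ValueSizeInBits (80) differs from
  // AtomicSizeInBits (128) and hasPadding() returns true. The padding bits
  // must be given a deterministic (zero) pattern because compare-exchange
  // compares every stored bit.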
  bool emitMemSetZeroIfNecessary() const;
  llvm::Value *getAtomicSizeValue() const {
    CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
    return CGF.CGM.getSize(size);
  }
  Address castToAtomicIntPointer(Address Addr) const;

  Address convertToAtomicIntPointer(Address Addr) const;

  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                   SourceLocation loc, bool AsValue) const;

  llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

  llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

  RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                SourceLocation Loc, bool AsValue,
                                bool CmpXchg = false) const;

  void emitCopyIntoMemory(RValue rvalue) const;
  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(LVal.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);
    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                            LVal.getBaseInfo(), LVal.getTBAAInfo());
  }
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO,
                        bool IsVolatile);

  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);

  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  Address materializeRValue(RValue rvalue) const;

  Address CreateTempAlloca() const;

private:
  bool requiresMemSetZero(llvm::Type *type) const;
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);

  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                bool CmpXchg = false);

  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);

  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);
  void
  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
} // end anonymous namespace
Address AtomicInfo::CreateTempAlloca() const {
  QualType TmpTy = (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits)
                       ? ValueTy
                       : AtomicTy;
  Address TempAlloca =
      CGF.CreateMemTemp(TmpTy, getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName,
                                QualType resultType, CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern.  Just skip the memset.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}
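
// EDITOR'S NOTE (illustrative, not part of the original file): the memset
// matters mostly for compare-exchange on padded types. Given roughly
//
//   struct S { char c[3]; };  // 3 bytes, usually rounded to a 4-byte atomic
//   _Atomic struct S s;
//
// the hardware compares all four bytes, so the unused byte of both the
// object and any "expected" temporary is zero-filled up front to make the
// padding compare equal.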
381 uint64_t Size, llvm::AtomicOrdering SuccessOrder,
382 llvm::AtomicOrdering FailureOrder,
383 llvm::SyncScope::ID
Scope) {
391 Pair->setWeak(IsWeak);
396 llvm::Value *Old = CGF.
Builder.CreateExtractValue(Pair, 0);
397 llvm::Value *
Cmp = CGF.
Builder.CreateExtractValue(Pair, 1);
401 llvm::BasicBlock *StoreExpectedBB =
406 llvm::BasicBlock *ContinueBB =
411 CGF.
Builder.CreateCondBr(
Cmp, ContinueBB, StoreExpectedBB);
413 CGF.
Builder.SetInsertPoint(StoreExpectedBB);
417 uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(
ExpectedType);
419 if (ExpectedSizeInBytes == Size) {
425 llvm::Type *OldType = Old->getType();
441 CGF.
Builder.CreateBr(ContinueBB);
443 CGF.
Builder.SetInsertPoint(ContinueBB);
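
// EDITOR'S NOTE (illustrative, not part of the original file): for a 4-byte
// strong compare-exchange, this helper emits IR of roughly the shape:
//
//   %pair = cmpxchg ptr %obj, i32 %expected, i32 %desired seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
//
// with the failure path writing %old back into the caller's "expected"
// slot, matching the by-reference semantics of __atomic_compare_exchange.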
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(
    CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest,
    Address Ptr, Address Val1, Address Val2, Address ExpectedResult,
    llvm::Value *FailureOrderVal, uint64_t Size,
    llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // The failure argument shall not be memory_order_release nor
      // memory_order_acq_rel; fall back to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
                      Size, SuccessOrder, FailureOrder, Scope);
    return;
  }

  // Create all the relevant BB's.
  auto *MonotonicBB = CGF.createBasicBlock("monotonic", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice this
  // only matters when the ordering doesn't fold to a constant.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // consume is implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
                    Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
                    Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
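
// EDITOR'S NOTE (not part of the original file): the mapping above follows
// the C11/C++11 rules for the failure ordering of a compare-exchange:
// consume and acquire both lower to Acquire, seq_cst stays sequentially
// consistent, and release/acq_rel are invalid failure orderings, so they are
// demoted to Monotonic rather than emitting invalid IR. When the ordering is
// only known at run time, one cmpxchg is emitted per legal ordering and a
// switch selects among them.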
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned, llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  const bool IsFP = OldVal->getType()->isFloatingPointTy();
  if (IsFP) {
    llvm::Intrinsic::ID IID = (Op == AtomicExpr::AO__atomic_max_fetch ||
                               Op == AtomicExpr::AO__scoped_atomic_max_fetch)
                                  ? llvm::Intrinsic::maxnum
                                  : llvm::Intrinsic::minnum;
    return Builder.CreateBinaryIntrinsic(IID, OldVal, RHS, llvm::FMFSource(),
                                         "newval");
  }

  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         Address ExpectedResult, llvm::Value *IsWeak,
                         llvm::Value *FailureOrder, uint64_t Size,
                         llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1,
                                Val2, ExpectedResult, FailureOrder, Size,
                                Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1,
                                Val2, ExpectedResult, FailureOrder, Size,
                                Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, ExpectedResult, FailureOrder,
                                  Size, Order, Scope);
    } else {
      // Create all the relevant BB's.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/false, Dest, Ptr, Val1,
                                  Val2, ExpectedResult, FailureOrder, Size,
                                  Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, /*IsWeak=*/true, Dest, Ptr, Val1,
                                  Val2, ExpectedResult, FailureOrder, Size,
                                  Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    // The NAND result is complemented after the atomicrmw below.
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;

  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
    Op = llvm::AtomicRMWInst::UIncWrap;
    break;

  case AtomicExpr::AO__atomic_fetch_udec:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
    Op = llvm::AtomicRMWInst::UDecWrap;
    break;
  case AtomicExpr::AO__atomic_test_and_set: {
    llvm::AtomicRMWInst *RMWI =
        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
                              CGF.Builder.getInt8(1), Order, Scope, E);
    RMWI->setVolatile(E->isVolatile());
    llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");
    CGF.Builder.CreateStore(Result, Dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    llvm::StoreInst *Store =
        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  }

  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp,
                                     RMWI, LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
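
// EDITOR'S NOTE (illustrative, not part of the original file): atomicrmw
// returns the *old* value, but the __atomic_*_fetch builtins must return
// the *new* one, so the operation is replayed on the loaded result. For a
// signed __atomic_max_fetch on int, the emitted IR is roughly:
//
//   %old    = atomicrmw max ptr %p, i32 %rhs seq_cst
//   %tst    = icmp sgt i32 %old, %rhs
//   %newval = select i1 %tst, i32 %old, i32 %rhs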
/// Return true if the value should be cast to (and operated on as) an
/// integer of the atomic width around the atomic memory operation.
static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}
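
// EDITOR'S NOTE (not part of the original file): floating-point operands can
// usually feed atomic load/store/atomicrmw directly, but x86_fp80 holds an
// 80-bit value in a 96- or 128-bit slot and cmpxchg compares every stored
// bit, so those cases round-trip through an integer of the full atomic width
// instead.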
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         Address OriginalVal1, llvm::Value *IsWeak,
                         llvm::Value *FailureOrder, uint64_t Size,
                         llvm::AtomicOrdering Order, llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a sync scope. If the clang atomic
  // expression has no scope operand, use the default LLVM sync scope.
  if (!ScopeModel) {
    llvm::SyncScope::ID SS;
    if (CGF.getLangOpts().OpenCL)
      // OpenCL atomics without a memory_scope argument behave as if the
      // scope were memory_scope_device.
      SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                   SyncScope::OpenCLDevice,
                                                   Order,
                                                   CGF.getLLVMContext());
    else
      SS = llvm::SyncScope::System;
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                 FailureOrder, Size, Order, SS);
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()), Order,
        CGF.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                 FailureOrder, Size, Order, SCID);
    return;
  }

  // Handle non-constant scope: switch over the runtime scope values.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported sync scope is encountered at run time, assume the
  // fallback scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                 FailureOrder, Size, Order,
                 CGF.getTargetHooks().getLLVMSyncScopeID(
                     CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
                     CGF.getLLVMContext()));
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  DiagnosticsEngine &Diags = CGM.getDiags();
  bool Misaligned = !Ptr.getAlignment().isMultipleOf(TInfo.Width);
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }
  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__atomic_fetch_udec:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }
  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  bool ShouldCastToIntPtrTy =
      shouldCastToInt(ConvertTypeForMem(MemTy), E->isCmpXChg());
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  Address OriginalVal1 = Val1;
  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }

  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);
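
  // EDITOR'S NOTE (not part of the original file): from here on only two
  // strategies remain. Sizes of 1, 2, 4, 8, and 16 bytes (powers of two up
  // to 16) are emitted inline; everything else, e.g. a 24-byte struct, is
  // routed to the size-generic __atomic_* libcalls, which take the object
  // size as a run-time argument.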
  if (UseLibcall) {
    // In OpenCL, pointer arguments to the generic libcalls must be in the
    // generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
      return getTargetHooks().performAddrSpaceCast(*this, V, AS, DestType,
                                                   false);
    };
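
    // EDITOR'S NOTE (not part of the original file): everything below
    // marshals arguments for the size-generic libatomic entry points, whose
    // C prototypes are roughly:
    //
    //   void __atomic_load (size_t size, void *obj, void *ret, int order);
    //   void __atomic_store(size_t size, void *obj, void *val, int order);
    //   void __atomic_exchange(size_t size, void *obj, void *val, void *ret,
    //                          int order);
    //   bool __atomic_compare_exchange(size_t size, void *obj,
    //                                  void *expected, void *desired,
    //                                  int success, int failure);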
    // The generic libcalls take the object size as their first argument and
    // the atomic address (as a void pointer) as their second.
    CallArgList Args;
    Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
             getContext().getSizeType());
    Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
                                                E->getPtr()->getType())),
             getContext().VoidPtrTy);

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare-and-exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare-and-exchange.
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__scoped_atomic_compare_exchange:
    case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      // (The expected/desired pointer arguments and the success ordering are
      //  appended here; elided in this extract.)
      Order = OrderFail;
      break;
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange_n:
      LibCallName = "__atomic_exchange";
      break;
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      break;
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__scoped_atomic_add_fetch:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__scoped_atomic_fetch_add:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__scoped_atomic_and_fetch:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__scoped_atomic_fetch_and:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__scoped_atomic_or_fetch:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__scoped_atomic_fetch_or:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__scoped_atomic_sub_fetch:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__scoped_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__scoped_atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__scoped_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_nand_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_min_fetch:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_max_fetch:
    case AtomicExpr::AO__scoped_atomic_fetch_uinc:
    case AtomicExpr::AO__scoped_atomic_fetch_udec:
    case AtomicExpr::AO__atomic_test_and_set:
    case AtomicExpr::AO__atomic_clear:
    case AtomicExpr::AO__atomic_fetch_uinc:
    case AtomicExpr::AO__atomic_fetch_udec:
      llvm_unreachable("Integral atomic operations always become atomicrmw!");
    }

    if (E->isOpenCL())
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    // (The remaining argument setup, the emitAtomicLibcall invocation, and
    //  the conversion of its result are elided in this extract; every
    //  libcall path returns before the code below.)
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__atomic_clear;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid
    // C ABI value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                     OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                     OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                     OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior.
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                     OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
                     Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                     OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest, RValTy, E->getExprLoc());
  }

  // Long case, when Order isn't obviously constant.
  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split. MonotonicBB is arbitrarily chosen as
  // the default case; in practice this only matters when the ordering
  // doesn't fold to a constant.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
               OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                 OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                 OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
                 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
               OrderFail, Size, llvm::AtomicOrdering::SequentiallyConsistent,
               Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest, RValTy, E->getExprLoc());
}
Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    if (EvaluationKind == TEK_Aggregate)
      return resultSlot.asRValue();

    // Drill into the padding structure if we have one.
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);

    // Otherwise, just convert the temporary to an r-value using the
    // normal conversion routine.
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }
  if (!asValue)
    // Get RValue from temp memory as atomic for non-simple lvalues.
    return RValue::get(CGF.Builder.CreateLoad(addr));
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()),
        loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()),
        loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(), LVal.getBaseInfo(),
      TBAAAccessInfo()));
}
RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  // Try not to go through memory in the easy cases.
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue ? CGF.ConvertTypeForMem(ValueTy)
                          : getAtomicAddress().getElementType();
    if (!shouldCastToInt(ValTy, CmpXchg)) {
      assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
             "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
    }
    if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
  }

  // Create a temporary. This needs to be big enough to hold the atomic
  // integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile
/// and performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile)
    return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO,
                                       bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type, which
  // means that the caller is responsible for having zeroed any padding.
  // Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src =
        CGF.MakeAddrLValue(rvalue.getAggregateAddress(), getAtomicType());
    bool IsVolatile =
        rvalue.isVolatileQualified() || LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*isInit=*/true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*isInit=*/true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}
llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory. Floats get casted if needed by AtomicExpandPass.
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }

  // Otherwise, we need to go through memory. Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic compare-exchange.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce the source addresses.
    Address ExpectedAddr = materializeRValue(Expected);
    llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                             SourceLocation(), /*AsValue=*/false,
                             /*CmpXchg=*/true),
      Res.second);
}
static void EmitAtomicUpdateValue(
    CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store the new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(),
                                           /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;

  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                        SourceLocation(), /*AsValue=*/false,
                                        /*CmpXchg=*/true);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
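
// EDITOR'S NOTE (illustrative, not part of the original file): the update
// loop emitted here has the classic load/compute/cmpxchg shape, roughly:
//
//   %old = load atomic i32, ptr %p monotonic
//   br label %atomic_cont
// atomic_cont:
//   %phi  = phi i32 [ %old, %entry ], [ %prev, %atomic_cont ]
//   ;; ... UpdateOp(%phi) computes %new ...
//   %pair = cmpxchg ptr %p, i32 %phi, i32 %new <AO> <Failure>
//   %prev = extractvalue { i32, i1 } %pair, 0
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   br i1 %ok, label %atomic_exit, label %atomic_cont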
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store the new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform a compare-and-swap loop.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}

/// Emit a store to an l-value of atomic type. Note that the r-value is
/// expected to be an r-value *of the atomic type*; for aggregates this means
/// it should include storage for any padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except maybe
  // for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  if (LVal.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);
      // void __atomic_store(size_t size, void *mem, void *val, int order).
      // (Argument marshalling via srcAddr is elided in this extract.)
      (void)srcAddr;
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
        Addr = atomics.castToAtomicIntPointer(Addr);
        ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
                                           /*isSigned=*/false);
      }
    llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

    // Orderings that are invalid for a store are downgraded to the
    // strongest legal store ordering.
    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit a simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
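
// EDITOR'S NOTE (not part of the original file): the downgrade above mirrors
// the C11 rule that memory_order_acquire and memory_order_acq_rel are not
// valid for a pure store; rather than emit undefined IR, the store is
// weakened to the strongest legal store ordering (monotonic and release
// respectively).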
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If these are aggregate r-values, they should agree in type except maybe
  // for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success,
                                           Failure, IsWeak);
}
llvm::AtomicRMWInst *
CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
                                   llvm::Value *Val, llvm::AtomicOrdering Order,
                                   llvm::SyncScope::ID SSID,
                                   const AtomicExpr *AE) {
  llvm::AtomicRMWInst *RMW =
      Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
  getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
  return RMW;
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar:
    atomics.emitCopyIntoMemory(RValue::get(EmitScalarExpr(init)));
    return;
  case TEK_Complex:
    atomics.emitCopyIntoMemory(RValue::getComplex(EmitComplexExpr(init)));
    return;
  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression of
    // atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }
    // Evaluate the expression directly into the destination.
    EmitAggExpr(init, AggValueSlot::forLValue(
                          dest, AggValueSlot::IsNotDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          AggValueSlot::DoesNotOverlap,
                          Zeroed ? AggValueSlot::IsZeroed
                                 : AggValueSlot::IsNotZeroed));
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}