#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
namespace {
class AtomicInfo {
  CodeGenFunction &CGF;
  QualType AtomicTy;
  QualType ValueTy;
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  CharUnits AtomicAlign;
  CharUnits ValueAlign;
  TypeEvaluationKind EvaluationKind;
  bool UseLibcall;
  LValue LVal;
  CGBitFieldInfo BFI;

public:
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    assert(!lvalue.isGlobalReg());
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;
      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);
      LVal = lvalue;
    } else if (lvalue.isBitField()) {
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      auto &OrigBFI = lvalue.getBitFieldInfo();
      auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .alignTo(lvalue.getAlignment()));
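      // Worked example (hypothetical layout): a 17-bit field starting 3 bits
      // into a 4-byte-aligned storage unit covers ceil((3 + 17) / 8) = 3
      // bytes, and alignTo() then widens that to the full 4-byte unit, so
      // AtomicSizeInBits becomes 32.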
      llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
      StoragePtr = CGF.Builder.CreateAddrSpaceCast(
          StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
      BFI = OrigBFI;
      BFI.Offset = Offset;
      BFI.StorageSize = AtomicSizeInBits;
      BFI.StorageOffset += OffsetInChars;
      llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
      LVal = LValue::MakeBitfield(
          Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
          lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
                                          ArraySizeModifier::Normal,
                                          /*IndexTypeQuals=*/0);
      }
      AtomicAlign = ValueAlign = lvalue.getAlignment();
    } else if (lvalue.isVectorElt()) {
      ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = lvalue.getType();
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    } else {
      assert(lvalue.isExtVectorElt());
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), cast<llvm::FixedVectorType>(
                                lvalue.getExtVectorAddress().getElementType())
                                ->getNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
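  // The target reports whether a lock-free sequence exists for this
  // size/alignment pair; if it does not, every operation on this l-value is
  // routed through the __atomic_* libcalls instead.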
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {
    if (LVal.isSimple())
      return LVal.emitRawPointer(CGF);
    else if (LVal.isBitField())
      return LVal.getRawBitFieldPointer(CGF);
    else if (LVal.isVectorElt())
      return LVal.getRawVectorPointer(CGF);
    assert(LVal.isExtVectorElt());
    return LVal.getRawExtVectorPointer(CGF);
  }
  Address getAtomicAddress() const {
    llvm::Type *ElTy;
    if (LVal.isSimple())
      ElTy = LVal.getAddress().getElementType();
    else if (LVal.isBitField())
      ElTy = LVal.getBitFieldAddress().getElementType();
    else if (LVal.isVectorElt())
      ElTy = LVal.getVectorAddress().getElementType();
    else
      ElTy = LVal.getExtVectorAddress().getElementType();
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }
  Address getAtomicAddressAsAtomicIntPointer() const {
    return castToAtomicIntPointer(getAtomicAddress());
  }
  /// Is the atomic size larger than the underlying value type?
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }
  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
    CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
    return CGF.CGM.getSize(size);
  }

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations of the atomic size.
  Address castToAtomicIntPointer(Address Addr) const;

  /// If Addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it; otherwise create a suitably sized temporary and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address Addr) const;

  /// Turn an atomic-layout object into an r-value.
  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
                                   SourceLocation loc, bool AsValue) const;

  llvm::Value *getScalarRValValueOrNull(RValue RVal) const;

  /// Converts an r-value to an integer value.
  llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;

  RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                SourceLocation Loc, bool AsValue,
                                bool CmpXchg = false) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;
  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(LVal.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding())
      addr = CGF.Builder.CreateStructGEP(addr, 0);
    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                            LVal.getBaseInfo(), LVal.getTBAAInfo());
  }
  /// Emits an atomic load as a libcall or as LLVM instructions.
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO,
                        bool IsVolatile);

  /// Emits an atomic compare-and-exchange sequence.
  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  /// Emits an atomic update.
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  /// Materialize an atomic r-value into atomic-layout memory.
  Address materializeRValue(RValue rvalue) const;

  /// Creates a temp alloca for intermediate operations on an atomic value.
  Address CreateTempAlloca() const;
private:
  bool requiresMemSetZero(llvm::Type *type) const;

  /// Emits an atomic load as a libcall.
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  /// Emits an atomic load as an LLVM instruction.
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
                                bool CmpXchg = false);
  /// Emits an atomic compare-and-exchange libcall.
  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);
  /// Emits an atomic compare-and-exchange as LLVM instructions.
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);
  /// Emit an atomic update as libcalls.
  void
  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  /// Emit an atomic update as LLVM instructions.
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
                          bool IsVolatile);
};
} // namespace
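// Typical use (a sketch of the pattern the rest of this file follows): wrap
// an LValue in AtomicInfo, then pick the libcall or native path.
//   AtomicInfo Atomics(CGF, LV);
//   if (Atomics.shouldUseLibcall())
//     ... emit a __atomic_* runtime call ...
//   else
//     ... emit native load/store/atomicrmw/cmpxchg instructions ...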
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName,
                                QualType resultType, CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  auto callee = CGCallee::forDirect(fn);
  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making this question cheaper.
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  Don't do
  // any defensive clearing.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}
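// For example, an x86 long double stores only 80 of its 96 or 128 bits, so
// the padding is cleared before the value is written (presumably so that
// bitwise comparisons in cmpxchg loops see a stable bit pattern).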
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation; we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) FailureOrder.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fallback to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size,
                      SuccessOrder, FailureOrder, Scope);
    return;
  }
  // Create all the relevant BB's.
  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // only matters if the ordering doesn't fold to a constant.
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
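// In effect, a non-constant failure order lowers to:
//   switch (failure_order) {
//   default:      cmpxchg ... monotonic   // monotonic_fail
//   case consume:
//   case acquire: cmpxchg ... acquire     // acquire_fail
//   case seq_cst: cmpxchg ... seq_cst     // seqcst_fail
//   }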
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned, llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  const bool IsFP = OldVal->getType()->isFloatingPointTy();
  if (IsFP) {
    llvm::Intrinsic::ID IID = (Op == AtomicExpr::AO__atomic_max_fetch ||
                               Op == AtomicExpr::AO__scoped_atomic_max_fetch)
                                  ? llvm::Intrinsic::maxnum
                                  : llvm::Intrinsic::minnum;
    return Builder.CreateBinaryIntrinsic(IID, OldVal, RHS, llvm::FMFSource(),
                                         "newval");
  }

  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order,
                                  Scope);
    } else {
      // Create all the relevant BB's.
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    // ... (attach range/TBAA metadata where applicable)
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;
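    // Note: atomicrmw returns the value the memory held *before* the
    // operation, while the *_fetch builtins must return the value *after* it;
    // PostOpMinMax makes EmitPostAtomicMinMax recompute min/max(old, rhs)
    // once the atomicrmw completes.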
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  case AtomicExpr::AO__atomic_test_and_set: {
    llvm::AtomicRMWInst *RMWI =
        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
                              CGF.Builder.getInt8(1), Order, Scope, E);
    RMWI->setVolatile(E->isVolatile());
    // ... (convert the fetched byte to bool and store it to Dest)
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    llvm::StoreInst *Store =
        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }
  }
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
      E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
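// For the op_fetch builtins, the binary operation is thus replayed on the
// loaded result; nand additionally inverts, since nand(a, b) == ~(a & b).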
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
                       /*Init*/ true);
  return DeclPtr;
}

static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have a sync scope. If the clang atomic
  // expression has no scope operand, use the default LLVM sync scope.
  if (!ScopeModel) {
    llvm::SyncScope::ID SS;
    if (CGF.getLangOpts().OpenCL)
      // OpenCL atomics without a scope argument behave as if the scope were
      // memory_scope_device.
      SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
                                                   SyncScope::OpenCLDevice,
                                                   Order,
                                                   CGF.getLLVMContext());
    else
      SS = llvm::SyncScope::System;
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SS);
    return;
  }
  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()), Order,
        CGF.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }
  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported sync scope is encountered at run time, fall back to
  // the model's designated fallback value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    // ... (emit the same atomic op with the LLVM sync scope mapped from S)
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
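// A non-constant scope therefore becomes a switch over the runtime scope
// values, one block per supported scope, each emitting the same operation
// with a different LLVM syncscope and merging at atomic.scope.continue.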
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }
  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();

  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
  DiagnosticsEngine &Diags = CGM.getDiags();
  bool Misaligned = !Ptr.getAlignment().isMultipleOf(TInfo.Width);
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }
  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }

  bool ShouldCastToIntPtrTy = true;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... but only for the C11 builtins. The GNU builtins expect the
      // user to multiply by sizeof(T).
      QualType Val1Ty = E->getVal1()->getType();
      llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
      CharUnits PointeeIncAmt =
          getContext().getTypeSizeInChars(MemTy->getPointeeType());
      Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
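      // Example: with _Atomic(int *) p, __c11_atomic_fetch_add(&p, 2, ...)
      // scales the addend to 2 * sizeof(int) bytes here, whereas the GNU
      // __atomic_fetch_add builtin expects the caller to pre-multiply.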
      auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
      Val1 = Temp;
      EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
      break;
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);
  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }
  bool PowerOf2Size = (Size & (Size - 1)) == 0;
  bool UseLibcall = !PowerOf2Size || (Size > 16);
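  // Inline atomics are only emitted for power-of-2 sizes of at most 16
  // bytes; e.g. a 12-byte struct (or anything larger than 16 bytes) always
  // goes through the libatomic runtime.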
  if (UseLibcall) {
    // If the expression is OpenCL, the libcall wants its pointer arguments
    // in the generic address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
      return getTargetHooks().performAddrSpaceCast(*this, V, AS, DestType,
                                                   /*IsNonNull=*/false);
    };

    std::string LibCallName;
    QualType RetTy;
    bool HaveRetTy = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is
    // no optimisation benefit possible from a libcall version of a weak
    // compare and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__scoped_atomic_compare_exchange:
    case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      // ... (set the bool return type and append the expected/desired
      //      pointers plus the failure-order argument)
      break;
    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange:
    case AtomicExpr::AO__scoped_atomic_exchange_n:
      LibCallName = "__atomic_exchange";
      // ... (append the value pointer argument)
      break;

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store:
    case AtomicExpr::AO__scoped_atomic_store_n:
      LibCallName = "__atomic_store";
      // ... (void return type plus the value pointer argument)
      break;

    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load:
    case AtomicExpr::AO__scoped_atomic_load_n:
      LibCallName = "__atomic_load";
      break;
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__scoped_atomic_add_fetch:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__scoped_atomic_fetch_add:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__scoped_atomic_and_fetch:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__scoped_atomic_fetch_and:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__scoped_atomic_or_fetch:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__scoped_atomic_fetch_or:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__scoped_atomic_sub_fetch:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__scoped_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__scoped_atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__scoped_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_fetch_nand:
    case AtomicExpr::AO__scoped_atomic_nand_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_fetch_min:
    case AtomicExpr::AO__scoped_atomic_min_fetch:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_fetch_max:
    case AtomicExpr::AO__scoped_atomic_max_fetch:
    case AtomicExpr::AO__atomic_test_and_set:
    case AtomicExpr::AO__atomic_clear:
      llvm_unreachable("Integral atomic operations always become atomicrmw!");
    }

    if (E->isOpenCL())
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    // ... (marshal the size, pointer (cast to the generic address space via
    //      CastToGenericAddrSpace), value, and ordering arguments, emit the
    //      call with emitAtomicLibcall, and return its result)
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 E->getOp() == AtomicExpr::AO__atomic_clear;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
  // Okay, we're doing this natively.
  llvm::Value *Order = EmitScalarExpr(E->getOrder());
  llvm::Value *Scope =
      E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;

  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);
    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }
  // Long case, when the ordering isn't obviously constant.

  // Create all the relevant BB's.
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice,
  // this only matters if the ordering doesn't fold to a constant.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
  // Emit all the different atomics.
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return.
  Builder.SetInsertPoint(ContBB);
  if (RValTy->isVoidType())
    return RValue::get(nullptr);
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                             RValTy, E->getExprLoc());
}
Address AtomicInfo::castToAtomicIntPointer(Address Addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return Addr.withElementType(ty);
}
Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  if (LVal.isSimple()) {
    // ... (the simple case: return the aggregate slot directly, or drill
    //      past any padding and load a value of the value type)
    return CGF.convertTempToRValue(addr, getValueType(), loc);
  }

  // Otherwise, we have some other lvalue kind. Load the value and retrofit
  // it.
  if (LVal.isBitField())
    return CGF.EmitLoadOfBitfieldLValue(
        LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
                             LVal.getBaseInfo(), TBAAAccessInfo()),
        loc);
  if (LVal.isVectorElt())
    return CGF.EmitLoadOfLValue(
        LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
                              LVal.getBaseInfo(), TBAAAccessInfo()),
        loc);
  assert(LVal.isExtVectorElt());
  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
      addr, LVal.getExtVectorElts(), LVal.getType(),
      LVal.getBaseInfo(), TBAAAccessInfo()));
}
/// Return true if \p ValTy is a type that should be casted to integer
/// around the atomic memory operation. If \p CmpXchg is true, a floating
/// point type is also casted, since cmpxchg cannot take floating point
/// operands.
static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
  if (ValTy->isFloatingPointTy())
    return ValTy->isX86_FP80Ty() || CmpXchg;
  return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
}
RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
                                          AggValueSlot ResultSlot,
                                          SourceLocation Loc, bool AsValue,
                                          bool CmpXchg) const {
  // Try not to in some easy cases.
  assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
          Val->getType()->isIEEELikeFPTy()) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    if (ValTy->isIntegerTy() || ValTy->isPointerTy() ||
        ValTy->isIEEELikeFPTy()) {
      assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
             "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
    }
    if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}
llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile, bool CmpXchg) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddress();
  if (shouldCastToInt(Addr.getElementType(), CmpXchg))
    Addr = castToAtomicIntPointer(Addr);
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile
/// and performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile)
    return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type.  Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
    LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
                                    getAtomicType());
    bool IsVolatile = rvalue.isVolatileQualified() ||
                      LVal.isVolatileQualified();
    CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
                          AggValueSlot::DoesNotOverlap, IsVolatile);
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*isInit=*/true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*isInit=*/true);
  }
}
/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress();
}
llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
    return RVal.getScalarVal();
  return nullptr;
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
    if (!shouldCastToInt(Value->getType(), CmpXchg))
      return CGF.EmitToMemory(Value, ValueTy);
    llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
        CGF.getLLVMContext(),
        LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
    if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
      return CGF.Builder.CreateBitCast(Value, InputIntTy);
  }

  // Otherwise, we need to go through memory.  Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(llvm::ConstantInt::get(
               CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
    llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*AsValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
  auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                             SourceLocation(), /*AsValue=*/false,
                             /*CmpXchg=*/true),
      Res.second);
}
static void EmitAtomicUpdateValue(
    CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build a new lvalue for the temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    LValue UpdateLVal;
    if (AtomicLVal.isBitField()) {
      UpdateLVal =
          LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(),
                               AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
      DesiredLVal =
          LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                               AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                               AtomicLVal.getTBAAInfo());
    } else if (AtomicLVal.isVectorElt()) {
      UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
                                         AtomicLVal.getType(),
                                         AtomicLVal.getBaseInfo(),
                                         AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeVectorElt(
          DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    } else {
      assert(AtomicLVal.isExtVectorElt());
      UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
                                            AtomicLVal.getType(),
                                            AtomicLVal.getBaseInfo(),
                                            AtomicLVal.getTBAAInfo());
      DesiredLVal = LValue::MakeExtVectorElt(
          DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
          AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
    }
    UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
  }
  // Store the new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
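// In effect this is a classic CAS loop expressed over the runtime calls:
//   __atomic_load(size, obj, &expected, order);
//   do { desired = UpdateOp(expected); }
//   while (!__atomic_compare_exchange(size, obj, &expected, &desired, ...));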
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform a compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr =
      shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
          ? castToAtomicIntPointer(NewAtomicAddr)
          : NewAtomicAddr;
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                        SourceLocation(), /*AsValue=*/false,
                                        /*CmpXchg=*/true);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build a new lvalue for the temp address.
  if (AtomicLVal.isBitField()) {
    DesiredLVal =
        LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
                             AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                             AtomicLVal.getTBAAInfo());
  } else if (AtomicLVal.isVectorElt()) {
    DesiredLVal =
        LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
                              AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
                              AtomicLVal.getTBAAInfo());
  } else {
    assert(AtomicLVal.isExtVectorElt());
    DesiredLVal = LValue::MakeExtVectorElt(
        DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
        AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
  }
  // Store the new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}
void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
  llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
  // For non-simple lvalues perform a compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write the new value using a cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic type*;
/// this means that for aggregate r-values, it should include storage for any
/// padding that was necessary.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);
      // ... (emit "__atomic_store" with size, dest, source, and ordering)
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address Addr = atomics.getAtomicAddress();
    if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
      if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
        Addr = atomics.castToAtomicIntPointer(Addr);
        ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
                                           /*isSigned=*/false);
      }
    llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);

    // A store cannot have acquire semantics; demote the ordering.
    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
/// Emit a compare-and-exchange op for atomic type.
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress().getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}
/// Emit an atomicrmw instruction, applying relevant metadata when
/// applicable.
llvm::AtomicRMWInst *
CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
                                   llvm::Value *Val, llvm::AtomicOrdering Order,
                                   llvm::SyncScope::ID SSID,
                                   const AtomicExpr *AE) {
  llvm::AtomicRMWInst *RMW =
      Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
  getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
  return RMW;
}
void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    // ... (evaluate the scalar and copy it into atomic-layout memory)
    return;
  }

  case TEK_Complex: {
    // ... (evaluate the complex pair and store it through the projected
    //      value)
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }
    // ... (emit the aggregate into dest, noting whether it was pre-zeroed)
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}