#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace CodeGen;
namespace {
class AtomicInfo {
  CodeGenFunction &CGF;
  QualType AtomicTy;
  QualType ValueTy;
  uint64_t AtomicSizeInBits;
  uint64_t ValueSizeInBits;
  CharUnits AtomicAlign;
  CharUnits ValueAlign;
  TypeEvaluationKind EvaluationKind;
  bool UseLibcall;
  LValue LVal;
  CGBitFieldInfo BFI;

public:
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        EvaluationKind(TEK_Scalar), UseLibcall(true) {
    ASTContext &C = CGF.getContext();
    if (lvalue.isSimple()) {
      AtomicTy = lvalue.getType();
      if (auto *ATy = AtomicTy->getAs<AtomicType>())
        ValueTy = ATy->getValueType();
      else
        ValueTy = AtomicTy;
      EvaluationKind = CGF.getEvaluationKind(ValueTy);

      uint64_t ValueAlignInBits;
      uint64_t AtomicAlignInBits;
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(AtomicAlign);

      LVal = lvalue;
    } else if (lvalue.isBitField()) {
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      auto &OrigBFI = lvalue.getBitFieldInfo();
      auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
              .alignTo(lvalue.getAlignment()));
      llvm::Value *BitFieldPtr = lvalue.getBitFieldPointer();
      auto OffsetInChars =
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          lvalue.getAlignment();
      llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
      StoragePtr = CGF.Builder.CreateAddrSpaceCast(
          StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
      BFI = OrigBFI;
      BFI.Offset = Offset;
      BFI.StorageSize = AtomicSizeInBits;
      BFI.StorageOffset += OffsetInChars;
      llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
      LVal = LValue::MakeBitfield(
          Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
          lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      if (AtomicTy.isNull()) {
        llvm::APInt Size(
            /*numBits=*/32,
            C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
        // ... (fall back to a char array type of the right size)
      }
      AtomicAlign = ValueAlign = lvalue.getAlignment();
    } else if (lvalue.isVectorElt()) {
      ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = lvalue.getType();
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    } else {
      assert(lvalue.isExtVectorElt());
      ValueTy = lvalue.getType();
      ValueSizeInBits = C.getTypeSize(ValueTy);
      AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
          lvalue.getType(), cast<llvm::FixedVectorType>(
                                lvalue.getExtVectorAddress().getElementType())
                                ->getNumElements());
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      AtomicAlign = ValueAlign = lvalue.getAlignment();
      LVal = lvalue;
    }
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
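  // Editor's illustration (not part of the original file): UseLibcall is set
  // when the target has no inline atomic for this size/alignment pair.
  // Assuming an x86-64 target, whose widest inline atomic is 16 bytes:
  //
  //   _Atomic int small;               // 4 bytes, aligned -> inline atomics
  //   struct Wide { char b[32]; };
  //   _Atomic struct Wide wide;        // 32 bytes -> __atomic_* libcalls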
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {
    // ... (dispatch on the lvalue kind)
  }
  Address getAtomicAddress() const {
    // ... (compute ElTy from the lvalue kind)
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return castToAtomicIntPointer(getAtomicAddress());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const {
    return (ValueSizeInBits != AtomicSizeInBits);
  }
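  // Editor's illustration (not part of the original file): hasPadding() is
  // true whenever the atomic representation is wider than the value it
  // wraps. Assuming the usual rule that _Atomic rounds a type's size up to a
  // power of two, a three-byte struct gets one byte of padding:
  //
  //   struct Packed3 { char c[3]; };   // sizeof == 3
  //   _Atomic(struct Packed3) a;       // typically sizeof == 4
  //   // ValueSizeInBits == 24, AtomicSizeInBits == 32 -> hasPadding()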
  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
    // ... (emit AtomicSizeInBits, in chars, as a size_t constant)
  }

  // ...

  /// Turn an r-value into an integer of the atomic width.
  llvm::Value *convertRValueToInt(RValue RVal) const;

  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                   SourceLocation Loc, bool AsValue) const;

  /// Copy an r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field of the atomic.
  LValue projectValue() const {
    // ...
    Address addr = getAtomicAddress();
    // ... (apply a struct GEP if the atomic has padding)
    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                            LVal.getBaseInfo(), LVal.getTBAAInfo());
  }

  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO, bool IsVolatile);

  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

private:
  Address CreateTempAlloca() const;

  bool requiresMemSetZero(llvm::Type *type) const;

  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);

  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                               const llvm::function_ref<RValue(RValue)> &UpdateOp,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                          bool IsVolatile);
};
} // namespace
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // Cast to pointer to value type for bit-fields.
  if (LVal.isBitField())
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName,
                                QualType resultType, CallArgList &args) {
  const CGFunctionInfo &fnInfo =
      CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  // ... (wrap the callee and emit the call)
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding()) return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case TEK_Scalar:
    return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
  case TEK_Complex:
    return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
                           AtomicSizeInBits / 2);

  // Padding in structs has an undefined bit pattern.  Don't do
  // a memset unless the struct has a volatile member.
  case TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(LVal.isSimple());
  Address addr = LVal.getAddress(CGF);
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  CGF.Builder.CreateMemSet(
      addr.getPointer(), llvm::ConstantInt::get(CGF.Int8Ty, 0),
      CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
      LVal.getAlignment().getAsAlign());
  return true;
}
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);

  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
      Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder, Scope);
  Pair->setVolatile(E->isVolatile());
  Pair->setWeak(IsWeak);

  // Cmp holds the result of the compare-exchange operation: true on success,
  // false on failure.
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // This basic block is used to hold the store instruction if the operation
  // failed.
  llvm::BasicBlock *StoreExpectedBB =
      CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);

  // This basic block is the exit point of the operation, we should end up
  // here regardless of whether or not the operation succeeded.
  llvm::BasicBlock *ContinueBB =
      CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

  // Update Expected if Expected isn't equal to Old, otherwise branch to the
  // exit point.
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // Update the memory at Expected with Old's value.
  CGF.Builder.CreateStore(Old, Val1);
  // Finally, branch to the exit point.
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // Update the memory at Dest with Cmp's value.
  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
}
/// Given an ordering required on success, emit all possible cmpxchg
/// instructions to cope with the provided (but possibly only dynamically
/// known) failure ordering.
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fallback to monotonic.
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }
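  // Editor's illustration (not part of the original file): the clamping above
  // can be observed from user code. With a constant but invalid failure
  // ordering, Clang silently emits the monotonic (relaxed) variant:
  //
  //   #include <stdatomic.h>
  //   _Atomic int a;
  //   _Bool try_set(int *expected) {
  //     // memory_order_release is not a valid failure ordering, so the
  //     // cmpxchg below gets a relaxed failure ordering instead.
  //     return atomic_compare_exchange_strong_explicit(
  //         &a, expected, 1, memory_order_seq_cst, memory_order_release);
  //   }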
  // The failure ordering is known only at run time: create one block per
  // possibility and switch on the order operand.
  llvm::BasicBlock *MonotonicBB = CGF.createBasicBlock("monotonic", CGF.CurFn);
  llvm::BasicBlock *AcquireBB = CGF.createBasicBlock("acquire", CGF.CurFn);
  llvm::BasicBlock *SeqCstBB = CGF.createBasicBlock("seqcst", CGF.CurFn);
  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.continue", CGF.CurFn);

  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // Implemented as acquire, since it's the closest in LLVM.
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Emit all the different atomics.
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
/// Duplicate the atomic min/max operation in conventional IR for the builtin
/// variants that return the new rather than the original value.
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op,
                                         bool IsSigned, llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      // Create all the relevant BB's
      llvm::BasicBlock *StrongBB =
          CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
      llvm::BasicBlock *WeakBB =
          CGF.createBasicBlock("cmpxchg.weak", CGF.CurFn);
      llvm::BasicBlock *ContBB =
          CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);

      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      CGF.Builder.CreateBr(ContBB);

      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
    Load->setAtomic(Order, Scope);
    Load->setVolatile(E->isVolatile());
    CGF.Builder.CreateStore(Load, Dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
    llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
    Store->setAtomic(Order, Scope);
    Store->setVolatile(E->isVolatile());
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;
  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMin
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Min
                    : llvm::AtomicRMWInst::UMin);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isFloatingType()
             ? llvm::AtomicRMWInst::FMax
             : (E->getValueType()->isSignedIntegerType()
                    ? llvm::AtomicRMWInst::Max
                    : llvm::AtomicRMWInst::UMax);
    break;
  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And; // the NOT is special cased below
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }
  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
  llvm::AtomicRMWInst *RMWI =
      CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
  RMWI->setVolatile(E->isVolatile());

  // For __atomic_*_fetch operations, perform the operation again to
  // determine the value which was written.
  llvm::Value *Result = RMWI;
  if (PostOpMinMax)
    Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
                                  E->getValueType()->isSignedIntegerType(),
                                  RMWI, LoadVal1);
  else if (PostOp)
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     LoadVal1);
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  CGF.Builder.CreateStore(Result, Dest);
}
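// Editor's illustration (not part of the original file): for
// __atomic_nand_fetch the atomicrmw returns the old value, the And post-op
// recomputes (old & val), and the CreateNot above flips it, since GCC-style
// nand is ~(old & val):
//
//   unsigned old = __atomic_fetch_nand(&x, v, __ATOMIC_RELAXED);
//   unsigned result = ~(old & v);  // what __atomic_nand_fetch returns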
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();

  // LLVM atomic instructions always have sync scope. If clang's atomic
  // expression has no scope operand, use the default LLVM sync scope.
  if (!ScopeModel) {
    // ... (emit with the system sync scope)
    return;
  }

  // Handle constant scope.
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
        CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()), Order,
        CGF.getLLVMContext());
    EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
                 Order, SCID);
    return;
  }

  // Handle non-constant scope.
  auto &Builder = CGF.Builder;
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);

  llvm::BasicBlock *ContBB =
      CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);

  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // If an unsupported sync scope is encountered at run time, assume a
  // fallback sync scope value.
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    auto *B = BB[S];
    if (S != FallBack)
      SI->addCase(Builder.getInt32(S), B);

    Builder.SetInsertPoint(B);
    // ... (emit the op with the sync scope ID mapped from S)
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                              bool UseOptimizedLibcall, llvm::Value *Val,
                              QualType ValTy, SourceLocation Loc,
                              CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // Load value and pass it to the function directly.
    int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
    llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
    // ... (load Val through an integer pointer of width SizeInBits)
  } else {
    // Non-optimized functions always take a reference.
    Args.add(RValue::get(Val), CGF.getContext().VoidPtrTy);
  }
}
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
  QualType MemTy = AtomicTy;
  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;

  Address Val1 = Address::invalid();
  Address Val2 = Address::invalid();
  Address Dest = Address::invalid();
  Address Ptr = EmitPointerWithAlignment(E->getPtr());

  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
    EmitAtomicInit(E->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
  uint64_t Size = TInfo.Width.getQuantity();
  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
  CharUnits MaxInlineWidth =
      getContext().toCharUnitsFromBits(MaxInlineWidthInBits);

  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
  bool UseLibcall = Misaligned | Oversized;
  bool ShouldCastToIntPtrTy = true;

  DiagnosticsEngine &Diags = CGM.getDiags();

  if (Misaligned) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
        << (int)TInfo.Width.getQuantity()
        << (int)Ptr.getAlignment().getQuantity();
  }

  if (Oversized) {
    Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  }
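  // Editor's illustration (not part of the original file): a packed struct
  // member is a simple way to hit the Misaligned path and its warning:
  //
  //   struct __attribute__((packed)) S { char c; _Atomic int i; };
  //   // Operations on s.i warn about a misaligned atomic operation and are
  //   // routed through the libcall path below.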
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    Dest = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    Dest = EmitPointerWithAlignment(E->getVal2());
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    Val1 = EmitPointerWithAlignment(E->getVal1());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      Val2 = EmitPointerWithAlignment(E->getVal2());
    else
      Val2 = EmitValToTemp(*this, E->getVal2());
    OrderFail = EmitScalarExpr(E->getOrderFail());
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      IsWeak = EmitScalarExpr(E->getWeak());
    break;
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    if (MemTy->isPointerType()) {
      // For pointer arithmetic, we're required to do a bit of math:
      // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
      // ... (scale the operand by the pointee size)
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_min:
    ShouldCastToIntPtrTy = !MemTy->isFloatingType();
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
    Val1 = EmitValToTemp(*this, E->getVal1());
    break;
  }

  QualType RValTy = E->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2.
  // We need to make sure (via temporaries if necessary) that all incoming
  // values are compatible.
  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.castToAtomicIntPointer(Ptr);
    if (Val1.isValid())
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    if (Val2.isValid())
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  if (Dest.isValid()) {
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  } else if (E->isCmpXChg())
    Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
  else if (!RValTy->isVoidType()) {
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.castToAtomicIntPointer(Dest);
  }
  // Use a library call.  See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
  if (UseLibcall) {
    bool UseOptimizedLibcall = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled above with EmitAtomicInit!");

    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__atomic_fetch_nand:
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_add_fetch:
    case AtomicExpr::AO__atomic_and_fetch:
    case AtomicExpr::AO__atomic_nand_fetch:
    case AtomicExpr::AO__atomic_or_fetch:
    case AtomicExpr::AO__atomic_sub_fetch:
    case AtomicExpr::AO__atomic_xor_fetch:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__atomic_max_fetch:
    case AtomicExpr::AO__atomic_min_fetch:
      // For these, only library calls for certain sizes exist.
      UseOptimizedLibcall = true;
      break;

    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__atomic_compare_exchange:
      // Use the generic version if we don't know that the operand will be
      // suitably aligned for the optimized version.
      if (Misaligned)
        break;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_load_n:
    case AtomicExpr::AO__atomic_store_n:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      // Only use optimized library calls for sizes for which they exist.
      // FIXME: Size == 16 optimized library functions exist too.
      if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
        UseOptimizedLibcall = true;
      break;
    }
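    // Editor's illustration (not part of the original file): the distinction
    // above is visible in the emitted symbol names. Assuming a 32-bit target
    // without 8-byte inline atomics:
    //
    //   _Atomic long long x;         // 8 bytes -> __atomic_fetch_add_8(...)
    //   struct Big { char b[24]; };  // no sized variant exists
    //   _Atomic struct Big y;        // -> generic __atomic_store(24, ...)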
    CallArgList Args;
    if (!UseOptimizedLibcall) {
      // For non-optimized library calls, the size is the first parameter.
      Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
               getContext().getSizeType());
    }
    // The atomic address is the second parameter.
    // The OpenCL atomic library functions only accept pointers to the generic
    // address space.
    auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
      if (!E->isOpenCL())
        return V;
      auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
      if (AS == LangAS::opencl_generic)
        return V;
      auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
      auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);

      return getTargetHooks().performAddrSpaceCast(*this, V, AS,
                                                   LangAS::opencl_generic,
                                                   DestType, false);
    };

    Args.add(RValue::get(CastToGenericAddrSpace(Ptr.getPointer(),
                                                E->getPtr()->getType())),
             getContext().VoidPtrTy);
    std::string LibCallName;
    QualType LoweredMemTy =
        MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
    QualType RetTy;
    bool HaveRetTy = false;
    llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
    bool PostOpMinMax = false;
    switch (E->getOp()) {
    case AtomicExpr::AO__c11_atomic_init:
    case AtomicExpr::AO__opencl_atomic_init:
      llvm_unreachable("Already handled!");

    // There is only one libcall for compare and exchange, because there is no
    // optimisation benefit possible from a libcall version of a weak compare
    // and exchange.
    // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
    //                                void *desired, int success, int failure)
    case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
    case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
    case AtomicExpr::AO__atomic_compare_exchange:
    case AtomicExpr::AO__atomic_compare_exchange_n:
      LibCallName = "__atomic_compare_exchange";
      RetTy = getContext().BoolTy;
      HaveRetTy = true;
      // ... (expected pointer, desired value, and failure order arguments)
      break;

    // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
    //                        int order)
    case AtomicExpr::AO__c11_atomic_exchange:
    case AtomicExpr::AO__opencl_atomic_exchange:
    case AtomicExpr::AO__atomic_exchange_n:
    case AtomicExpr::AO__atomic_exchange:
    case AtomicExpr::AO__hip_atomic_exchange:
      LibCallName = "__atomic_exchange";
      // ...
      break;

    // void __atomic_store(size_t size, void *mem, void *val, int order)
    case AtomicExpr::AO__c11_atomic_store:
    case AtomicExpr::AO__opencl_atomic_store:
    case AtomicExpr::AO__hip_atomic_store:
    case AtomicExpr::AO__atomic_store:
    case AtomicExpr::AO__atomic_store_n:
      LibCallName = "__atomic_store";
      RetTy = getContext().VoidTy;
      HaveRetTy = true;
      // ...
      break;

    // void __atomic_load(size_t size, void *mem, void *return, int order)
    case AtomicExpr::AO__c11_atomic_load:
    case AtomicExpr::AO__opencl_atomic_load:
    case AtomicExpr::AO__hip_atomic_load:
    case AtomicExpr::AO__atomic_load:
    case AtomicExpr::AO__atomic_load_n:
      LibCallName = "__atomic_load";
      break;

    // T __atomic_add_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_add_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_add_fetch:
      PostOp = llvm::Instruction::Add;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_add:
    case AtomicExpr::AO__opencl_atomic_fetch_add:
    case AtomicExpr::AO__atomic_fetch_add:
    case AtomicExpr::AO__hip_atomic_fetch_add:
      LibCallName = "__atomic_fetch_add";
      AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
                        LoweredMemTy, E->getExprLoc(), TInfo.Width);
      break;

    // T __atomic_and_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_and_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_and_fetch:
      PostOp = llvm::Instruction::And;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_and:
    case AtomicExpr::AO__opencl_atomic_fetch_and:
    case AtomicExpr::AO__hip_atomic_fetch_and:
    case AtomicExpr::AO__atomic_fetch_and:
      LibCallName = "__atomic_fetch_and";
      // ...
      break;

    // T __atomic_or_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_or_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_or_fetch:
      PostOp = llvm::Instruction::Or;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_or:
    case AtomicExpr::AO__opencl_atomic_fetch_or:
    case AtomicExpr::AO__hip_atomic_fetch_or:
    case AtomicExpr::AO__atomic_fetch_or:
      LibCallName = "__atomic_fetch_or";
      // ...
      break;

    // T __atomic_sub_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_sub_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_sub_fetch:
      PostOp = llvm::Instruction::Sub;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_sub:
    case AtomicExpr::AO__opencl_atomic_fetch_sub:
    case AtomicExpr::AO__hip_atomic_fetch_sub:
    case AtomicExpr::AO__atomic_fetch_sub:
      LibCallName = "__atomic_fetch_sub";
      // ...
      break;

    // T __atomic_xor_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_xor_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_xor_fetch:
      PostOp = llvm::Instruction::Xor;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_xor:
    case AtomicExpr::AO__opencl_atomic_fetch_xor:
    case AtomicExpr::AO__hip_atomic_fetch_xor:
    case AtomicExpr::AO__atomic_fetch_xor:
      LibCallName = "__atomic_fetch_xor";
      // ...
      break;

    case AtomicExpr::AO__atomic_min_fetch:
      PostOpMinMax = true;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_min:
    case AtomicExpr::AO__atomic_fetch_min:
    case AtomicExpr::AO__hip_atomic_fetch_min:
    case AtomicExpr::AO__opencl_atomic_fetch_min:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_min"
                        : "__atomic_fetch_umin";
      // ...
      break;

    case AtomicExpr::AO__atomic_max_fetch:
      PostOpMinMax = true;
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_max:
    case AtomicExpr::AO__atomic_fetch_max:
    case AtomicExpr::AO__hip_atomic_fetch_max:
    case AtomicExpr::AO__opencl_atomic_fetch_max:
      LibCallName = E->getValueType()->isSignedIntegerType()
                        ? "__atomic_fetch_max"
                        : "__atomic_fetch_umax";
      // ...
      break;

    // T __atomic_nand_fetch_N(T *mem, T val, int order)
    // T __atomic_fetch_nand_N(T *mem, T val, int order)
    case AtomicExpr::AO__atomic_nand_fetch:
      PostOp = llvm::Instruction::And; // the NOT is special cased below
      [[fallthrough]];
    case AtomicExpr::AO__c11_atomic_fetch_nand:
    case AtomicExpr::AO__atomic_fetch_nand:
      LibCallName = "__atomic_fetch_nand";
      // ...
      break;
    }

    if (E->isOpenCL()) {
      LibCallName =
          std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
    }
    // Optimized functions have the size in their name.
    if (UseOptimizedLibcall)
      LibCallName += "_" + llvm::utostr(Size);
    // By default, assume we return a value of the atomic type.
    if (!HaveRetTy) {
      if (UseOptimizedLibcall) {
        // Value is returned directly.
        // The function returns an appropriately sized integer type.
        RetTy = getContext().getIntTypeForBitwidth(
            getContext().toBits(TInfo.Width), /*Signed=*/false);
      } else {
        // Value is returned through parameter before the order.
        RetTy = getContext().VoidTy;
        Args.add(RValue::get(Dest.getPointer()), getContext().VoidPtrTy);
      }
    }
    // Order is always the last parameter.
    Args.add(RValue::get(Order), getContext().IntTy);
    // ... (append the scope argument for OpenCL)

    // PostOp is only needed for the atomic_*_fetch operations, and
    // thus is only needed for and implemented in the
    // UseOptimizedLibcall codepath.
    assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));

    RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
    // The value is returned directly from the libcall.
    if (E->isCmpXChg())
      return Res;

    // The value is returned directly for optimized libcalls but the expr
    // provided an out-param.
    if (UseOptimizedLibcall && Res.getScalarVal()) {
      llvm::Value *ResVal = Res.getScalarVal();
      if (PostOpMinMax) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
                                      E->getValueType()->isSignedIntegerType(),
                                      ResVal, LoadVal1);
      } else if (PostOp) {
        llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
        ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
      }
      if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
        ResVal = Builder.CreateNot(ResVal);

      Builder.CreateStore(ResVal, Dest.withElementType(ResVal->getType()));
    }

    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        if (IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        if (IsLoad)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break; // Avoid crashing on code with undefined behavior
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    if (RValTy->isVoidType())
      return RValue::get(nullptr);

    return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                               RValTy, E->getExprLoc());
  }
  // Long case, when Order isn't obviously constant.

  // Create all the relevant BB's
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  MonotonicBB = createBasicBlock("monotonic", CurFn);
  if (!IsStore)
    AcquireBB = createBasicBlock("acquire", CurFn);
  if (!IsLoad)
    ReleaseBB = createBasicBlock("release", CurFn);
  if (!IsLoad && !IsStore)
    AcqRelBB = createBasicBlock("acqrel", CurFn);
  SeqCstBB = createBasicBlock("seqcst", CurFn);
  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

  // Create the switch for the split.
  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
  // doesn't matter unless someone is crazy enough to use something that
  // doesn't fold to a constant for the ordering.
  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // Emit all the different atomics
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  if (!IsStore) {
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  }
  if (!IsLoad) {
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  }
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // Cleanup and return
  Builder.SetInsertPoint(ContBB);
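  // Editor's illustration (not part of the original file): a non-constant
  // ordering forces the switch emitted above, with one basic block per valid
  // ordering:
  //
  //   int load_with(int *p, int order) {   // order known only at run time
  //     return __atomic_load_n(p, order);  // switch(order): monotonic,
  //   }                                    //   acquire, seq_cst, ...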
  if (RValTy->isVoidType())
    return RValue::get(nullptr);

  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
                             RValTy, E->getExprLoc());
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return addr.withElementType(ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  llvm::Type *Ty = Addr.getElementType();
  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    CGF.Builder.CreateMemCpy(Tmp, Addr,
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    Addr = Tmp;
  }

  return castToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  // ... (load or reinterpret the temporary according to the lvalue kind)
}
RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  // Try not to in some easy cases.
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  if (getEvaluationKind() == TEK_Scalar &&
      (((!LVal.isBitField() ||
         LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
        !hasPadding()) ||
       !AsValue)) {
    auto *ValTy = AsValue
                      ? CGF.ConvertTypeForMem(ValueTy)
                      : getAtomicAddress().getElementType();
    if (ValTy->isIntegerTy()) {
      assert(IntVal->getType() == ValTy && "Different integer types.");
      return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
    } else if (ValTy->isPointerTy())
      return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
    else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
      return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
  }

  // Create a temporary.  This needs to be big enough to hold the
  // atomic integer.
  Address Temp = Address::invalid();
  bool TempIsVolatile = false;
  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
    assert(!ResultSlot.isIgnored());
    Temp = ResultSlot.getAddress();
    TempIsVolatile = ResultSlot.isVolatile();
  } else {
    Temp = CreateTempAlloca();
  }

  // Slam the integer into the temporary.
  Address CastTemp = castToAtomicIntPointer(Temp);
  CGF.Builder.CreateStore(IntVal, CastTemp)->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // void __atomic_load(size_t size, void *mem, void *return, int order);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
  Args.add(
      RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
      CGF.getContext().IntTy);
  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // Okay, we're doing this natively.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
  Load->setAtomic(AO);

  // Other decoration.
  if (IsVolatile)
    Load->setVolatile(true);
  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
  return Load;
}
/// An LValue is a candidate for having its loads and stores be made atomic if
/// we are operating under /volatile:ms *and* the LValue itself is volatile and
/// performing such an operation can be performed without a libcall.
bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  if (!CGM.getLangOpts().MSVolatile) return false;
  AtomicInfo AI(*this, LV);
  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
  // An atomic is inline if we don't need to use a libcall.
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // MSVC doesn't seem to do this for types wider than a pointer.
  if (getContext().getTypeSize(LV.getType()) >
      getContext().getTypeSize(getContext().getIntPtrType()))
    return false;
  return IsVolatile && AtomicIsInline;
}

RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  bool IsVolatile = LV.isVolatileQualified();
  if (LV.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Acquire;
    IsVolatile = true;
  }
  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
}
RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    Address TempAddr = Address::invalid();
    if (LVal.isSimple() && !ResultSlot.isIgnored()) {
      assert(getEvaluationKind() == TEK_Aggregate);
      TempAddr = ResultSlot.getAddress();
    } else
      TempAddr = CreateTempAlloca();

    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // Okay, turn that back into the original value or whole atomic (for
    // non-simple lvalues) type.
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // Okay, we're doing this natively.
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}
/// Emit a load from an l-value of atomic type.  Note that the r-value we
/// produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
                                IsVolatile);
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(LVal.isSimple());
  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding.  Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    // ... (aggregate copy of the whole atomic type)
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue TempLVal = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*isInit=*/true);
  } else {
    CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*isInit=*/true);
  }
}

/// Materialize an r-value into memory for the purposes of storing it
/// to an atomic type.
Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // Aggregate r-values are already in memory, and EmitAtomicStore
  // requires them to be values of the atomic type.
  if (rvalue.isAggregate())
    return rvalue.getAggregateAddress();

  // Otherwise, make a temporary and materialize into it.
  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  return TempLV.getAddress(CGF);
}
llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
    llvm::Value *Value = RVal.getScalarVal();
    if (isa<llvm::IntegerType>(Value->getType()))
      return CGF.EmitToMemory(Value, ValueTy);
    else {
      llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
          CGF.getLLVMContext(),
          LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
      if (isa<llvm::PointerType>(Value->getType()))
        return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
      else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
        return CGF.Builder.CreateBitCast(Value, InputIntTy);
    }
  }
  // Otherwise, we need to go through memory.
  // Put the r-value in memory.
  Address Addr = materializeRValue(RVal);

  // Cast the temporary to the atomic int type and pull a value out.
  Addr = castToAtomicIntPointer(Addr);
  return CGF.Builder.CreateLoad(Addr);
}
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // Do the atomic store.
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
                                               ExpectedVal, DesiredVal,
                                               Success, Failure);
  // Other decoration.
  Inst->setVolatile(LVal.isVolatileQualified());
  Inst->setWeak(IsWeak);

  // Okay, turn that back into the original value type.
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}
llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
  //                                void *desired, int success, int failure);
  CallArgList Args;
  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
  Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
           CGF.getContext().IntTy);
  Args.add(RValue::get(
               llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
           CGF.getContext().IntTy);
  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
                                              CGF.getContext().BoolTy, Args);

  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // Check whether we should use a library call.
  if (shouldUseLibcall()) {
    // Produce a source address.
    Address ExpectedAddr = materializeRValue(Expected);
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 DesiredAddr.getPointer(),
                                                 Success, Failure);
    return std::make_pair(
        convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
                                  SourceLocation(), /*asValue=*/false),
        Res);
  }

  // If we've got a scalar value of the right size, try to avoid going
  // through memory.
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         Failure, IsWeak);
  return std::make_pair(
      ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
                                SourceLocation(), /*AsValue=*/false),
      Res.second);
}
static void
EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
                      const llvm::function_ref<RValue(RValue)> &UpdateOp,
                      Address DesiredAddr) {
  RValue UpRVal;
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  if (AtomicLVal.isSimple()) {
    UpRVal = OldRVal;
    DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
  } else {
    // Build new lvalue for temp address.
    Address Ptr = Atomics.materializeRValue(OldRVal);
    // ... (project the bit-field / vector element out of the temporary)
  }
  // Store new value in the corresponding memory area.
  RValue NewRVal = UpdateOp(UpRVal);
  if (NewRVal.isScalar()) {
    CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
  } else {
    assert(NewRVal.isComplex());
    CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
                           /*isInit=*/false);
  }
}
void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           AggValueSlot::ignored(),
                                           SourceLocation(), /*asValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(), AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
                                           SourceLocation(),
                                           /*AsValue=*/false);
  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
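// Editor's note (illustration, not from the original file): the PHI/cmpxchg
// structure above is the classic compare-and-swap update loop. In C-like
// pseudocode, ignoring the bit-field and padding handling:
//
//   T old = atomic_load(ptr);
//   do {
//     T desired = UpdateOp(old);
//     // cmpxchg writes the current value back into 'old' on failure
//   } while (!atomic_compare_exchange_strong(ptr, &old, desired));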
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
                                  RValue UpdateRVal, Address DesiredAddr) {
  LValue AtomicLVal = Atomics.getAtomicLValue();
  LValue DesiredLVal;
  // Build new lvalue for temp address.
  // ... (same projection as the callback-based variant above)
  // Store new value in the corresponding memory area.
  assert(UpdateRVal.isScalar());
  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
}

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  Address ExpectedAddr = CreateTempAlloca();

  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  CGF.EmitBlock(ContBB);
  Address DesiredAddr = CreateTempAlloca();
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
    CGF.Builder.CreateStore(OldVal, DesiredAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       DesiredAddr.getPointer(), AO, Failure);
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // Do the atomic load.
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // For non-simple lvalues perform compare-and-swap procedure.
  auto *ContBB = CGF.createBasicBlock("atomic_cont");
  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
  auto *CurBB = CGF.Builder.GetInsertBlock();
  CGF.EmitBlock(ContBB);
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             /*NumReservedValues=*/2);
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
  }
  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
  // Try to write new value using cmpxchg operation.
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  bool IsVolatile = lvalue.isVolatileQualified();
  llvm::AtomicOrdering AO;
  if (lvalue.getType()->isAtomicType()) {
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  } else {
    AO = llvm::AtomicOrdering::Release;
    IsVolatile = true;
  }
  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
}
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic type*;
/// this means that for aggregate r-values, it should include storage for any
/// padding.
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress(*this).getElementType());

  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // If this is an initialization, just put the value there normally.
  if (LVal.isSimple()) {
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibcall()) {
      // Produce a source address.
      Address srcAddr = atomics.materializeRValue(rvalue);
      // void __atomic_store(size_t size, void *mem, void *val, int order)
      // ... (assemble the argument list and emit the call)
      return;
    }

    // Okay, we're doing this natively.
    llvm::Value *intValue = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr = atomics.castToAtomicIntPointer(atomics.getAtomicAddress());
    intValue = Builder.CreateIntCast(
        intValue, addr.getElementType(), /*isSigned=*/false);
    llvm::StoreInst *store = Builder.CreateStore(intValue, addr);

    if (AO == llvm::AtomicOrdering::Acquire)
      AO = llvm::AtomicOrdering::Monotonic;
    else if (AO == llvm::AtomicOrdering::AcquireRelease)
      AO = llvm::AtomicOrdering::Release;
    // Initializations don't need to be atomic.
    if (!isInit)
      store->setAtomic(AO);

    // Other decoration.
    if (IsVolatile)
      store->setVolatile(true);
    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
    return;
  }

  // Emit simple atomic update operation.
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
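// Editor's note (illustration, not from the original file): Acquire and
// AcquireRelease are not legal orderings for a plain store in LLVM IR, so
// callers that reach this path with such an ordering get it downgraded
// before setAtomic(AO):
//
//   requested AO        emitted store AO
//   ------------        ----------------
//   Acquire         ->  Monotonic (relaxed)
//   AcquireRelease  ->  Release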
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  assert(!Expected.isAggregate() ||
         Expected.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  assert(!Desired.isAggregate() ||
         Desired.getAggregateAddress().getElementType() ==
             Obj.getAddress(*this).getElementType());
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);

  switch (atomics.getEvaluationKind()) {
  case TEK_Scalar: {
    llvm::Value *value = EmitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case TEK_Complex: {
    ComplexPairTy value = EmitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::getComplex(value));
    return;
  }

  case TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool Zeroed = false;
    if (!init->getType()->isAtomicType()) {
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, *this, AggValueSlot::IsNotDestructed,
        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    EmitAggExpr(init, slot);
    return;
  }
  }
  llvm_unreachable("bad evaluation kind");
}