#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"

using namespace CodeGen;
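// AtomicInfo gathers everything CodeGen needs to know about one atomic
// l-value: the value type, the (possibly padded) atomic type, their sizes
// and alignments, and whether operations must go through the atomic
// library. Its constructor, excerpted below, computes these per l-value
// kind: simple values read the ASTContext's TypeInfo directly, bit-fields
// are widened to an aligned integer covering their storage, and
// (ext)vector elements use the enclosing vector type.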
class AtomicInfo {
  // ...
public:
  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
      : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
        // ...
      ValueTy = ATy->getValueType();
      // ...
      TypeInfo ValueTI = C.getTypeInfo(ValueTy);
      ValueSizeInBits = ValueTI.Width;
      ValueAlignInBits = ValueTI.Align;

      TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
      AtomicSizeInBits = AtomicTI.Width;
      AtomicAlignInBits = AtomicTI.Align;

      assert(ValueSizeInBits <= AtomicSizeInBits);
      assert(ValueAlignInBits <= AtomicAlignInBits);

      AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
      ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
      // ...
      ValueSizeInBits = C.getTypeSize(ValueTy);
      // ...
      AtomicSizeInBits = C.toBits(
          C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
          // ...
          (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
          // ...
      VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
          CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
      llvm::Type *IntTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
      // ...
          VoidPtrAddr, IntTy->getPointerTo(), "atomic_bitfield_base");
      // ...
      AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
      // ...
          C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
      // ...
      ValueSizeInBits = C.getTypeSize(ValueTy);
      // ...
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
      // ...
      ValueSizeInBits = C.getTypeSize(ValueTy);
      // ...
          lvalue.getType(), cast<llvm::FixedVectorType>(
      // ...
      AtomicSizeInBits = C.getTypeSize(AtomicTy);
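    // Whether an inline lock-free sequence exists for this size and
    // alignment is the target's call: a negative answer from
    // hasBuiltinAtomic() routes every operation on this l-value through the
    // __atomic_* runtime library instead.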
    UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
        AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
  }
  QualType getAtomicType() const { return AtomicTy; }
  QualType getValueType() const { return ValueTy; }
  CharUnits getAtomicAlignment() const { return AtomicAlign; }
  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
  // ...
  bool shouldUseLibcall() const { return UseLibcall; }
  const LValue &getAtomicLValue() const { return LVal; }
  llvm::Value *getAtomicPointer() const {
    // ...
  }
  Address getAtomicAddress() const {
    // ...
    return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
  }

  Address getAtomicAddressAsAtomicIntPointer() const {
    return emitCastToAtomicIntPointer(getAtomicAddress());
  }

  // ...
  bool hasPadding() const { return (ValueSizeInBits != AtomicSizeInBits); }
  bool emitMemSetZeroIfNecessary() const;

  llvm::Value *getAtomicSizeValue() const {
    // ...
  }

  // ...
  llvm::Value *convertRValueToInt(RValue RVal) const;

  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
                                   SourceLocation Loc, bool AsValue) const;

  // ...
  void emitCopyIntoMemory(RValue rvalue) const;

  // ...
  LValue projectValue() const {
    // ...
    Address addr = getAtomicAddress();
    // ...
    return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
                            LVal.getBaseInfo(), LVal.getTBAAInfo());
  }

  // ...
  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                        bool AsValue, llvm::AtomicOrdering AO,
                        bool IsVolatile);

  // ...
  std::pair<RValue, llvm::Value *>
  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
                            llvm::AtomicOrdering Success =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            llvm::AtomicOrdering Failure =
                                llvm::AtomicOrdering::SequentiallyConsistent,
                            bool IsWeak = false);

  // ...
  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
                        const llvm::function_ref<RValue(RValue)> &UpdateOp,
                        bool IsVolatile);
  // ...
  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                        bool IsVolatile);

  // ...
  Address CreateTempAlloca() const;

  // ...
  bool requiresMemSetZero(llvm::Type *type) const;

  // ...
  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                             llvm::AtomicOrdering AO, bool IsVolatile);
  // ...
  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
  // ...
  llvm::Value *EmitAtomicCompareExchangeLibcall(
      llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent);
  // ...
  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
      llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
      llvm::AtomicOrdering Success =
          llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering Failure =
          llvm::AtomicOrdering::SequentiallyConsistent,
      bool IsWeak = false);

  // ...
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                               const llvm::function_ref<RValue(RValue)> &UpdateOp,
                               bool IsVolatile);
  // ...
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
                          const llvm::function_ref<RValue(RValue)> &UpdateOp,
                          bool IsVolatile);
  // ...
  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
                               bool IsVolatile);
  // ...
  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
                          bool IsVolatile);
};
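// CreateTempAlloca() builds the stack temporary used whenever an atomic
// value has to be shuttled through memory (libcalls, padded
// representations). For oversized bit-field l-values it sizes the slot by
// the value type rather than the atomic type, then casts the temporary back
// to the type of the atomic address.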
Address AtomicInfo::CreateTempAlloca() const {
  Address TempAlloca = CGF.CreateMemTemp(
      (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
                                                                : AtomicTy,
      getAtomicAlignment(), "atomic-temp");
  // ...
    return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
        TempAlloca, getAtomicAddress().getType(),
        getAtomicAddress().getElementType());
  return TempAlloca;
}
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName,
                                QualType resultType, CallArgList &args) {
  // ...
  llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
      CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);

  llvm::FunctionCallee fn =
      CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
  // ...
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
                           uint64_t expectedSize) {
  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
}
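/// If the in-memory atomic representation is wider than the value itself,
/// the padding bits participate in cmpxchg comparisons and must therefore
/// be zeroed first. requiresMemSetZero() decides, per evaluation kind,
/// whether an ordinary store of the value already writes the full atomic
/// width.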
bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
  // ...
  if (hasPadding())
    return true;

  // ...
  switch (getEvaluationKind()) {
  // ...
                           AtomicSizeInBits / 2);
  // ...
  }
  llvm_unreachable("bad evaluation kind");
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  // ...
}
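// emitAtomicCmpXchg emits a single cmpxchg instruction with the given
// success/failure orderings and sync scope, extracts the {old value,
// success flag} pair, and branches so that on failure the observed value is
// written back to the "expected" slot, matching the C11/GNU
// compare-exchange contract.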
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
                              Address Dest, Address Ptr, Address Val1,
                              Address Val2, uint64_t Size,
                              llvm::AtomicOrdering SuccessOrder,
                              llvm::AtomicOrdering FailureOrder,
                              llvm::SyncScope::ID Scope) {
  // ...
  Pair->setWeak(IsWeak);

  // ...
  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);

  // ...
  llvm::BasicBlock *StoreExpectedBB =
      // ...
  llvm::BasicBlock *ContinueBB =
      // ...
  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);

  CGF.Builder.SetInsertPoint(StoreExpectedBB);
  // ...
  CGF.Builder.CreateBr(ContinueBB);

  CGF.Builder.SetInsertPoint(ContinueBB);
  // ...
}
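// Given an ordering required on success, emit all possible cmpxchg
// instructions to cope with the provided (but possibly only dynamically
// known) failure ordering: a constant failure order folds to a single
// cmpxchg, while a runtime value becomes a switch over monotonic, acquire,
// and seq_cst variants.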
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
                                        bool IsWeak, Address Dest, Address Ptr,
                                        Address Val1, Address Val2,
                                        llvm::Value *FailureOrderVal,
                                        uint64_t Size,
                                        llvm::AtomicOrdering SuccessOrder,
                                        llvm::SyncScope::ID Scope) {
  llvm::AtomicOrdering FailureOrder;
  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
    auto FOS = FO->getSExtValue();
    if (!llvm::isValidAtomicOrderingCABI(FOS))
      FailureOrder = llvm::AtomicOrdering::Monotonic;
    else
      switch ((llvm::AtomicOrderingCABI)FOS) {
      case llvm::AtomicOrderingCABI::relaxed:
      // ...
      case llvm::AtomicOrderingCABI::release:
      case llvm::AtomicOrderingCABI::acq_rel:
        FailureOrder = llvm::AtomicOrdering::Monotonic;
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        FailureOrder = llvm::AtomicOrdering::Acquire;
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
        break;
      }
    // ...
    emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                      FailureOrder, Scope);
    return;
  }

  // ...
  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
  // ...
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
              AcquireBB);
  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // ...
  CGF.Builder.SetInsertPoint(MonotonicBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
                    Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(AcquireBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::Acquire, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(SeqCstBB);
  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
                    llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  CGF.Builder.CreateBr(ContBB);

  CGF.Builder.SetInsertPoint(ContBB);
}
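// The *_min_fetch/*_max_fetch builtins return the new value, but atomicrmw
// min/max returns the original one, so the result is duplicated in
// conventional IR: an icmp of the matching signedness followed by a select.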
static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
                                         AtomicExpr::AtomicOp Op, bool IsSigned,
                                         llvm::Value *OldVal,
                                         llvm::Value *RHS) {
  llvm::CmpInst::Predicate Pred;
  switch (Op) {
  default:
    llvm_unreachable("Unexpected min/max operation");
  case AtomicExpr::AO__atomic_max_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
    break;
  case AtomicExpr::AO__atomic_min_fetch:
    Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
    break;
  }
  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
}
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::SyncScope::ID Scope) {
  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
  bool PostOpMinMax = false;
  unsigned PostOp = 0;

  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                FailureOrder, Size, Order, Scope);
    return;
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
      emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
                                  Val1, Val2, FailureOrder, Size, Order, Scope);
    } else {
      llvm::BasicBlock *StrongBB =
          // ...
      llvm::BasicBlock *ContBB =
          // ...
      llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
      SI->addCase(CGF.Builder.getInt1(false), StrongBB);

      CGF.Builder.SetInsertPoint(StrongBB);
      emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      // ...
      CGF.Builder.SetInsertPoint(WeakBB);
      emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
                                  FailureOrder, Size, Order, Scope);
      // ...
      CGF.Builder.SetInsertPoint(ContBB);
    }
    return;
  }
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    // ...
    Load->setAtomic(Order, Scope);
    // ...
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n: {
    // ...
    Store->setAtomic(Order, Scope);
    // ...
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    Op = llvm::AtomicRMWInst::Xchg;
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
                                                 : llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
                                             : llvm::AtomicRMWInst::Add;
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
                                                 : llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
                                             : llvm::AtomicRMWInst::Sub;
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
                                                  : llvm::AtomicRMWInst::UMin;
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
                                                  : llvm::AtomicRMWInst::UMax;
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    Op = llvm::AtomicRMWInst::And;
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    Op = llvm::AtomicRMWInst::Or;
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    Op = llvm::AtomicRMWInst::Xor;
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    Op = llvm::AtomicRMWInst::Nand;
    break;
  }

  // ...
  llvm::AtomicRMWInst *RMWI =
      // ...

  llvm::Value *Result = RMWI;
  // ...
    Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
                                     // ...
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    Result = CGF.Builder.CreateNot(Result);
  // ...
}
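// This overload receives the synchronization scope as an llvm::Value*. A
// constant scope translates directly to an LLVM sync scope ID; a runtime
// scope becomes a switch over the scope model's runtime values, one block
// per scope, with the model's fallback value as the default case.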
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
                         Address Ptr, Address Val1, Address Val2,
                         llvm::Value *IsWeak, llvm::Value *FailureOrder,
                         uint64_t Size, llvm::AtomicOrdering Order,
                         llvm::Value *Scope) {
  auto ScopeModel = Expr->getScopeModel();
  // ...
  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
    // ...
  }

  // ...
  auto Scopes = ScopeModel->getRuntimeValues();
  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
  for (auto S : Scopes)
    // ...

  llvm::BasicBlock *ContBB =
      // ...
  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
  // ...
  auto FallBack = ScopeModel->getFallBackValue();
  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
  for (auto S : Scopes) {
    // ...
    SI->addCase(Builder.getInt32(S), B);
    Builder.SetInsertPoint(B);
    // ...
    Builder.CreateBr(ContBB);
  }

  Builder.SetInsertPoint(ContBB);
}
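// AddDirectArgument marshals one operand for the libcalls: the optimized
// (sized) variants load the value and pass it directly as an integer of the
// operation's width, while the generic variants pass operands indirectly,
// by pointer (the usual libatomic convention).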
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
                              bool UseOptimizedLibcall, llvm::Value *Val,
                              QualType ValTy, SourceLocation Loc,
                              CharUnits SizeInChars) {
  if (UseOptimizedLibcall) {
    // ...
    llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
    // ...
  }
  // ...
}
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
  // ...
    MemTy = AT->getValueType();
  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
  // ...
  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
      E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
    // ...
  }

  // ...
  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
  bool UseLibcall = Misaligned | Oversized;
  bool ShouldCastToIntPtrTy = true;
  // ...
        << (int)TInfo.Width.getQuantity()
        // ...
        << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    break;

  case AtomicExpr::AO__atomic_load:
    // ...
    break;

  case AtomicExpr::AO__atomic_store:
    // ...
    break;

  case AtomicExpr::AO__atomic_exchange:
    // ...
    break;

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange:
    // ...
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      // ...
    if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
      // ...
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
    // ...
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
    // ...
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
    // ...
    break;
  }

  // ...
  AtomicInfo Atomics(*this, AtomicVal);

  if (ShouldCastToIntPtrTy) {
    Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
    // ...
      Val1 = Atomics.convertToAtomicIntPointer(Val1);
    // ...
      Val2 = Atomics.convertToAtomicIntPointer(Val2);
  }
  // ...
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.emitCastToAtomicIntPointer(Dest);
  // ...
    Dest = Atomics.CreateTempAlloca();
    if (ShouldCastToIntPtrTy)
      Dest = Atomics.emitCastToAtomicIntPointer(Dest);
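  // Decide between the two libcall families: the arithmetic/logical fetch
  // operations can always use the optimized "__atomic_*_N" form (their
  // operands are passed by value), whereas load/store/exchange/
  // compare_exchange may do so only for sizes of 1, 2, 4, or 8 bytes and
  // suitable alignment; everything else goes through the generic,
  // memory-based "__atomic_*" entry points.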
  bool UseOptimizedLibcall = false;
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled above with EmitAtomicInit!");

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
    // ...
    UseOptimizedLibcall = true;
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__atomic_compare_exchange:
    // ...

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    // ...
    if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
      UseOptimizedLibcall = true;
    break;
  }
  if (!UseOptimizedLibcall) {
    // ...
  }

  // ...
  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
    // ...
    auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
    // ...
    auto T = llvm::cast<llvm::PointerType>(V->getType());
    auto *DestType = llvm::PointerType::getWithSamePointeeType(T, DestAS);
    // ...
  };

  // ...
  std::string LibCallName;
  // ...
  bool HaveRetTy = false;
  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
  bool PostOpMinMax = false;
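  // Select the runtime entry point. The *_fetch builtins reuse the
  // __atomic_fetch_* libcalls and reconstruct the post-operation value
  // afterwards via PostOp/PostOpMinMax, mirroring the inline path in
  // EmitAtomicOp; OpenCL builtins additionally rewrite the "__atomic"
  // prefix to "__opencl_atomic" below.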
  switch (E->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
  case AtomicExpr::AO__opencl_atomic_init:
    llvm_unreachable("Already handled!");

  // ...
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
    LibCallName = "__atomic_compare_exchange";
    // ...
    break;
  // ...
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__hip_atomic_exchange:
    LibCallName = "__atomic_exchange";
    // ...
    break;
  // ...
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__atomic_store_n:
    LibCallName = "__atomic_store";
    // ...
    break;
  // ...
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__atomic_load_n:
    LibCallName = "__atomic_load";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_add_fetch:
    PostOp = llvm::Instruction::Add;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
    LibCallName = "__atomic_fetch_add";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_and_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    LibCallName = "__atomic_fetch_and";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_or_fetch:
    PostOp = llvm::Instruction::Or;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    LibCallName = "__atomic_fetch_or";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_sub_fetch:
    PostOp = llvm::Instruction::Sub;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    LibCallName = "__atomic_fetch_sub";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_xor_fetch:
    PostOp = llvm::Instruction::Xor;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    LibCallName = "__atomic_fetch_xor";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_min_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
    LibCallName = E->getValueType()->isSignedIntegerType()
                      ? "__atomic_fetch_min"
                      : "__atomic_fetch_umin";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_max_fetch:
    PostOpMinMax = true;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
    LibCallName = E->getValueType()->isSignedIntegerType()
                      ? "__atomic_fetch_max"
                      : "__atomic_fetch_umax";
    // ...
    break;
  // ...
  case AtomicExpr::AO__atomic_nand_fetch:
    PostOp = llvm::Instruction::And;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    LibCallName = "__atomic_fetch_nand";
    // ...
    break;
  }

  // ...
    LibCallName = std::string("__opencl") +
                  StringRef(LibCallName).drop_front(1).str();
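  // For the optimized variants the operand size is mangled into the symbol
  // name (e.g. "__atomic_fetch_add" becomes "__atomic_fetch_add_4" when
  // Size is 4); the generic variants keep the unsuffixed name and receive
  // the object size as an explicit argument instead (the usual libatomic
  // convention).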
  if (UseOptimizedLibcall)
    LibCallName += "_" + llvm::utostr(Size);
  // ...
  if (UseOptimizedLibcall) {
    // ...
  }
  // ...
  assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
  // ...
    llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
    // ...
  } else if (PostOp) {
    llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
    ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
  }
  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
    ResVal = Builder.CreateNot(ResVal);
  // ...
  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store ||
                 E->getOp() == AtomicExpr::AO__atomic_store_n;
  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                E->getOp() == AtomicExpr::AO__hip_atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load ||
                E->getOp() == AtomicExpr::AO__atomic_load_n;
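  // Lower the C ABI memory order to an LLVM ordering. A constant order is
  // folded directly (loads reject release/acq_rel, stores reject
  // consume/acquire/acq_rel); a runtime order compiles to a switch whose
  // default block performs the monotonic variant.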
  if (isa<llvm::ConstantInt>(Order)) {
    auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
    // ...
    if (llvm::isValidAtomicOrderingCABI(ord))
      switch ((llvm::AtomicOrderingCABI)ord) {
      case llvm::AtomicOrderingCABI::relaxed:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Monotonic, Scope);
        break;
      case llvm::AtomicOrderingCABI::consume:
      case llvm::AtomicOrderingCABI::acquire:
        // ...
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Acquire, Scope);
        break;
      case llvm::AtomicOrderingCABI::release:
        // ...
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::Release, Scope);
        break;
      case llvm::AtomicOrderingCABI::acq_rel:
        if (IsLoad || IsStore)
          break;
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::AcquireRelease, Scope);
        break;
      case llvm::AtomicOrderingCABI::seq_cst:
        EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                     llvm::AtomicOrdering::SequentiallyConsistent, Scope);
        break;
      }
    // ...
  }
  // ...
  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
                   *ReleaseBB = nullptr, *AcqRelBB = nullptr,
                   *SeqCstBB = nullptr;
  // ...
  if (!IsLoad && !IsStore)
    // ...

  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);

  // ...
  Builder.SetInsertPoint(MonotonicBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::Monotonic, Scope);
  Builder.CreateBr(ContBB);
  // ...
    Builder.SetInsertPoint(AcquireBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
                AcquireBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
                AcquireBB);
  // ...
    Builder.SetInsertPoint(ReleaseBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
                ReleaseBB);
  // ...
  if (!IsLoad && !IsStore) {
    Builder.SetInsertPoint(AcqRelBB);
    EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
                 llvm::AtomicOrdering::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
                AcqRelBB);
  }
  Builder.SetInsertPoint(SeqCstBB);
  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
               llvm::AtomicOrdering::SequentiallyConsistent, Scope);
  Builder.CreateBr(ContBB);
  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
              SeqCstBB);

  // ...
  Builder.SetInsertPoint(ContBB);

  // ...
  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
  // ...
}
Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
  llvm::IntegerType *ty =
      llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
  return CGF.Builder.CreateElementBitCast(addr, ty);
}

Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
  // ...
  if (SourceSizeInBits != AtomicSizeInBits) {
    Address Tmp = CreateTempAlloca();
    // ...
                             std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
    // ...
  }

  return emitCastToAtomicIntPointer(Addr);
}
RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
                                             AggValueSlot resultSlot,
                                             SourceLocation loc,
                                             bool asValue) const {
  // ...
}

RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
                                             AggValueSlot ResultSlot,
                                             SourceLocation Loc,
                                             bool AsValue) const {
  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
  // ...
  auto *ValTy = AsValue
                    ? CGF.ConvertTypeForMem(ValueTy)
                    : getAtomicAddress().getElementType();
  if (ValTy->isIntegerTy()) {
    assert(IntVal->getType() == ValTy && "Different integer types.");
    // ...
  } else if (ValTy->isPointerTy())
    // ...
  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
    // ...

  // ...
  bool TempIsVolatile = false;
  // ...
    Temp = CreateTempAlloca();
  // ...
  Address CastTemp = emitCastToAtomicIntPointer(Temp);
  // ...
      ->setVolatile(TempIsVolatile);

  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
}
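// The load path: shouldUseLibcall() routes through __atomic_load into a
// temporary that is then reinterpreted, while the inline path emits an
// atomic load of the padded integer type and converts the result back via
// ConvertIntToValueOrAtomic.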
void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
                                       llvm::AtomicOrdering AO, bool) {
  // ...
}

llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
                                          bool IsVolatile) {
  // ...
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  // ...
  Load->setAtomic(AO);

  // ...
  if (IsVolatile)
    Load->setVolatile(true);
  // ...
}

bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
  // ...
  AtomicInfo AI(*this, LV);
  // ...
  bool AtomicIsInline = !AI.shouldUseLibcall();
  // ...
  return IsVolatile && AtomicIsInline;
}
RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
                                       AggValueSlot Slot) {
  llvm::AtomicOrdering AO;
  // ...
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  // ...
    AO = llvm::AtomicOrdering::Acquire;
  // ...
}

RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
                                  bool AsValue, llvm::AtomicOrdering AO,
                                  bool IsVolatile) {
  // ...
  if (shouldUseLibcall()) {
    // ...
      TempAddr = CreateTempAlloca();
    // ...
    EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);

    // ...
    return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
  }

  // ...
  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
  // ...
  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
}

RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
                                       llvm::AtomicOrdering AO, bool IsVolatile,
                                       AggValueSlot resultSlot) {
  AtomicInfo Atomics(*this, src);
  return Atomics.EmitAtomicLoad(resultSlot, loc, true, AO,
                                IsVolatile);
}
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  // ...
    emitMemSetZeroIfNecessary();

    // ...
    LValue TempLVal = projectValue();
    // ...
}

Address AtomicInfo::materializeRValue(RValue rvalue) const {
  // ...
  AtomicInfo Atomics(CGF, TempLV);
  Atomics.emitCopyIntoMemory(rvalue);
  // ...
}

llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
  // ...
  if (isa<llvm::IntegerType>(Value->getType()))
    // ...
  llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
      // ...
      LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
  if (isa<llvm::PointerType>(Value->getType()))
    // ...
  else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
    // ...

  // ...
  Address Addr = materializeRValue(RVal);
  // ...
  Addr = emitCastToAtomicIntPointer(Addr);
  // ...
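// Compare-exchange helpers: the inline form extracts the {previous value,
// success flag} pair from the cmpxchg instruction, while the libcall form
// invokes __atomic_compare_exchange with both orderings encoded as C ABI
// integer constants.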
std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
    llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
  // ...
  Address Addr = getAtomicAddressAsAtomicIntPointer();
  // ...
      ExpectedVal, DesiredVal,
      // ...
  Inst->setWeak(IsWeak);

  // ...
  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, 0);
  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, 1);
  return std::make_pair(PreviousVal, SuccessFailureVal);
}

llvm::Value *
AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
                                             llvm::Value *DesiredAddr,
                                             llvm::AtomicOrdering Success,
                                             llvm::AtomicOrdering Failure) {
  // ...
          llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
      // ...
          llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
      // ...
  return SuccessFailureRVal.getScalarVal();
}
std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
    RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
    llvm::AtomicOrdering Failure, bool IsWeak) {
  // ...
  if (shouldUseLibcall()) {
    // ...
    Address DesiredAddr = materializeRValue(Desired);
    auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                                 // ...
    return std::make_pair(
        // ...
  }

  // ...
  auto *ExpectedVal = convertRValueToInt(Expected);
  auto *DesiredVal = convertRValueToInt(Desired);
  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
                                         // ...
  return std::make_pair(
      // ...
}
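// The atomic update helpers implement a classic CAS loop: load the current
// value, apply the update in a temporary, then compare-exchange the result
// back. The inline form threads the previous value through a PHI node so a
// failed exchange retries without reloading.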
// ...
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...
    Address Ptr = Atomics.materializeRValue(OldRVal);
  // ...
  RValue NewRVal = UpdateOp(UpRVal);
  // ...

void AtomicInfo::EmitAtomicUpdateLibcall(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  // ...
  Address ExpectedAddr = CreateTempAlloca();
  // ...
  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  // ...
  Address DesiredAddr = CreateTempAlloca();
  // ...
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
                                           // ...
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       // ...
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  // ...
}
void AtomicInfo::EmitAtomicUpdateOp(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // ...
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // ...
  auto *CurBB = CGF.Builder.GetInsertBlock();
  // ...
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             // ...
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  // ...
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  // ...
}
// ...
  LValue AtomicLVal = Atomics.getAtomicLValue();
  // ...

void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
                                         RValue UpdateRVal, bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
  // ...
  Address ExpectedAddr = CreateTempAlloca();
  // ...
  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
  // ...
  Address DesiredAddr = CreateTempAlloca();
  // ...
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  auto *Res =
      EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
                                       // ...
  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
  // ...
}

void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                    bool IsVolatile) {
  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);

  // ...
  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
  // ...
  auto *CurBB = CGF.Builder.GetInsertBlock();
  // ...
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
                                             // ...
  PHI->addIncoming(OldVal, CurBB);
  Address NewAtomicAddr = CreateTempAlloca();
  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
  // ...
      requiresMemSetZero(getAtomicAddress().getElementType())) {
    // ...
  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
  // ...
}
void AtomicInfo::EmitAtomicUpdate(
    llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
    bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
  }
}

void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
                                  bool IsVolatile) {
  if (shouldUseLibcall()) {
    EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
  } else {
    EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
  }
}
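// EmitAtomicStore: atomic stores may not use acquire or acquire-release
// ordering, so those are downgraded (acquire -> monotonic, acq_rel ->
// release) before the store instruction is emitted; non-simple l-values
// fall back to the generic update path above.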
void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
                                      bool isInit) {
  llvm::AtomicOrdering AO;
  // ...
    AO = llvm::AtomicOrdering::SequentiallyConsistent;
  // ...
    AO = llvm::AtomicOrdering::Release;
  // ...
}

void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
                                      llvm::AtomicOrdering AO, bool IsVolatile,
                                      bool isInit) {
  // ...
  AtomicInfo atomics(*this, dest);
  LValue LVal = atomics.getAtomicLValue();

  // ...
    atomics.emitCopyIntoMemory(rvalue);
  // ...
  if (atomics.shouldUseLibcall()) {
    // ...
    Address srcAddr = atomics.materializeRValue(rvalue);
    // ...
  }

  // ...
  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
  // ...
      atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
  intValue = Builder.CreateIntCast(
      // ...

  if (AO == llvm::AtomicOrdering::Acquire)
    AO = llvm::AtomicOrdering::Monotonic;
  else if (AO == llvm::AtomicOrdering::AcquireRelease)
    AO = llvm::AtomicOrdering::Release;
  // ...
  store->setAtomic(AO);

  // ...
    store->setVolatile(true);
  // ...
  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
}
std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
    llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
    AggValueSlot Slot) {
  // ...
         Expected.getAggregateAddress().getElementType() ==
         // ...
  AtomicInfo Atomics(*this, Obj);

  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
                                           IsWeak);
}

void CodeGenFunction::EmitAtomicUpdate(
    LValue LVal, llvm::AtomicOrdering AO,
    const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
  AtomicInfo Atomics(*this, LVal);
  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
}

void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest);
  // ...
  switch (atomics.getEvaluationKind()) {
  // ...
    bool Zeroed = false;
    // ...
      Zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
      // ...
  }
  llvm_unreachable("bad evaluation kind");
}