CGAtomic.cpp
1//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
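//
// Illustrative note (annotation, not part of the original file): a source
// construct such as
//
//   _Atomic(int) Counter;
//   int Old = __c11_atomic_fetch_add(&Counter, 1, __ATOMIC_SEQ_CST);
//
// is lowered by the code in this file, typically to a single
// 'atomicrmw add ... seq_cst' instruction when the target supports inline
// atomics of that width, or to an __atomic_* runtime call otherwise.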
12
13#include "CGCall.h"
14#include "CGRecordLayout.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/ASTContext.h"
19#include "clang/CodeGen/CGFunctionInfo.h"
20#include "clang/Frontend/FrontendDiagnostic.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/IR/DataLayout.h"
23#include "llvm/IR/Intrinsics.h"
24
25using namespace clang;
26using namespace CodeGen;
27
28namespace {
29 class AtomicInfo {
30 CodeGenFunction &CGF;
31 QualType AtomicTy;
32 QualType ValueTy;
33 uint64_t AtomicSizeInBits;
34 uint64_t ValueSizeInBits;
35 CharUnits AtomicAlign;
36 CharUnits ValueAlign;
37 TypeEvaluationKind EvaluationKind;
38 bool UseLibcall;
39 LValue LVal;
40 CGBitFieldInfo BFI;
41 public:
42 AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
43 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
44 EvaluationKind(TEK_Scalar), UseLibcall(true) {
45 assert(!lvalue.isGlobalReg());
46 ASTContext &C = CGF.getContext();
47 if (lvalue.isSimple()) {
48 AtomicTy = lvalue.getType();
49 if (auto *ATy = AtomicTy->getAs<AtomicType>())
50 ValueTy = ATy->getValueType();
51 else
52 ValueTy = AtomicTy;
53 EvaluationKind = CGF.getEvaluationKind(ValueTy);
54
55 uint64_t ValueAlignInBits;
56 uint64_t AtomicAlignInBits;
57 TypeInfo ValueTI = C.getTypeInfo(ValueTy);
58 ValueSizeInBits = ValueTI.Width;
59 ValueAlignInBits = ValueTI.Align;
60
61 TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
62 AtomicSizeInBits = AtomicTI.Width;
63 AtomicAlignInBits = AtomicTI.Align;
64
65 assert(ValueSizeInBits <= AtomicSizeInBits);
66 assert(ValueAlignInBits <= AtomicAlignInBits);
67
68 AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
69 ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
70 if (lvalue.getAlignment().isZero())
71 lvalue.setAlignment(AtomicAlign);
72
73 LVal = lvalue;
74 } else if (lvalue.isBitField()) {
75 ValueTy = lvalue.getType();
76 ValueSizeInBits = C.getTypeSize(ValueTy);
77 auto &OrigBFI = lvalue.getBitFieldInfo();
78 auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
79 AtomicSizeInBits = C.toBits(
80 C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
81 .alignTo(lvalue.getAlignment()));
82 llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
83 auto OffsetInChars =
84 (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
85 lvalue.getAlignment();
86 llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
87 CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
88 StoragePtr = CGF.Builder.CreateAddrSpaceCast(
89 StoragePtr, CGF.DefaultPtrTy, "atomic_bitfield_base");
90 BFI = OrigBFI;
91 BFI.Offset = Offset;
92 BFI.StorageSize = AtomicSizeInBits;
93 BFI.StorageOffset += OffsetInChars;
94 llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
95 LVal = LValue::MakeBitfield(
96 Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
97 lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
98 AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
99 if (AtomicTy.isNull()) {
100 llvm::APInt Size(
101 /*numBits=*/32,
102 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
103 AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
104 ArraySizeModifier::Normal,
105 /*IndexTypeQuals=*/0);
106 }
107 AtomicAlign = ValueAlign = lvalue.getAlignment();
108 } else if (lvalue.isVectorElt()) {
109 ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
110 ValueSizeInBits = C.getTypeSize(ValueTy);
111 AtomicTy = lvalue.getType();
112 AtomicSizeInBits = C.getTypeSize(AtomicTy);
113 AtomicAlign = ValueAlign = lvalue.getAlignment();
114 LVal = lvalue;
115 } else {
116 assert(lvalue.isExtVectorElt());
117 ValueTy = lvalue.getType();
118 ValueSizeInBits = C.getTypeSize(ValueTy);
119 AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
120 lvalue.getType(), cast<llvm::FixedVectorType>(
121 lvalue.getExtVectorAddress().getElementType())
122 ->getNumElements());
123 AtomicSizeInBits = C.getTypeSize(AtomicTy);
124 AtomicAlign = ValueAlign = lvalue.getAlignment();
125 LVal = lvalue;
126 }
127 UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
128 AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
129 }
130
131 QualType getAtomicType() const { return AtomicTy; }
132 QualType getValueType() const { return ValueTy; }
133 CharUnits getAtomicAlignment() const { return AtomicAlign; }
134 uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
135 uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
136 TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
137 bool shouldUseLibcall() const { return UseLibcall; }
138 const LValue &getAtomicLValue() const { return LVal; }
139 llvm::Value *getAtomicPointer() const {
140 if (LVal.isSimple())
141 return LVal.emitRawPointer(CGF);
142 else if (LVal.isBitField())
143 return LVal.getRawBitFieldPointer(CGF);
144 else if (LVal.isVectorElt())
145 return LVal.getRawVectorPointer(CGF);
146 assert(LVal.isExtVectorElt());
147 return LVal.getRawExtVectorPointer(CGF);
148 }
149 Address getAtomicAddress() const {
150 llvm::Type *ElTy;
151 if (LVal.isSimple())
152 ElTy = LVal.getAddress().getElementType();
153 else if (LVal.isBitField())
154 ElTy = LVal.getBitFieldAddress().getElementType();
155 else if (LVal.isVectorElt())
156 ElTy = LVal.getVectorAddress().getElementType();
157 else
158 ElTy = LVal.getExtVectorAddress().getElementType();
159 return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
160 }
161
162 Address getAtomicAddressAsAtomicIntPointer() const {
163 return castToAtomicIntPointer(getAtomicAddress());
164 }
165
166 /// Is the atomic size larger than the underlying value type?
167 ///
168 /// Note that the absence of padding does not mean that atomic
169 /// objects are completely interchangeable with non-atomic
170 /// objects: we might have promoted the alignment of a type
171 /// without making it bigger.
172 bool hasPadding() const {
173 return (ValueSizeInBits != AtomicSizeInBits);
174 }
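 // Illustrative example (annotation, assuming typical _Atomic padding rules):
 // a 3-byte struct is usually padded to the next power-of-2 size when wrapped
 // in _Atomic, so ValueSizeInBits == 24 while AtomicSizeInBits == 32 and
 // hasPadding() returns true:
 //
 //   struct Packed { char Bytes[3]; };
 //   _Atomic(struct Packed) P; // value occupies 3 bytes of a 4-byte object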
175
176 bool emitMemSetZeroIfNecessary() const;
177
178 llvm::Value *getAtomicSizeValue() const {
179 CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
180 return CGF.CGM.getSize(size);
181 }
182
183 /// Cast the given pointer to an integer pointer suitable for atomic
184 /// operations.
185 Address castToAtomicIntPointer(Address Addr) const;
186
187 /// If Addr is compatible with the iN that will be used for an atomic
188 /// operation, bitcast it. Otherwise, create a temporary that is suitable
189 /// and copy the value across.
190 Address convertToAtomicIntPointer(Address Addr) const;
191
192 /// Turn an atomic-layout object into an r-value.
193 RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
194 SourceLocation loc, bool AsValue) const;
195
196 llvm::Value *getScalarRValValueOrNull(RValue RVal) const;
197
198 /// Converts an rvalue to integer value if needed.
199 llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;
200
201 RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
202 SourceLocation Loc, bool AsValue,
203 bool CmpXchg = false) const;
204
205 /// Copy an atomic r-value into atomic-layout memory.
206 void emitCopyIntoMemory(RValue rvalue) const;
207
208 /// Project an l-value down to the value field.
209 LValue projectValue() const {
210 assert(LVal.isSimple());
211 Address addr = getAtomicAddress();
212 if (hasPadding())
213 addr = CGF.Builder.CreateStructGEP(addr, 0);
214
215 return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
216 LVal.getBaseInfo(), LVal.getTBAAInfo());
217 }
218
219 /// Emits atomic load.
220 /// \returns Loaded value.
221 RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
222 bool AsValue, llvm::AtomicOrdering AO,
223 bool IsVolatile);
224
225 /// Emits atomic compare-and-exchange sequence.
226 /// \param Expected Expected value.
227 /// \param Desired Desired value.
228 /// \param Success Atomic ordering for success operation.
229 /// \param Failure Atomic ordering for failed operation.
230 /// \param IsWeak true if atomic operation is weak, false otherwise.
231 /// \returns Pair of values: previous value from storage (value type) and
232 /// boolean flag (i1 type) with true if success and false otherwise.
233 std::pair<RValue, llvm::Value *>
234 EmitAtomicCompareExchange(RValue Expected, RValue Desired,
235 llvm::AtomicOrdering Success =
236 llvm::AtomicOrdering::SequentiallyConsistent,
237 llvm::AtomicOrdering Failure =
238 llvm::AtomicOrdering::SequentiallyConsistent,
239 bool IsWeak = false);
240
241 /// Emits atomic update.
242 /// \param AO Atomic ordering.
243 /// \param UpdateOp Update operation for the current lvalue.
244 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
245 const llvm::function_ref<RValue(RValue)> &UpdateOp,
246 bool IsVolatile);
247 /// Emits atomic update.
248 /// \param AO Atomic ordering.
249 void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
250 bool IsVolatile);
251
252 /// Materialize an atomic r-value in atomic-layout memory.
253 Address materializeRValue(RValue rvalue) const;
254
255 /// Creates temp alloca for intermediate operations on atomic value.
256 Address CreateTempAlloca() const;
257 private:
258 bool requiresMemSetZero(llvm::Type *type) const;
259
260
261 /// Emits atomic load as a libcall.
262 void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
263 llvm::AtomicOrdering AO, bool IsVolatile);
264 /// Emits atomic load as LLVM instruction.
265 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
266 bool CmpXchg = false);
267 /// Emits atomic compare-and-exchange op as a libcall.
268 llvm::Value *EmitAtomicCompareExchangeLibcall(
269 llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
270 llvm::AtomicOrdering Success =
271 llvm::AtomicOrdering::SequentiallyConsistent,
272 llvm::AtomicOrdering Failure =
273 llvm::AtomicOrdering::SequentiallyConsistent);
274 /// Emits atomic compare-and-exchange op as LLVM instruction.
275 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
276 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
277 llvm::AtomicOrdering Success =
278 llvm::AtomicOrdering::SequentiallyConsistent,
279 llvm::AtomicOrdering Failure =
280 llvm::AtomicOrdering::SequentiallyConsistent,
281 bool IsWeak = false);
282 /// Emit atomic update as libcalls.
283 void
284 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
285 const llvm::function_ref<RValue(RValue)> &UpdateOp,
286 bool IsVolatile);
287 /// Emit atomic update as LLVM instructions.
288 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
289 const llvm::function_ref<RValue(RValue)> &UpdateOp,
290 bool IsVolatile);
291 /// Emit atomic update as libcalls.
292 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
293 bool IsVolatile);
294 /// Emit atomic update as LLVM instructions.
295 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
296 bool IsVolatile);
297 };
298}
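// Illustrative usage sketch (annotation): this is how AtomicInfo is typically
// driven later in this file, e.g. from EmitAtomicExpr:
//
//   LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
//   AtomicInfo Atomics(*this, AtomicVal);
//   if (Atomics.shouldUseLibcall())
//     ... emit an __atomic_* runtime call ...
//   else
//     ... emit native atomic IR instructions ...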
299
300Address AtomicInfo::CreateTempAlloca() const {
301 Address TempAlloca = CGF.CreateMemTemp(
302 (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
303 : AtomicTy,
304 getAtomicAlignment(),
305 "atomic-temp");
306 // Cast to pointer to value type for bitfields.
307 if (LVal.isBitField())
308 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
309 TempAlloca, getAtomicAddress().getType(),
310 getAtomicAddress().getElementType());
311 return TempAlloca;
312}
313
314static RValue emitAtomicLibcall(CodeGenFunction &CGF,
315 StringRef fnName,
316 QualType resultType,
317 CallArgList &args) {
318 const CGFunctionInfo &fnInfo =
319 CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
320 llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
321 llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
322 fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
323 fnAttrB.addAttribute(llvm::Attribute::WillReturn);
324 llvm::AttributeList fnAttrs = llvm::AttributeList::get(
325 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
326
327 llvm::FunctionCallee fn =
328 CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
329 auto callee = CGCallee::forDirect(fn);
330 return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
331}
332
333/// Does a store of the given IR type modify the full expected width?
334static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
335 uint64_t expectedSize) {
336 return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
337}
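// Illustrative example (annotation): on x86-64, 'long double' is the 80-bit
// x86_fp80 type with a 10-byte store size, while _Atomic(long double) occupies
// 16 bytes, so isFullSizeType(CGM, X86_FP80Ty, 128) is false and the padding
// must be zero-initialized before cmpxchg-style operations can compare whole
// objects reliably.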
338
339/// Does the atomic type require memsetting to zero before initialization?
340///
341/// The IR type is provided as a way of making certain queries faster.
342bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
343 // If the atomic type has size padding, we definitely need a memset.
344 if (hasPadding()) return true;
345
346 // Otherwise, do some simple heuristics to try to avoid it:
347 switch (getEvaluationKind()) {
348 // For scalars and complexes, check whether the store size of the
349 // type uses the full size.
350 case TEK_Scalar:
351 return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
352 case TEK_Complex:
353 return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
354 AtomicSizeInBits / 2);
355
356 // Padding in structs has an undefined bit pattern. User beware.
357 case TEK_Aggregate:
358 return false;
359 }
360 llvm_unreachable("bad evaluation kind");
361}
362
363bool AtomicInfo::emitMemSetZeroIfNecessary() const {
364 assert(LVal.isSimple());
365 Address addr = LVal.getAddress();
366 if (!requiresMemSetZero(addr.getElementType()))
367 return false;
368
369 CGF.Builder.CreateMemSet(
370 addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
371 CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
372 LVal.getAlignment().getAsAlign());
373 return true;
374}
375
376static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
377 Address Dest, Address Ptr, Address Val1,
378 Address Val2, Address ExpectedResult,
379 uint64_t Size, llvm::AtomicOrdering SuccessOrder,
380 llvm::AtomicOrdering FailureOrder,
381 llvm::SyncScope::ID Scope) {
382 // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
383 llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
384 llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
385
386 llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
387 Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
388 Pair->setVolatile(E->isVolatile());
389 Pair->setWeak(IsWeak);
390 CGF.getTargetHooks().setTargetAtomicMetadata(CGF, *Pair, E);
391
392 // Cmp holds the result of the compare-exchange operation: true on success,
393 // false on failure.
394 llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
395 llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
396
397 // This basic block is used to hold the store instruction if the operation
398 // failed.
399 llvm::BasicBlock *StoreExpectedBB =
400 CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
401
402 // This basic block is the exit point of the operation, we should end up
403 // here regardless of whether or not the operation succeeded.
404 llvm::BasicBlock *ContinueBB =
405 CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
406
407 // Update Expected if Expected isn't equal to Old, otherwise branch to the
408 // exit point.
409 CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
410
411 CGF.Builder.SetInsertPoint(StoreExpectedBB);
412 // Update the memory at Expected with Old's value.
413 llvm::Type *ExpectedType = ExpectedResult.getElementType();
414 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
415 uint64_t ExpectedSizeInBytes = DL.getTypeStoreSize(ExpectedType);
416
417 if (ExpectedSizeInBytes == Size) {
418 // Sizes match: store directly
419 auto *I = CGF.Builder.CreateStore(Old, ExpectedResult);
420 CGF.addInstToCurrentSourceAtom(I, Old);
421 } else {
422 // store only the first ExpectedSizeInBytes bytes of Old
423 llvm::Type *OldType = Old->getType();
424
425 // Allocate temporary storage for Old value
426 Address OldTmp =
427 CGF.CreateTempAlloca(OldType, Ptr.getAlignment(), "old.tmp");
428
429 // Store Old into this temporary
430 auto *I = CGF.Builder.CreateStore(Old, OldTmp);
431 CGF.addInstToCurrentSourceAtom(I, Old);
432
433 // Perform memcpy for first ExpectedSizeInBytes bytes
434 CGF.Builder.CreateMemCpy(ExpectedResult, OldTmp, ExpectedSizeInBytes,
435 /*isVolatile=*/false);
436 }
437
438 // Finally, branch to the exit point.
439 CGF.Builder.CreateBr(ContinueBB);
440
441 CGF.Builder.SetInsertPoint(ContinueBB);
442 // Update the memory at Dest with Cmp's value.
443 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
444}
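// Rough shape of the IR produced above (illustrative sketch, value names
// invented):
//
//   %pair = cmpxchg [weak] ptr %ptr, iN %expected, iN %desired <success> <failure>
//   %old  = extractvalue { iN, i1 } %pair, 0
//   %ok   = extractvalue { iN, i1 } %pair, 1
//   br i1 %ok, label %cmpxchg.continue, label %cmpxchg.store_expected
//
// cmpxchg.store_expected:   ; write %old back into the 'expected' object
//   ...
//   br label %cmpxchg.continue
//
// cmpxchg.continue:         ; the i1 result is stored to Dest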
445
446/// Given an ordering required on success, emit all possible cmpxchg
447/// instructions to cope with the provided (but possibly only dynamically known)
448/// FailureOrder.
449static void emitAtomicCmpXchgFailureSet(
450 CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr,
451 Address Val1, Address Val2, Address ExpectedResult,
452 llvm::Value *FailureOrderVal, uint64_t Size,
453 llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope) {
454 llvm::AtomicOrdering FailureOrder;
455 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
456 auto FOS = FO->getSExtValue();
457 if (!llvm::isValidAtomicOrderingCABI(FOS))
458 FailureOrder = llvm::AtomicOrdering::Monotonic;
459 else
460 switch ((llvm::AtomicOrderingCABI)FOS) {
461 case llvm::AtomicOrderingCABI::relaxed:
462 // 31.7.2.18: "The failure argument shall not be memory_order_release
463 // nor memory_order_acq_rel". Fallback to monotonic.
464 case llvm::AtomicOrderingCABI::release:
465 case llvm::AtomicOrderingCABI::acq_rel:
466 FailureOrder = llvm::AtomicOrdering::Monotonic;
467 break;
468 case llvm::AtomicOrderingCABI::consume:
469 case llvm::AtomicOrderingCABI::acquire:
470 FailureOrder = llvm::AtomicOrdering::Acquire;
471 break;
472 case llvm::AtomicOrderingCABI::seq_cst:
473 FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
474 break;
475 }
476 // Prior to C++17, "the failure argument shall be no stronger than the
477 // success argument". This condition has been lifted and the only
478 // precondition is 31.7.2.18. Effectively treat this as a DR and skip
479 // language version checks.
480 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult,
481 Size, SuccessOrder, FailureOrder, Scope);
482 return;
483 }
484
485 // Create all the relevant BB's
486 auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
487 auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
488 auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
489 auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
490
491 // MonotonicBB is arbitrarily chosen as the default case; in practice, this
492 // doesn't matter unless someone is crazy enough to use something that
493 // doesn't fold to a constant for the ordering.
494 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
495 // Implemented as acquire, since it's the closest in LLVM.
496 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
497 AcquireBB);
498 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
499 AcquireBB);
500 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
501 SeqCstBB);
502
503 // Emit all the different atomics
504 CGF.Builder.SetInsertPoint(MonotonicBB);
505 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
506 SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
507 CGF.Builder.CreateBr(ContBB);
508
509 CGF.Builder.SetInsertPoint(AcquireBB);
510 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
511 SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
512 CGF.Builder.CreateBr(ContBB);
513
514 CGF.Builder.SetInsertPoint(SeqCstBB);
515 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, ExpectedResult, Size,
516 SuccessOrder, llvm::AtomicOrdering::SequentiallyConsistent,
517 Scope);
518 CGF.Builder.CreateBr(ContBB);
519
520 CGF.Builder.SetInsertPoint(ContBB);
521}
522
523/// Duplicate the atomic min/max operation in conventional IR for the builtin
524/// variants that return the new rather than the original value.
525static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
526 AtomicExpr::AtomicOp Op,
527 bool IsSigned,
528 llvm::Value *OldVal,
529 llvm::Value *RHS) {
530 const bool IsFP = OldVal->getType()->isFloatingPointTy();
531
532 if (IsFP) {
533 llvm::Intrinsic::ID IID = (Op == AtomicExpr::AO__atomic_max_fetch ||
534 Op == AtomicExpr::AO__scoped_atomic_max_fetch)
535 ? llvm::Intrinsic::maxnum
536 : llvm::Intrinsic::minnum;
537
538 return Builder.CreateBinaryIntrinsic(IID, OldVal, RHS, llvm::FMFSource(),
539 "newval");
540 }
541
542 llvm::CmpInst::Predicate Pred;
543 switch (Op) {
544 default:
545 llvm_unreachable("Unexpected min/max operation");
546 case AtomicExpr::AO__atomic_max_fetch:
547 case AtomicExpr::AO__scoped_atomic_max_fetch:
548 Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
549 break;
550 case AtomicExpr::AO__atomic_min_fetch:
551 case AtomicExpr::AO__scoped_atomic_min_fetch:
552 Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
553 break;
554 }
555 llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
556 return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
557}
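// Illustrative example (annotation): __atomic_max_fetch must return the new
// value, but 'atomicrmw max' returns the old one, so the maximum is recomputed
// in ordinary IR. For a signed 32-bit operand this is roughly:
//
//   %old    = atomicrmw max ptr %p, i32 %rhs seq_cst
//   %tst    = icmp sgt i32 %old, %rhs
//   %newval = select i1 %tst, i32 %old, i32 %rhs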
558
559static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
560 Address Ptr, Address Val1, Address Val2,
561 Address ExpectedResult, llvm::Value *IsWeak,
562 llvm::Value *FailureOrder, uint64_t Size,
563 llvm::AtomicOrdering Order,
564 llvm::SyncScope::ID Scope) {
565 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
566 bool PostOpMinMax = false;
567 unsigned PostOp = 0;
568
569 switch (E->getOp()) {
570 case AtomicExpr::AO__c11_atomic_init:
571 case AtomicExpr::AO__opencl_atomic_init:
572 llvm_unreachable("Already handled!");
573
574 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
575 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
576 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
577 emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
578 ExpectedResult, FailureOrder, Size, Order,
579 Scope);
580 return;
581 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
582 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
583 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
584 emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
585 ExpectedResult, FailureOrder, Size, Order,
586 Scope);
587 return;
588 case AtomicExpr::AO__atomic_compare_exchange:
589 case AtomicExpr::AO__atomic_compare_exchange_n:
590 case AtomicExpr::AO__scoped_atomic_compare_exchange:
591 case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
592 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
593 emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
594 Val1, Val2, ExpectedResult, FailureOrder,
595 Size, Order, Scope);
596 } else {
597 // Create all the relevant BB's
598 llvm::BasicBlock *StrongBB =
599 CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
600 llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
601 llvm::BasicBlock *ContBB =
602 CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
603
604 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
605 SI->addCase(CGF.Builder.getInt1(false), StrongBB);
606
607 CGF.Builder.SetInsertPoint(StrongBB);
608 emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
609 ExpectedResult, FailureOrder, Size, Order,
610 Scope);
611 CGF.Builder.CreateBr(ContBB);
612
613 CGF.Builder.SetInsertPoint(WeakBB);
614 emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
615 ExpectedResult, FailureOrder, Size, Order,
616 Scope);
617 CGF.Builder.CreateBr(ContBB);
618
619 CGF.Builder.SetInsertPoint(ContBB);
620 }
621 return;
622 }
623 case AtomicExpr::AO__c11_atomic_load:
624 case AtomicExpr::AO__opencl_atomic_load:
625 case AtomicExpr::AO__hip_atomic_load:
626 case AtomicExpr::AO__atomic_load_n:
627 case AtomicExpr::AO__atomic_load:
628 case AtomicExpr::AO__scoped_atomic_load_n:
629 case AtomicExpr::AO__scoped_atomic_load: {
630 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
631 Load->setAtomic(Order, Scope);
632 Load->setVolatile(E->isVolatile());
633 CGF.maybeAttachRangeForLoad(Load, E->getValueType(), E->getExprLoc());
634 auto *I = CGF.Builder.CreateStore(Load, Dest);
635 CGF.addInstToCurrentSourceAtom(I, Load);
636 return;
637 }
638
639 case AtomicExpr::AO__c11_atomic_store:
640 case AtomicExpr::AO__opencl_atomic_store:
641 case AtomicExpr::AO__hip_atomic_store:
642 case AtomicExpr::AO__atomic_store:
643 case AtomicExpr::AO__atomic_store_n:
644 case AtomicExpr::AO__scoped_atomic_store:
645 case AtomicExpr::AO__scoped_atomic_store_n: {
646 llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
647 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
648 Store->setAtomic(Order, Scope);
649 Store->setVolatile(E->isVolatile());
650 CGF.addInstToCurrentSourceAtom(Store, LoadVal1);
651 return;
652 }
653
654 case AtomicExpr::AO__c11_atomic_exchange:
655 case AtomicExpr::AO__hip_atomic_exchange:
656 case AtomicExpr::AO__opencl_atomic_exchange:
657 case AtomicExpr::AO__atomic_exchange_n:
658 case AtomicExpr::AO__atomic_exchange:
659 case AtomicExpr::AO__scoped_atomic_exchange_n:
660 case AtomicExpr::AO__scoped_atomic_exchange:
661 Op = llvm::AtomicRMWInst::Xchg;
662 break;
663
664 case AtomicExpr::AO__atomic_add_fetch:
665 case AtomicExpr::AO__scoped_atomic_add_fetch:
666 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
667 : llvm::Instruction::Add;
668 [[fallthrough]];
669 case AtomicExpr::AO__c11_atomic_fetch_add:
670 case AtomicExpr::AO__hip_atomic_fetch_add:
671 case AtomicExpr::AO__opencl_atomic_fetch_add:
672 case AtomicExpr::AO__atomic_fetch_add:
673 case AtomicExpr::AO__scoped_atomic_fetch_add:
674 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
675 : llvm::AtomicRMWInst::Add;
676 break;
677
678 case AtomicExpr::AO__atomic_sub_fetch:
679 case AtomicExpr::AO__scoped_atomic_sub_fetch:
680 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
681 : llvm::Instruction::Sub;
682 [[fallthrough]];
683 case AtomicExpr::AO__c11_atomic_fetch_sub:
684 case AtomicExpr::AO__hip_atomic_fetch_sub:
685 case AtomicExpr::AO__opencl_atomic_fetch_sub:
686 case AtomicExpr::AO__atomic_fetch_sub:
687 case AtomicExpr::AO__scoped_atomic_fetch_sub:
688 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
689 : llvm::AtomicRMWInst::Sub;
690 break;
691
692 case AtomicExpr::AO__atomic_min_fetch:
693 case AtomicExpr::AO__scoped_atomic_min_fetch:
694 PostOpMinMax = true;
695 [[fallthrough]];
696 case AtomicExpr::AO__c11_atomic_fetch_min:
697 case AtomicExpr::AO__hip_atomic_fetch_min:
698 case AtomicExpr::AO__opencl_atomic_fetch_min:
699 case AtomicExpr::AO__atomic_fetch_min:
700 case AtomicExpr::AO__scoped_atomic_fetch_min:
701 Op = E->getValueType()->isFloatingType()
702 ? llvm::AtomicRMWInst::FMin
703 : (E->getValueType()->isSignedIntegerType()
704 ? llvm::AtomicRMWInst::Min
705 : llvm::AtomicRMWInst::UMin);
706 break;
707
708 case AtomicExpr::AO__atomic_max_fetch:
709 case AtomicExpr::AO__scoped_atomic_max_fetch:
710 PostOpMinMax = true;
711 [[fallthrough]];
712 case AtomicExpr::AO__c11_atomic_fetch_max:
713 case AtomicExpr::AO__hip_atomic_fetch_max:
714 case AtomicExpr::AO__opencl_atomic_fetch_max:
715 case AtomicExpr::AO__atomic_fetch_max:
716 case AtomicExpr::AO__scoped_atomic_fetch_max:
717 Op = E->getValueType()->isFloatingType()
718 ? llvm::AtomicRMWInst::FMax
719 : (E->getValueType()->isSignedIntegerType()
720 ? llvm::AtomicRMWInst::Max
721 : llvm::AtomicRMWInst::UMax);
722 break;
723
724 case AtomicExpr::AO__atomic_and_fetch:
725 case AtomicExpr::AO__scoped_atomic_and_fetch:
726 PostOp = llvm::Instruction::And;
727 [[fallthrough]];
728 case AtomicExpr::AO__c11_atomic_fetch_and:
729 case AtomicExpr::AO__hip_atomic_fetch_and:
730 case AtomicExpr::AO__opencl_atomic_fetch_and:
731 case AtomicExpr::AO__atomic_fetch_and:
732 case AtomicExpr::AO__scoped_atomic_fetch_and:
733 Op = llvm::AtomicRMWInst::And;
734 break;
735
736 case AtomicExpr::AO__atomic_or_fetch:
737 case AtomicExpr::AO__scoped_atomic_or_fetch:
738 PostOp = llvm::Instruction::Or;
739 [[fallthrough]];
740 case AtomicExpr::AO__c11_atomic_fetch_or:
741 case AtomicExpr::AO__hip_atomic_fetch_or:
742 case AtomicExpr::AO__opencl_atomic_fetch_or:
743 case AtomicExpr::AO__atomic_fetch_or:
744 case AtomicExpr::AO__scoped_atomic_fetch_or:
745 Op = llvm::AtomicRMWInst::Or;
746 break;
747
748 case AtomicExpr::AO__atomic_xor_fetch:
749 case AtomicExpr::AO__scoped_atomic_xor_fetch:
750 PostOp = llvm::Instruction::Xor;
751 [[fallthrough]];
752 case AtomicExpr::AO__c11_atomic_fetch_xor:
753 case AtomicExpr::AO__hip_atomic_fetch_xor:
754 case AtomicExpr::AO__opencl_atomic_fetch_xor:
755 case AtomicExpr::AO__atomic_fetch_xor:
756 case AtomicExpr::AO__scoped_atomic_fetch_xor:
757 Op = llvm::AtomicRMWInst::Xor;
758 break;
759
760 case AtomicExpr::AO__atomic_nand_fetch:
761 case AtomicExpr::AO__scoped_atomic_nand_fetch:
762 PostOp = llvm::Instruction::And; // the NOT is special cased below
763 [[fallthrough]];
764 case AtomicExpr::AO__c11_atomic_fetch_nand:
765 case AtomicExpr::AO__atomic_fetch_nand:
766 case AtomicExpr::AO__scoped_atomic_fetch_nand:
767 Op = llvm::AtomicRMWInst::Nand;
768 break;
769
770 case AtomicExpr::AO__atomic_fetch_uinc:
771 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
772 Op = llvm::AtomicRMWInst::UIncWrap;
773 break;
774 case AtomicExpr::AO__atomic_fetch_udec:
775 case AtomicExpr::AO__scoped_atomic_fetch_udec:
776 Op = llvm::AtomicRMWInst::UDecWrap;
777 break;
778
779 case AtomicExpr::AO__atomic_test_and_set: {
780 llvm::AtomicRMWInst *RMWI =
781 CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
782 CGF.Builder.getInt8(1), Order, Scope, E);
783 RMWI->setVolatile(E->isVolatile());
784 llvm::Value *Result = CGF.EmitToMemory(
785 CGF.Builder.CreateIsNotNull(RMWI, "tobool"), E->getType());
786 auto *I = CGF.Builder.CreateStore(Result, Dest);
787 CGF.addInstToCurrentSourceAtom(I, Result);
788 return;
789 }
790
791 case AtomicExpr::AO__atomic_clear: {
792 llvm::StoreInst *Store =
793 CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
794 Store->setAtomic(Order, Scope);
795 Store->setVolatile(E->isVolatile());
796 CGF.addInstToCurrentSourceAtom(Store, nullptr);
797 return;
798 }
799 }
800
801 llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
802 llvm::AtomicRMWInst *RMWI =
803 CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
804 RMWI->setVolatile(E->isVolatile());
805
806 // For __atomic_*_fetch operations, perform the operation again to
807 // determine the value which was written.
808 llvm::Value *Result = RMWI;
809 if (PostOpMinMax)
810 Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
811 E->getValueType()->isSignedIntegerType(),
812 RMWI, LoadVal1);
813 else if (PostOp)
814 Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
815 LoadVal1);
816 if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
817 E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
818 Result = CGF.Builder.CreateNot(Result);
819 auto *I = CGF.Builder.CreateStore(Result, Dest);
820 CGF.addInstToCurrentSourceAtom(I, Result);
821}
822
823// This function emits any expression (scalar, complex, or aggregate)
824// into a temporary alloca.
825static Address
826EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
827 Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
828 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
829 /*Init*/ true);
830 return DeclPtr;
831}
832
833static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
834 Address Ptr, Address Val1, Address Val2,
835 Address OriginalVal1, llvm::Value *IsWeak,
836 llvm::Value *FailureOrder, uint64_t Size,
837 llvm::AtomicOrdering Order, llvm::Value *Scope) {
838 auto ScopeModel = Expr->getScopeModel();
839
840 // LLVM atomic instructions always have sync scope. If clang atomic
841 // expression has no scope operand, use default LLVM sync scope.
842 if (!ScopeModel) {
843 llvm::SyncScope::ID SS;
844 if (CGF.getLangOpts().OpenCL)
845 // OpenCL approach is: "The functions that do not have memory_scope
846 // argument have the same semantics as the corresponding functions with
847 // the memory_scope argument set to memory_scope_device." See ref.:
848 // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
849 SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
850 SyncScope::OpenCLDevice,
851 Order, CGF.getLLVMContext());
852 else
853 SS = llvm::SyncScope::System;
854 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
855 FailureOrder, Size, Order, SS);
856 return;
857 }
858
859 // Handle constant scope.
860 if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
861 auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
862 CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
863 Order, CGF.CGM.getLLVMContext());
864 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
865 FailureOrder, Size, Order, SCID);
866 return;
867 }
868
869 // Handle non-constant scope.
870 auto &Builder = CGF.Builder;
871 auto Scopes = ScopeModel->getRuntimeValues();
872 llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
873 for (auto S : Scopes)
874 BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
875
876 llvm::BasicBlock *ContBB =
877 CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
878
879 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
880 // If unsupported sync scope is encountered at run time, assume a fallback
881 // sync scope value.
882 auto FallBack = ScopeModel->getFallBackValue();
883 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
884 for (auto S : Scopes) {
885 auto *B = BB[S];
886 if (S != FallBack)
887 SI->addCase(Builder.getInt32(S), B);
888
889 Builder.SetInsertPoint(B);
890 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
891 FailureOrder, Size, Order,
892 CGF.getTargetHooks().getLLVMSyncScopeID(
893 CGF.CGM.getLangOpts(), ScopeModel->map(S), Order,
894 CGF.getLLVMContext()));
895 Builder.CreateBr(ContBB);
896 }
897
898 Builder.SetInsertPoint(ContBB);
899}
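// Illustrative example (annotation): for an OpenCL call whose memory_scope
// argument is not a compile-time constant, e.g.
//
//   atomic_fetch_add_explicit(p, 1, memory_order_seq_cst, scope);
//
// the code above emits a switch over the runtime scope value with one basic
// block per scope the model supports, using the model's fallback scope for any
// unexpected value.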
900
901RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
903
904 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
905 QualType MemTy = AtomicTy;
906 if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
907 MemTy = AT->getValueType();
908 llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
909
910 Address Val1 = Address::invalid();
911 Address Val2 = Address::invalid();
912 Address Dest = Address::invalid();
913 Address Ptr = EmitPointerWithAlignment(E->getPtr());
914
915 if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
916 E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
917 LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
918 EmitAtomicInit(E->getVal1(), lvalue);
919 return RValue::get(nullptr);
920 }
921
922 auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
923 uint64_t Size = TInfo.Width.getQuantity();
924 unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
925
926 CharUnits MaxInlineWidth =
927 getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
928 DiagnosticsEngine &Diags = CGM.getDiags();
929 bool Misaligned = !Ptr.getAlignment().isMultipleOf(TInfo.Width);
930 bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
931 if (Misaligned) {
932 Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
933 << (int)TInfo.Width.getQuantity()
934 << (int)Ptr.getAlignment().getQuantity();
935 }
936 if (Oversized) {
937 Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
938 << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
939 }
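  // Illustrative example (annotation): packed layouts can under-align an
  // atomic object, e.g.
  //
  //   struct __attribute__((packed)) S { char c; _Atomic(int) i; };
  //
  // Accessing 's.i' atomically may then trigger the misaligned-operation
  // warning above, because the pointer's alignment (1 byte) is not a multiple
  // of the type's width (4 bytes).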
940
941 llvm::Value *Order = EmitScalarExpr(E->getOrder());
942 llvm::Value *Scope =
943 E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
944 bool ShouldCastToIntPtrTy = true;
945
946 switch (E->getOp()) {
947 case AtomicExpr::AO__c11_atomic_init:
948 case AtomicExpr::AO__opencl_atomic_init:
949 llvm_unreachable("Already handled above with EmitAtomicInit!");
950
951 case AtomicExpr::AO__atomic_load_n:
952 case AtomicExpr::AO__scoped_atomic_load_n:
953 case AtomicExpr::AO__c11_atomic_load:
954 case AtomicExpr::AO__opencl_atomic_load:
955 case AtomicExpr::AO__hip_atomic_load:
956 case AtomicExpr::AO__atomic_test_and_set:
957 case AtomicExpr::AO__atomic_clear:
958 break;
959
960 case AtomicExpr::AO__atomic_load:
961 case AtomicExpr::AO__scoped_atomic_load:
962 Dest = EmitPointerWithAlignment(E->getVal1());
963 break;
964
965 case AtomicExpr::AO__atomic_store:
966 case AtomicExpr::AO__scoped_atomic_store:
967 Val1 = EmitPointerWithAlignment(E->getVal1());
968 break;
969
970 case AtomicExpr::AO__atomic_exchange:
971 case AtomicExpr::AO__scoped_atomic_exchange:
972 Val1 = EmitPointerWithAlignment(E->getVal1());
973 Dest = EmitPointerWithAlignment(E->getVal2());
974 break;
975
976 case AtomicExpr::AO__atomic_compare_exchange:
977 case AtomicExpr::AO__atomic_compare_exchange_n:
978 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
979 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
980 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
981 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
982 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
983 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
984 case AtomicExpr::AO__scoped_atomic_compare_exchange:
985 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
986 Val1 = EmitPointerWithAlignment(E->getVal1());
987 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
988 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
989 Val2 = EmitPointerWithAlignment(E->getVal2());
990 else
991 Val2 = EmitValToTemp(*this, E->getVal2());
992 OrderFail = EmitScalarExpr(E->getOrderFail());
993 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
994 E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
995 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
996 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
997 IsWeak = EmitScalarExpr(E->getWeak());
998 break;
999
1000 case AtomicExpr::AO__c11_atomic_fetch_add:
1001 case AtomicExpr::AO__c11_atomic_fetch_sub:
1002 case AtomicExpr::AO__hip_atomic_fetch_add:
1003 case AtomicExpr::AO__hip_atomic_fetch_sub:
1004 case AtomicExpr::AO__opencl_atomic_fetch_add:
1005 case AtomicExpr::AO__opencl_atomic_fetch_sub:
1006 if (MemTy->isPointerType()) {
1007 // For pointer arithmetic, we're required to do a bit of math:
1008 // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
1009 // ... but only for the C11 builtins. The GNU builtins expect the
1010 // user to multiply by sizeof(T).
1011 QualType Val1Ty = E->getVal1()->getType();
1012 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
1013 CharUnits PointeeIncAmt =
1014 getContext().getTypeSizeInChars(MemTy->getPointeeType());
1015 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
1016 auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
1017 Val1 = Temp;
1018 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
1019 break;
1020 }
1021 [[fallthrough]];
1022 case AtomicExpr::AO__atomic_fetch_add:
1023 case AtomicExpr::AO__atomic_fetch_max:
1024 case AtomicExpr::AO__atomic_fetch_min:
1025 case AtomicExpr::AO__atomic_fetch_sub:
1026 case AtomicExpr::AO__atomic_add_fetch:
1027 case AtomicExpr::AO__atomic_max_fetch:
1028 case AtomicExpr::AO__atomic_min_fetch:
1029 case AtomicExpr::AO__atomic_sub_fetch:
1030 case AtomicExpr::AO__c11_atomic_fetch_max:
1031 case AtomicExpr::AO__c11_atomic_fetch_min:
1032 case AtomicExpr::AO__opencl_atomic_fetch_max:
1033 case AtomicExpr::AO__opencl_atomic_fetch_min:
1034 case AtomicExpr::AO__hip_atomic_fetch_max:
1035 case AtomicExpr::AO__hip_atomic_fetch_min:
1036 case AtomicExpr::AO__scoped_atomic_fetch_add:
1037 case AtomicExpr::AO__scoped_atomic_fetch_max:
1038 case AtomicExpr::AO__scoped_atomic_fetch_min:
1039 case AtomicExpr::AO__scoped_atomic_fetch_sub:
1040 case AtomicExpr::AO__scoped_atomic_add_fetch:
1041 case AtomicExpr::AO__scoped_atomic_max_fetch:
1042 case AtomicExpr::AO__scoped_atomic_min_fetch:
1043 case AtomicExpr::AO__scoped_atomic_sub_fetch:
1044 ShouldCastToIntPtrTy = !MemTy->isFloatingType();
1045 [[fallthrough]];
1046
1047 case AtomicExpr::AO__atomic_fetch_and:
1048 case AtomicExpr::AO__atomic_fetch_nand:
1049 case AtomicExpr::AO__atomic_fetch_or:
1050 case AtomicExpr::AO__atomic_fetch_xor:
1051 case AtomicExpr::AO__atomic_fetch_uinc:
1052 case AtomicExpr::AO__atomic_fetch_udec:
1053 case AtomicExpr::AO__atomic_and_fetch:
1054 case AtomicExpr::AO__atomic_nand_fetch:
1055 case AtomicExpr::AO__atomic_or_fetch:
1056 case AtomicExpr::AO__atomic_xor_fetch:
1057 case AtomicExpr::AO__atomic_store_n:
1058 case AtomicExpr::AO__atomic_exchange_n:
1059 case AtomicExpr::AO__c11_atomic_fetch_and:
1060 case AtomicExpr::AO__c11_atomic_fetch_nand:
1061 case AtomicExpr::AO__c11_atomic_fetch_or:
1062 case AtomicExpr::AO__c11_atomic_fetch_xor:
1063 case AtomicExpr::AO__c11_atomic_store:
1064 case AtomicExpr::AO__c11_atomic_exchange:
1065 case AtomicExpr::AO__hip_atomic_fetch_and:
1066 case AtomicExpr::AO__hip_atomic_fetch_or:
1067 case AtomicExpr::AO__hip_atomic_fetch_xor:
1068 case AtomicExpr::AO__hip_atomic_store:
1069 case AtomicExpr::AO__hip_atomic_exchange:
1070 case AtomicExpr::AO__opencl_atomic_fetch_and:
1071 case AtomicExpr::AO__opencl_atomic_fetch_or:
1072 case AtomicExpr::AO__opencl_atomic_fetch_xor:
1073 case AtomicExpr::AO__opencl_atomic_store:
1074 case AtomicExpr::AO__opencl_atomic_exchange:
1075 case AtomicExpr::AO__scoped_atomic_fetch_and:
1076 case AtomicExpr::AO__scoped_atomic_fetch_nand:
1077 case AtomicExpr::AO__scoped_atomic_fetch_or:
1078 case AtomicExpr::AO__scoped_atomic_fetch_xor:
1079 case AtomicExpr::AO__scoped_atomic_and_fetch:
1080 case AtomicExpr::AO__scoped_atomic_nand_fetch:
1081 case AtomicExpr::AO__scoped_atomic_or_fetch:
1082 case AtomicExpr::AO__scoped_atomic_xor_fetch:
1083 case AtomicExpr::AO__scoped_atomic_store_n:
1084 case AtomicExpr::AO__scoped_atomic_exchange_n:
1085 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
1086 case AtomicExpr::AO__scoped_atomic_fetch_udec:
1087 Val1 = EmitValToTemp(*this, E->getVal1());
1088 break;
1089 }
1090
1091 QualType RValTy = E->getType().getUnqualifiedType();
1092
1093 // The inlined atomics only function on iN types, where N is a power of 2. We
1094 // need to make sure (via temporaries if necessary) that all incoming values
1095 // are compatible.
1096 LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
1097 AtomicInfo Atomics(*this, AtomicVal);
1098
1099 Address OriginalVal1 = Val1;
1100 if (ShouldCastToIntPtrTy) {
1101 Ptr = Atomics.castToAtomicIntPointer(Ptr);
1102 if (Val1.isValid())
1103 Val1 = Atomics.convertToAtomicIntPointer(Val1);
1104 if (Val2.isValid())
1105 Val2 = Atomics.convertToAtomicIntPointer(Val2);
1106 }
1107 if (Dest.isValid()) {
1108 if (ShouldCastToIntPtrTy)
1109 Dest = Atomics.castToAtomicIntPointer(Dest);
1110 } else if (E->isCmpXChg())
1111 Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
1112 else if (!RValTy->isVoidType()) {
1113 Dest = Atomics.CreateTempAlloca();
1114 if (ShouldCastToIntPtrTy)
1115 Dest = Atomics.castToAtomicIntPointer(Dest);
1116 }
1117
1118 bool PowerOf2Size = (Size & (Size - 1)) == 0;
1119 bool UseLibcall = !PowerOf2Size || (Size > 16);
1120
1121 // For atomics larger than 16 bytes, emit a libcall from the frontend. This
1122 // avoids the overhead of dealing with excessively-large value types in IR.
1123 // Non-power-of-2 values also lower to libcall here, as they are not currently
1124 // permitted in IR instructions (although that constraint could be relaxed in
1125 // the future). For other cases where a libcall is required on a given
1126 // platform, we let the backend handle it (this includes handling for all of
1127 // the size-optimized libcall variants, which are only valid up to 16 bytes.)
1128 //
1129 // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
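  // Illustrative example (annotation): a 32-byte _Atomic struct exceeds the
  // 16-byte inline limit, so loading it is emitted as the generic,
  // size-parameterized runtime call
  //
  //   __atomic_load(32, &obj, &result, __ATOMIC_SEQ_CST)
  //
  // rather than as an LLVM 'load atomic' instruction.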
1130 if (UseLibcall) {
1131 CallArgList Args;
1132 // For non-optimized library calls, the size is the first parameter.
1133 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1134 getContext().getSizeType());
1135
1136 // The atomic address is the second parameter.
1137 // The OpenCL atomic library functions only accept pointer arguments to
1138 // generic address space.
1139 auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1140 if (!E->isOpenCL())
1141 return V;
1142 auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1143 if (AS == LangAS::opencl_generic)
1144 return V;
1145 auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1146 auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
1147
1148 return getTargetHooks().performAddrSpaceCast(*this, V, AS, DestType,
1149 false);
1150 };
1151
1152 Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
1153 E->getPtr()->getType())),
1154 getContext().VoidPtrTy);
1155
1156 // The next 1-3 parameters are op-dependent.
1157 std::string LibCallName;
1158 QualType RetTy;
1159 bool HaveRetTy = false;
1160 switch (E->getOp()) {
1161 case AtomicExpr::AO__c11_atomic_init:
1162 case AtomicExpr::AO__opencl_atomic_init:
1163 llvm_unreachable("Already handled!");
1164
1165 // There is only one libcall for compare and exchange, because there is no
1166 // optimisation benefit possible from a libcall version of a weak compare
1167 // and exchange.
1168 // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1169 // void *desired, int success, int failure)
1170 case AtomicExpr::AO__atomic_compare_exchange:
1171 case AtomicExpr::AO__atomic_compare_exchange_n:
1172 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1173 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1174 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1175 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1176 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1177 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1178 case AtomicExpr::AO__scoped_atomic_compare_exchange:
1179 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
1180 LibCallName = "__atomic_compare_exchange";
1181 RetTy = getContext().BoolTy;
1182 HaveRetTy = true;
1183 Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1184 E->getVal1()->getType())),
1185 getContext().VoidPtrTy);
1186 Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
1187 E->getVal2()->getType())),
1188 getContext().VoidPtrTy);
1189 Args.add(RValue::get(Order), getContext().IntTy);
1190 Order = OrderFail;
1191 break;
1192 // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1193 // int order)
1194 case AtomicExpr::AO__atomic_exchange:
1195 case AtomicExpr::AO__atomic_exchange_n:
1196 case AtomicExpr::AO__c11_atomic_exchange:
1197 case AtomicExpr::AO__hip_atomic_exchange:
1198 case AtomicExpr::AO__opencl_atomic_exchange:
1199 case AtomicExpr::AO__scoped_atomic_exchange:
1200 case AtomicExpr::AO__scoped_atomic_exchange_n:
1201 LibCallName = "__atomic_exchange";
1202 Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1203 E->getVal1()->getType())),
1204 getContext().VoidPtrTy);
1205 break;
1206 // void __atomic_store(size_t size, void *mem, void *val, int order)
1207 case AtomicExpr::AO__atomic_store:
1208 case AtomicExpr::AO__atomic_store_n:
1209 case AtomicExpr::AO__c11_atomic_store:
1210 case AtomicExpr::AO__hip_atomic_store:
1211 case AtomicExpr::AO__opencl_atomic_store:
1212 case AtomicExpr::AO__scoped_atomic_store:
1213 case AtomicExpr::AO__scoped_atomic_store_n:
1214 LibCallName = "__atomic_store";
1215 RetTy = getContext().VoidTy;
1216 HaveRetTy = true;
1217 Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1218 E->getVal1()->getType())),
1219 getContext().VoidPtrTy);
1220 break;
1221 // void __atomic_load(size_t size, void *mem, void *return, int order)
1222 case AtomicExpr::AO__atomic_load:
1223 case AtomicExpr::AO__atomic_load_n:
1224 case AtomicExpr::AO__c11_atomic_load:
1225 case AtomicExpr::AO__hip_atomic_load:
1226 case AtomicExpr::AO__opencl_atomic_load:
1227 case AtomicExpr::AO__scoped_atomic_load:
1228 case AtomicExpr::AO__scoped_atomic_load_n:
1229 LibCallName = "__atomic_load";
1230 break;
1231 case AtomicExpr::AO__atomic_add_fetch:
1232 case AtomicExpr::AO__scoped_atomic_add_fetch:
1233 case AtomicExpr::AO__atomic_fetch_add:
1234 case AtomicExpr::AO__c11_atomic_fetch_add:
1235 case AtomicExpr::AO__hip_atomic_fetch_add:
1236 case AtomicExpr::AO__opencl_atomic_fetch_add:
1237 case AtomicExpr::AO__scoped_atomic_fetch_add:
1238 case AtomicExpr::AO__atomic_and_fetch:
1239 case AtomicExpr::AO__scoped_atomic_and_fetch:
1240 case AtomicExpr::AO__atomic_fetch_and:
1241 case AtomicExpr::AO__c11_atomic_fetch_and:
1242 case AtomicExpr::AO__hip_atomic_fetch_and:
1243 case AtomicExpr::AO__opencl_atomic_fetch_and:
1244 case AtomicExpr::AO__scoped_atomic_fetch_and:
1245 case AtomicExpr::AO__atomic_or_fetch:
1246 case AtomicExpr::AO__scoped_atomic_or_fetch:
1247 case AtomicExpr::AO__atomic_fetch_or:
1248 case AtomicExpr::AO__c11_atomic_fetch_or:
1249 case AtomicExpr::AO__hip_atomic_fetch_or:
1250 case AtomicExpr::AO__opencl_atomic_fetch_or:
1251 case AtomicExpr::AO__scoped_atomic_fetch_or:
1252 case AtomicExpr::AO__atomic_sub_fetch:
1253 case AtomicExpr::AO__scoped_atomic_sub_fetch:
1254 case AtomicExpr::AO__atomic_fetch_sub:
1255 case AtomicExpr::AO__c11_atomic_fetch_sub:
1256 case AtomicExpr::AO__hip_atomic_fetch_sub:
1257 case AtomicExpr::AO__opencl_atomic_fetch_sub:
1258 case AtomicExpr::AO__scoped_atomic_fetch_sub:
1259 case AtomicExpr::AO__atomic_xor_fetch:
1260 case AtomicExpr::AO__scoped_atomic_xor_fetch:
1261 case AtomicExpr::AO__atomic_fetch_xor:
1262 case AtomicExpr::AO__c11_atomic_fetch_xor:
1263 case AtomicExpr::AO__hip_atomic_fetch_xor:
1264 case AtomicExpr::AO__opencl_atomic_fetch_xor:
1265 case AtomicExpr::AO__scoped_atomic_fetch_xor:
1266 case AtomicExpr::AO__atomic_nand_fetch:
1267 case AtomicExpr::AO__atomic_fetch_nand:
1268 case AtomicExpr::AO__c11_atomic_fetch_nand:
1269 case AtomicExpr::AO__scoped_atomic_fetch_nand:
1270 case AtomicExpr::AO__scoped_atomic_nand_fetch:
1271 case AtomicExpr::AO__atomic_min_fetch:
1272 case AtomicExpr::AO__atomic_fetch_min:
1273 case AtomicExpr::AO__c11_atomic_fetch_min:
1274 case AtomicExpr::AO__hip_atomic_fetch_min:
1275 case AtomicExpr::AO__opencl_atomic_fetch_min:
1276 case AtomicExpr::AO__scoped_atomic_fetch_min:
1277 case AtomicExpr::AO__scoped_atomic_min_fetch:
1278 case AtomicExpr::AO__atomic_max_fetch:
1279 case AtomicExpr::AO__atomic_fetch_max:
1280 case AtomicExpr::AO__c11_atomic_fetch_max:
1281 case AtomicExpr::AO__hip_atomic_fetch_max:
1282 case AtomicExpr::AO__opencl_atomic_fetch_max:
1283 case AtomicExpr::AO__scoped_atomic_fetch_max:
1284 case AtomicExpr::AO__scoped_atomic_max_fetch:
1285 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
1286 case AtomicExpr::AO__scoped_atomic_fetch_udec:
1287 case AtomicExpr::AO__atomic_test_and_set:
1288 case AtomicExpr::AO__atomic_clear:
1289 case AtomicExpr::AO__atomic_fetch_uinc:
1290 case AtomicExpr::AO__atomic_fetch_udec:
1291 llvm_unreachable("Integral atomic operations always become atomicrmw!");
1292 }
1293
1294 if (E->isOpenCL()) {
1295 LibCallName =
1296 std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
1297 }
1298 // By default, assume we return a value of the atomic type.
1299 if (!HaveRetTy) {
1300 // Value is returned through parameter before the order.
1301 RetTy = getContext().VoidTy;
1302 Args.add(RValue::get(
1303 CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
1304 getContext().VoidPtrTy);
1305 }
1306 // Order is always the last parameter.
1307 Args.add(RValue::get(Order),
1308 getContext().IntTy);
1309 if (E->isOpenCL())
1310 Args.add(RValue::get(Scope), getContext().IntTy);
1311
1312 RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1313 // The value is returned directly from the libcall.
1314 if (E->isCmpXChg())
1315 return Res;
1316
1317 if (RValTy->isVoidType())
1318 return RValue::get(nullptr);
1319
1320 return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1321 RValTy, E->getExprLoc());
1322 }
1323
1324 bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1325 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1326 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
1327 E->getOp() == AtomicExpr::AO__atomic_store ||
1328 E->getOp() == AtomicExpr::AO__atomic_store_n ||
1329 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
1330 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
1331 E->getOp() == AtomicExpr::AO__atomic_clear;
1332 bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1333 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1334 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
1335 E->getOp() == AtomicExpr::AO__atomic_load ||
1336 E->getOp() == AtomicExpr::AO__atomic_load_n ||
1337 E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
1338 E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
1339
1340 if (isa<llvm::ConstantInt>(Order)) {
1341 auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1342 // We should not ever get to a case where the ordering isn't a valid C ABI
1343 // value, but it's hard to enforce that in general.
1344 if (llvm::isValidAtomicOrderingCABI(ord))
1345 switch ((llvm::AtomicOrderingCABI)ord) {
1346 case llvm::AtomicOrderingCABI::relaxed:
1347 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1348 OrderFail, Size, llvm::AtomicOrdering::Monotonic, Scope);
1349 break;
1350 case llvm::AtomicOrderingCABI::consume:
1351 case llvm::AtomicOrderingCABI::acquire:
1352 if (IsStore)
1353 break; // Avoid crashing on code with undefined behavior
1354 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1355 OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
1356 break;
1357 case llvm::AtomicOrderingCABI::release:
1358 if (IsLoad)
1359 break; // Avoid crashing on code with undefined behavior
1360 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1361 OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
1362 break;
1363 case llvm::AtomicOrderingCABI::acq_rel:
1364 if (IsLoad || IsStore)
1365 break; // Avoid crashing on code with undefined behavior
1366 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1367 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease,
1368 Scope);
1369 break;
1370 case llvm::AtomicOrderingCABI::seq_cst:
1371 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1372 OrderFail, Size,
1373 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1374 break;
1375 }
1376 if (RValTy->isVoidType())
1377 return RValue::get(nullptr);
1378
1379 return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1380 RValTy, E->getExprLoc());
1381 }
1382
1383 // Long case, when Order isn't obviously constant.
1384
1385 // Create all the relevant BB's
1386 llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1387 *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1388 *SeqCstBB = nullptr;
1389 MonotonicBB = createBasicBlock("monotonic", CurFn);
1390 if (!IsStore)
1391 AcquireBB = createBasicBlock("acquire", CurFn);
1392 if (!IsLoad)
1393 ReleaseBB = createBasicBlock("release", CurFn);
1394 if (!IsLoad && !IsStore)
1395 AcqRelBB = createBasicBlock("acqrel", CurFn);
1396 SeqCstBB = createBasicBlock("seqcst", CurFn);
1397 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1398
1399 // Create the switch for the split
1400 // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1401 // doesn't matter unless someone is crazy enough to use something that
1402 // doesn't fold to a constant for the ordering.
1403 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1404 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1405
1406 // Emit all the different atomics
1407 Builder.SetInsertPoint(MonotonicBB);
1408 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
1409 Size, llvm::AtomicOrdering::Monotonic, Scope);
1410 Builder.CreateBr(ContBB);
1411 if (!IsStore) {
1412 Builder.SetInsertPoint(AcquireBB);
1413 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1414 OrderFail, Size, llvm::AtomicOrdering::Acquire, Scope);
1415 Builder.CreateBr(ContBB);
1416 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1417 AcquireBB);
1418 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1419 AcquireBB);
1420 }
1421 if (!IsLoad) {
1422 Builder.SetInsertPoint(ReleaseBB);
1423 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1424 OrderFail, Size, llvm::AtomicOrdering::Release, Scope);
1425 Builder.CreateBr(ContBB);
1426 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1427 ReleaseBB);
1428 }
1429 if (!IsLoad && !IsStore) {
1430 Builder.SetInsertPoint(AcqRelBB);
1431 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak,
1432 OrderFail, Size, llvm::AtomicOrdering::AcquireRelease, Scope);
1433 Builder.CreateBr(ContBB);
1434 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1435 AcqRelBB);
1436 }
1437 Builder.SetInsertPoint(SeqCstBB);
1438 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, OriginalVal1, IsWeak, OrderFail,
1439 Size, llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1440 Builder.CreateBr(ContBB);
1441 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1442 SeqCstBB);
1443
1444 // Cleanup and return
1445 Builder.SetInsertPoint(ContBB);
1446 if (RValTy->isVoidType())
1447 return RValue::get(nullptr);
1448
1449 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1450 return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1451 RValTy, E->getExprLoc());
1452}
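// Editorial note (illustration, not part of CGAtomic.cpp): the basic-block
// switch above is only reached when the memory order is not a compile-time
// constant. A minimal C sketch that exercises this path (the function name is
// hypothetical):
//
//   int load_with_runtime_order(int *p, int order) {
//     // 'order' is a run-time value, so the switch over the C ABI ordering
//     // values selects between the monotonic/acquire/seq_cst blocks.
//     return __atomic_load_n(p, order);
//   }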
1453
1454Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
1455 llvm::IntegerType *ty =
1456 llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1457 return addr.withElementType(ty);
1458}
1459
1460Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1461 llvm::Type *Ty = Addr.getElementType();
1462 uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1463 if (SourceSizeInBits != AtomicSizeInBits) {
1464 Address Tmp = CreateTempAlloca();
1465 CGF.Builder.CreateMemSet(
1466 Tmp.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
1467 CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
1468 Tmp.getAlignment().getAsAlign());
1469
1470 CGF.Builder.CreateMemCpy(Tmp, Addr,
1471 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1472 Addr = Tmp;
1473 }
1474
1475 return castToAtomicIntPointer(Addr);
1476}
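// Editorial note (illustration, not part of CGAtomic.cpp): the memset/memcpy
// path above handles values whose in-memory size is smaller than the padded
// atomic width. A C sketch, assuming a typical target where the 3-byte value
// is widened to a 4-byte atomic representation:
//
//   struct S3 { char a, b, c; };   // 3 bytes of value
//   _Atomic struct S3 g;           // usually stored and operated on as 4 bytes,
//                                  // so the extra byte must be zero-filled first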
1477
1478RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1479 AggValueSlot resultSlot,
1480 SourceLocation loc,
1481 bool asValue) const {
1482 if (LVal.isSimple()) {
1483 if (EvaluationKind == TEK_Aggregate)
1484 return resultSlot.asRValue();
1485
1486 // Drill into the padding structure if we have one.
1487 if (hasPadding())
1488 addr = CGF.Builder.CreateStructGEP(addr, 0);
1489
1490 // Otherwise, just convert the temporary to an r-value using the
1491 // normal conversion routine.
1492 return CGF.convertTempToRValue(addr, getValueType(), loc);
1493 }
1494 if (!asValue)
1495 // Get RValue from temp memory as atomic for non-simple lvalues
1496 return RValue::get(CGF.Builder.CreateLoad(addr));
1497 if (LVal.isBitField())
1498 return CGF.EmitLoadOfBitfieldLValue(
1499 LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1500 LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1501 if (LVal.isVectorElt())
1502 return CGF.EmitLoadOfLValue(
1503 LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1504 LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1505 assert(LVal.isExtVectorElt());
1506 return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1507 addr, LVal.getExtVectorElts(), LVal.getType(),
1508 LVal.getBaseInfo(), TBAAAccessInfo()));
1509}
1510
1511/// Return true if \param ValTy is a type that should be cast to an integer
1512/// around the atomic memory operation. If \param CmpXchg is true, a
1513/// floating-point type is also cast, since the cmpxchg instruction cannot
1514/// take floating-point operands. TODO: Allow compare-and-exchange and FP - see
1515/// comment in AtomicExpandPass.cpp.
1516static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
1517 if (ValTy->isFloatingPointTy())
1518 return ValTy->isX86_FP80Ty() || CmpXchg;
1519 return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
1520}
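// Editorial note (illustration, not part of CGAtomic.cpp): expected results of
// the predicate above on a few representative LLVM types:
//
//   shouldCastToInt(llvm::Type::getDoubleTy(Ctx),   /*CmpXchg=*/false) -> false
//   shouldCastToInt(llvm::Type::getDoubleTy(Ctx),   /*CmpXchg=*/true)  -> true
//   shouldCastToInt(llvm::Type::getX86_FP80Ty(Ctx), /*CmpXchg=*/false) -> true
//   shouldCastToInt(llvm::Type::getInt32Ty(Ctx),    /*CmpXchg=*/true)  -> false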
1521
1522RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
1523 AggValueSlot ResultSlot,
1524 SourceLocation Loc, bool AsValue,
1525 bool CmpXchg) const {
1526 // Try to avoid going through memory in some easy cases.
1527 assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
1528 Val->getType()->isIEEELikeFPTy()) &&
1529 "Expected integer, pointer or floating point value when converting "
1530 "result.");
1531 if (getEvaluationKind() == TEK_Scalar &&
1532 (((!LVal.isBitField() ||
1533 LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1534 !hasPadding()) ||
1535 !AsValue)) {
1536 auto *ValTy = AsValue
1537 ? CGF.ConvertTypeForMem(ValueTy)
1538 : getAtomicAddress().getElementType();
1539 if (!shouldCastToInt(ValTy, CmpXchg)) {
1540 assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
1541 "Different integer types.");
1542 return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
1543 }
1544 if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
1545 return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
1546 }
1547
1548 // Create a temporary. This needs to be big enough to hold the
1549 // atomic integer.
1550 Address Temp = Address::invalid();
1551 bool TempIsVolatile = false;
1552 if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1553 assert(!ResultSlot.isIgnored());
1554 Temp = ResultSlot.getAddress();
1555 TempIsVolatile = ResultSlot.isVolatile();
1556 } else {
1557 Temp = CreateTempAlloca();
1558 }
1559
1560 // Slam the integer into the temporary.
1561 Address CastTemp = castToAtomicIntPointer(Temp);
1562 CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);
1563
1564 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1565}
1566
1567void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1568 llvm::AtomicOrdering AO, bool) {
1569 // void __atomic_load(size_t size, void *mem, void *ret, int order);
1570 CallArgList Args;
1571 Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1572 Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1573 Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
1574 Args.add(
1575 RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1576 CGF.getContext().IntTy);
1577 emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1578}
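// Editorial note (illustration, not part of CGAtomic.cpp): the generic
// __atomic_load libcall above is what an over-sized atomic ultimately hits.
// A C sketch, assuming a target whose maximum lock-free size is below 64 bytes:
//
//   struct Big { char bytes[64]; };
//   void copy_out(_Atomic struct Big *src, struct Big *dst) {
//     *dst = __c11_atomic_load(src, __ATOMIC_SEQ_CST);
//     // lowers, roughly, to: __atomic_load(64, src, dst, __ATOMIC_SEQ_CST)
//   }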
1579
1580llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1581 bool IsVolatile, bool CmpXchg) {
1582 // Okay, we're doing this natively.
1583 Address Addr = getAtomicAddress();
1584 if (shouldCastToInt(Addr.getElementType(), CmpXchg))
1585 Addr = castToAtomicIntPointer(Addr);
1586 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1587 Load->setAtomic(AO);
1588
1589 // Other decoration.
1590 if (IsVolatile)
1591 Load->setVolatile(true);
1592 CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1593 return Load;
1594}
1595
1596/// An LValue is a candidate for having its loads and stores be made atomic if
1597/// we are operating under /volatile:ms *and* the LValue itself is volatile and
1598/// such an operation can be performed without a libcall.
1599bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1600 if (!CGM.getLangOpts().MSVolatile) return false;
1601 AtomicInfo AI(*this, LV);
1602 bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1603 // An atomic is inline if we don't need to use a libcall.
1604 bool AtomicIsInline = !AI.shouldUseLibcall();
1605 // MSVC doesn't seem to do this for types wider than a pointer.
1606 if (getContext().getTypeSize(LV.getType()) >
1607 getContext().getTypeSize(getContext().getIntPtrType()))
1608 return false;
1609 return IsVolatile && AtomicIsInline;
1610}
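// Editorial note (illustration, not part of CGAtomic.cpp): the check above only
// fires under MSVC-compatible volatile semantics (clang-cl /volatile:ms, i.e.
// -fms-volatile). A C sketch of code it affects:
//
//   volatile int Flag;
//   int read_flag(void) { return Flag; }   // emitted as an atomic acquire load
//   void set_flag(void) { Flag = 1; }      // emitted as an atomic release store
//
// provided the access is no wider than a pointer and needs no libcall.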
1611
1612RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1613 AggValueSlot Slot) {
1614 llvm::AtomicOrdering AO;
1615 bool IsVolatile = LV.isVolatileQualified();
1616 if (LV.getType()->isAtomicType()) {
1617 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1618 } else {
1619 AO = llvm::AtomicOrdering::Acquire;
1620 IsVolatile = true;
1621 }
1622 return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1623}
1624
1625RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1626 bool AsValue, llvm::AtomicOrdering AO,
1627 bool IsVolatile) {
1628 // Check whether we should use a library call.
1629 if (shouldUseLibcall()) {
1630 Address TempAddr = Address::invalid();
1631 if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1632 assert(getEvaluationKind() == TEK_Aggregate);
1633 TempAddr = ResultSlot.getAddress();
1634 } else
1635 TempAddr = CreateTempAlloca();
1636
1637 EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);
1638
1639 // Okay, turn that back into the original value or whole atomic (for
1640 // non-simple lvalues) type.
1641 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1642 }
1643
1644 // Okay, we're doing this natively.
1645 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1646
1647 // If we're ignoring an aggregate return, don't do anything.
1648 if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1649 return RValue::getAggregate(Address::invalid(), false);
1650
1651 // Okay, turn that back into the original value or atomic (for non-simple
1652 // lvalues) type.
1653 return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1654}
1655
1656/// Emit a load from an l-value of atomic type. Note that the r-value
1657/// we produce is an r-value of the atomic *value* type.
1658RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1659 llvm::AtomicOrdering AO, bool IsVolatile,
1660 AggValueSlot resultSlot) {
1661 AtomicInfo Atomics(*this, src);
1662 return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1663 IsVolatile);
1664}
1665
1666/// Copy an r-value into memory as part of storing to an atomic type.
1667/// This needs to create a bit-pattern suitable for atomic operations.
1668void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1669 assert(LVal.isSimple());
1670 // If we have an r-value, the rvalue should be of the atomic type,
1671 // which means that the caller is responsible for having zeroed
1672 // any padding. Just do an aggregate copy of that type.
1673 if (rvalue.isAggregate()) {
1674 LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1675 LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1676 getAtomicType());
1677 bool IsVolatile = rvalue.isVolatileQualified() ||
1678 LVal.isVolatileQualified();
1679 CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1680 AggValueSlot::DoesNotOverlap, IsVolatile);
1681 return;
1682 }
1683
1684 // Okay, otherwise we're copying stuff.
1685
1686 // Zero out the buffer if necessary.
1687 emitMemSetZeroIfNecessary();
1688
1689 // Drill past the padding if present.
1690 LValue TempLVal = projectValue();
1691
1692 // Okay, store the rvalue in.
1693 if (rvalue.isScalar()) {
1694 CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1695 } else {
1696 CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1697 }
1698}
1699
1700
1701/// Materialize an r-value into memory for the purposes of storing it
1702/// to an atomic type.
1703Address AtomicInfo::materializeRValue(RValue rvalue) const {
1704 // Aggregate r-values are already in memory, and EmitAtomicStore
1705 // requires them to be values of the atomic type.
1706 if (rvalue.isAggregate())
1707 return rvalue.getAggregateAddress();
1708
1709 // Otherwise, make a temporary and materialize into it.
1710 LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1711 AtomicInfo Atomics(CGF, TempLV);
1712 Atomics.emitCopyIntoMemory(rvalue);
1713 return TempLV.getAddress();
1714}
1715
1716llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
1717 if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
1718 return RVal.getScalarVal();
1719 return nullptr;
1720}
1721
1722llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
1723 // If we've got a scalar value of the right size, try to avoid going
1724 // through memory. Floats get cast if needed by AtomicExpandPass.
1725 if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
1726 if (!shouldCastToInt(Value->getType(), CmpXchg))
1727 return CGF.EmitToMemory(Value, ValueTy);
1728 else {
1729 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1730 CGF.getLLVMContext(),
1731 LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1732 if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1733 return CGF.Builder.CreateBitCast(Value, InputIntTy);
1734 }
1735 }
1736 // Otherwise, we need to go through memory.
1737 // Put the r-value in memory.
1738 Address Addr = materializeRValue(RVal);
1739
1740 // Cast the temporary to the atomic int type and pull a value out.
1741 Addr = castToAtomicIntPointer(Addr);
1742 return CGF.Builder.CreateLoad(Addr);
1743}
1744
1745std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1746 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1747 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1748 // Do the atomic compare-and-exchange.
1749 Address Addr = getAtomicAddressAsAtomicIntPointer();
1750 auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
1751 Success, Failure);
1752 // Other decoration.
1753 Inst->setVolatile(LVal.isVolatileQualified());
1754 Inst->setWeak(IsWeak);
1755
1756 // Okay, turn that back into the original value type.
1757 auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1758 auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1759 return std::make_pair(PreviousVal, SuccessFailureVal);
1760}
1761
1762llvm::Value *
1763AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1764 llvm::Value *DesiredAddr,
1765 llvm::AtomicOrdering Success,
1766 llvm::AtomicOrdering Failure) {
1767 // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1768 // void *desired, int success, int failure);
1769 CallArgList Args;
1770 Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1771 Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1772 Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
1773 Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
1774 Args.add(RValue::get(
1775 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1776 CGF.getContext().IntTy);
1777 Args.add(RValue::get(
1778 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1779 CGF.getContext().IntTy);
1780 auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1781 CGF.getContext().BoolTy, Args);
1782
1783 return SuccessFailureRVal.getScalarVal();
1784}
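// Editorial note (illustration, not part of CGAtomic.cpp): for a type that is
// not lock-free on typical targets, the libcall above is the libatomic entry
// point (prototype as documented in the comment above). A C sketch of source
// code that lowers to it:
//
//   struct Pair { long a, b, c; };   // too large to be lock-free
//   _Bool try_swap(struct Pair *obj, struct Pair *expected, struct Pair *desired) {
//     return __atomic_compare_exchange(obj, expected, desired,
//                                      /*weak=*/0, __ATOMIC_SEQ_CST,
//                                      __ATOMIC_SEQ_CST);
//   }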
1785
1786std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1787 RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1788 llvm::AtomicOrdering Failure, bool IsWeak) {
1789 // Check whether we should use a library call.
1790 if (shouldUseLibcall()) {
1791 // Produce a source address.
1792 Address ExpectedAddr = materializeRValue(Expected);
1793 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1794 llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
1795 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
1796 Success, Failure);
1797 return std::make_pair(
1798 convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1799 SourceLocation(), /*AsValue=*/false),
1800 Res);
1801 }
1802
1803 // If we've got a scalar value of the right size, try to avoid going
1804 // through memory.
1805 auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
1806 auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
1807 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1808 Failure, IsWeak);
1809 return std::make_pair(
1810 ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1811 SourceLocation(), /*AsValue=*/false,
1812 /*CmpXchg=*/true),
1813 Res.second);
1814}
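// Editorial note (illustration, not part of CGAtomic.cpp): the pair returned
// above carries C11 compare_exchange semantics: first the value actually seen
// in memory, then the success flag. Sketch in C (needs <stdatomic.h>):
//
//   _Bool try_claim(_Atomic int *p) {
//     int expected = 0;
//     // strong CAS: IsWeak=false, so no spurious failures
//     return atomic_compare_exchange_strong(p, &expected, 1);
//   }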
1815
1816static void
1817EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1818 const llvm::function_ref<RValue(RValue)> &UpdateOp,
1819 Address DesiredAddr) {
1820 RValue UpRVal;
1821 LValue AtomicLVal = Atomics.getAtomicLValue();
1822 LValue DesiredLVal;
1823 if (AtomicLVal.isSimple()) {
1824 UpRVal = OldRVal;
1825 DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1826 } else {
1827 // Build new lvalue for temp address.
1828 Address Ptr = Atomics.materializeRValue(OldRVal);
1829 LValue UpdateLVal;
1830 if (AtomicLVal.isBitField()) {
1831 UpdateLVal =
1832 LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1833 AtomicLVal.getType(),
1834 AtomicLVal.getBaseInfo(),
1835 AtomicLVal.getTBAAInfo());
1836 DesiredLVal =
1837 LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1838 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1839 AtomicLVal.getTBAAInfo());
1840 } else if (AtomicLVal.isVectorElt()) {
1841 UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1842 AtomicLVal.getType(),
1843 AtomicLVal.getBaseInfo(),
1844 AtomicLVal.getTBAAInfo());
1845 DesiredLVal = LValue::MakeVectorElt(
1846 DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1847 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1848 } else {
1849 assert(AtomicLVal.isExtVectorElt());
1850 UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1851 AtomicLVal.getType(),
1852 AtomicLVal.getBaseInfo(),
1853 AtomicLVal.getTBAAInfo());
1854 DesiredLVal = LValue::MakeExtVectorElt(
1855 DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1856 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1857 }
1858 UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1859 }
1860 // Store new value in the corresponding memory area.
1861 RValue NewRVal = UpdateOp(UpRVal);
1862 if (NewRVal.isScalar()) {
1863 CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1864 } else {
1865 assert(NewRVal.isComplex());
1866 CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1867 /*isInit=*/false);
1868 }
1869}
1870
1871void AtomicInfo::EmitAtomicUpdateLibcall(
1872 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1873 bool IsVolatile) {
1874 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1875
1876 Address ExpectedAddr = CreateTempAlloca();
1877
1878 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1879 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1880 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1881 CGF.EmitBlock(ContBB);
1882 Address DesiredAddr = CreateTempAlloca();
1883 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1884 requiresMemSetZero(getAtomicAddress().getElementType())) {
1885 auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1886 CGF.Builder.CreateStore(OldVal, DesiredAddr);
1887 }
1888 auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1889 AggValueSlot::ignored(),
1890 SourceLocation(), /*AsValue=*/false);
1891 EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1892 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1893 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1894 auto *Res =
1895 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1896 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1897 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1898}
1899
1900void AtomicInfo::EmitAtomicUpdateOp(
1901 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1902 bool IsVolatile) {
1903 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1904
1905 // Do the atomic load.
1906 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1907 // For non-simple lvalues, perform the compare-and-swap procedure.
1908 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1909 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1910 auto *CurBB = CGF.Builder.GetInsertBlock();
1911 CGF.EmitBlock(ContBB);
1912 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1913 /*NumReservedValues=*/2);
1914 PHI->addIncoming(OldVal, CurBB);
1915 Address NewAtomicAddr = CreateTempAlloca();
1916 Address NewAtomicIntAddr =
1917 shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
1918 ? castToAtomicIntPointer(NewAtomicAddr)
1919 : NewAtomicAddr;
1920
1921 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1922 requiresMemSetZero(getAtomicAddress().getElementType())) {
1923 CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1924 }
1925 auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
1926 SourceLocation(), /*AsValue=*/false,
1927 /*CmpXchg=*/true);
1928 EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1929 auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1930 // Try to write the new value using a cmpxchg operation.
1931 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1932 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1933 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1934 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1935}
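// Editorial note (illustration, not part of CGAtomic.cpp): the PHI + cmpxchg
// structure built above is the classic load/compare-exchange retry loop. A
// C-level sketch of what it implements (compute_new_value is a hypothetical
// placeholder for UpdateOp; needs <stdatomic.h>):
//
//   void update(_Atomic int *p) {
//     int old = atomic_load_explicit(p, memory_order_relaxed);
//     int desired;
//     do {
//       desired = compute_new_value(old);
//     } while (!atomic_compare_exchange_strong(p, &old, desired));
//   }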
1936
1937static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1938 RValue UpdateRVal, Address DesiredAddr) {
1939 LValue AtomicLVal = Atomics.getAtomicLValue();
1940 LValue DesiredLVal;
1941 // Build new lvalue for temp address.
1942 if (AtomicLVal.isBitField()) {
1943 DesiredLVal =
1944 LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1945 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1946 AtomicLVal.getTBAAInfo());
1947 } else if (AtomicLVal.isVectorElt()) {
1948 DesiredLVal =
1949 LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1950 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1951 AtomicLVal.getTBAAInfo());
1952 } else {
1953 assert(AtomicLVal.isExtVectorElt());
1954 DesiredLVal = LValue::MakeExtVectorElt(
1955 DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1956 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1957 }
1958 // Store new value in the corresponding memory area.
1959 assert(UpdateRVal.isScalar());
1960 CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1961}
1962
1963void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1964 RValue UpdateRVal, bool IsVolatile) {
1965 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1966
1967 Address ExpectedAddr = CreateTempAlloca();
1968
1969 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1970 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1971 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1972 CGF.EmitBlock(ContBB);
1973 Address DesiredAddr = CreateTempAlloca();
1974 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1975 requiresMemSetZero(getAtomicAddress().getElementType())) {
1976 auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1977 CGF.Builder.CreateStore(OldVal, DesiredAddr);
1978 }
1979 EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1980 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1981 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1982 auto *Res =
1983 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1984 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1985 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1986}
1987
1988void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1989 bool IsVolatile) {
1990 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1991
1992 // Do the atomic load.
1993 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1994 // For non-simple lvalues, perform the compare-and-swap procedure.
1995 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1996 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1997 auto *CurBB = CGF.Builder.GetInsertBlock();
1998 CGF.EmitBlock(ContBB);
1999 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
2000 /*NumReservedValues=*/2);
2001 PHI->addIncoming(OldVal, CurBB);
2002 Address NewAtomicAddr = CreateTempAlloca();
2003 Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
2004 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
2005 requiresMemSetZero(getAtomicAddress().getElementType())) {
2006 CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
2007 }
2008 EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
2009 auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
2010 // Try to write the new value using a cmpxchg operation.
2011 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
2012 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
2013 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
2014 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
2015}
2016
2017void AtomicInfo::EmitAtomicUpdate(
2018 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
2019 bool IsVolatile) {
2020 if (shouldUseLibcall()) {
2021 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
2022 } else {
2023 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
2024 }
2025}
2026
2027void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
2028 bool IsVolatile) {
2029 if (shouldUseLibcall()) {
2030 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
2031 } else {
2032 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
2033 }
2034}
2035
2036void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
2037 bool isInit) {
2038 bool IsVolatile = lvalue.isVolatileQualified();
2039 llvm::AtomicOrdering AO;
2040 if (lvalue.getType()->isAtomicType()) {
2041 AO = llvm::AtomicOrdering::SequentiallyConsistent;
2042 } else {
2043 AO = llvm::AtomicOrdering::Release;
2044 IsVolatile = true;
2045 }
2046 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
2047}
2048
2049/// Emit a store to an l-value of atomic type.
2050///
2051/// Note that the r-value is expected to be an r-value *of the atomic
2052/// type*; this means that for aggregate r-values, it should include
2053/// storage for any padding that was necessary.
2054void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
2055 llvm::AtomicOrdering AO, bool IsVolatile,
2056 bool isInit) {
2057 // If this is an aggregate r-value, it should agree in type except
2058 // maybe for address-space qualification.
2059 assert(!rvalue.isAggregate() ||
2060 rvalue.getAggregateAddress().getElementType() ==
2061 dest.getAddress().getElementType());
2062
2063 AtomicInfo atomics(*this, dest);
2064 LValue LVal = atomics.getAtomicLValue();
2065
2066 // If this is an initialization, just put the value there normally.
2067 if (LVal.isSimple()) {
2068 if (isInit) {
2069 atomics.emitCopyIntoMemory(rvalue);
2070 return;
2071 }
2072
2073 // Check whether we should use a library call.
2074 if (atomics.shouldUseLibcall()) {
2075 // Produce a source address.
2076 Address srcAddr = atomics.materializeRValue(rvalue);
2077
2078 // void __atomic_store(size_t size, void *mem, void *val, int order)
2079 CallArgList args;
2080 args.add(RValue::get(atomics.getAtomicSizeValue()),
2081 getContext().getSizeType());
2082 args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
2083 args.add(RValue::get(srcAddr.emitRawPointer(*this)),
2084 getContext().VoidPtrTy);
2085 args.add(
2086 RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2087 getContext().IntTy);
2088 emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2089 return;
2090 }
2091
2092 // Okay, we're doing this natively.
2093 llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);
2094
2095 // Do the atomic store.
2096 Address Addr = atomics.getAtomicAddress();
2097 if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
2098 if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
2099 Addr = atomics.castToAtomicIntPointer(Addr);
2100 ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
2101 /*isSigned=*/false);
2102 }
2103 llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);
2104
2105 if (AO == llvm::AtomicOrdering::Acquire)
2106 AO = llvm::AtomicOrdering::Monotonic;
2107 else if (AO == llvm::AtomicOrdering::AcquireRelease)
2108 AO = llvm::AtomicOrdering::Release;
2109 // Initializations don't need to be atomic.
2110 if (!isInit)
2111 store->setAtomic(AO);
2112
2113 // Other decoration.
2114 if (IsVolatile)
2115 store->setVolatile(true);
2116 CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2117 return;
2118 }
2119
2120 // Emit simple atomic update operation.
2121 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2122}
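// Editorial note (illustration, not part of CGAtomic.cpp): for a lock-free,
// simple lvalue the code above emits a single atomic store instruction; note
// that store-invalid orderings are weakened (acquire -> monotonic, acq_rel ->
// release) rather than rejected. Sketch in C (needs <stdatomic.h>):
//
//   void publish(_Atomic int *p, int v) {
//     atomic_store_explicit(p, v, memory_order_release);  // store atomic release
//   }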
2123
2124/// Emit a compare-and-exchange op for atomic type.
2125///
2126std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2127 LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2128 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2129 AggValueSlot Slot) {
2130 // If this is an aggregate r-value, it should agree in type except
2131 // maybe for address-space qualification.
2132 assert(!Expected.isAggregate() ||
2133 Expected.getAggregateAddress().getElementType() ==
2134 Obj.getAddress().getElementType());
2135 assert(!Desired.isAggregate() ||
2136 Desired.getAggregateAddress().getElementType() ==
2137 Obj.getAddress().getElementType());
2138 AtomicInfo Atomics(*this, Obj);
2139
2140 return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2141 IsWeak);
2142}
2143
2144llvm::AtomicRMWInst *
2145CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
2146 llvm::Value *Val, llvm::AtomicOrdering Order,
2147 llvm::SyncScope::ID SSID,
2148 const AtomicExpr *AE) {
2149 llvm::AtomicRMWInst *RMW =
2150 Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
2151 getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
2152 return RMW;
2153}
2154
2155void CodeGenFunction::EmitAtomicUpdate(
2156 LValue LVal, llvm::AtomicOrdering AO,
2157 const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2158 AtomicInfo Atomics(*this, LVal);
2159 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2160}
2161
2162void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2163 AtomicInfo atomics(*this, dest);
2164
2165 switch (atomics.getEvaluationKind()) {
2166 case TEK_Scalar: {
2167 llvm::Value *value = EmitScalarExpr(init);
2168 atomics.emitCopyIntoMemory(RValue::get(value));
2169 return;
2170 }
2171
2172 case TEK_Complex: {
2173 ComplexPairTy value = EmitComplexExpr(init);
2174 atomics.emitCopyIntoMemory(RValue::getComplex(value));
2175 return;
2176 }
2177
2178 case TEK_Aggregate: {
2179 // Fix up the destination if the initializer isn't an expression
2180 // of atomic type.
2181 bool Zeroed = false;
2182 if (!init->getType()->isAtomicType()) {
2183 Zeroed = atomics.emitMemSetZeroIfNecessary();
2184 dest = atomics.projectValue();
2185 }
2186
2187 // Evaluate the expression directly into the destination.
2188 AggValueSlot slot = AggValueSlot::forLValue(
2189 dest, AggValueSlot::IsNotDestructed,
2190 AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2191 AggValueSlot::DoesNotOverlap, Zeroed ? AggValueSlot::IsZeroed
2192 : AggValueSlot::IsNotZeroed);
2193
2194 EmitAggExpr(init, slot);
2195 return;
2196 }
2197 }
2198 llvm_unreachable("bad evaluation kind");
2199}
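// Editorial note (illustration, not part of CGAtomic.cpp): initialization goes
// through the plain copy path above, so it is not itself an atomic operation.
// Sketch in C:
//
//   void init_local(void) {
//     _Atomic int counter = 42;   // emitted as an ordinary (non-atomic) store
//   }
//   void reset(_Atomic int *p) {
//     __c11_atomic_init(p, 0);    // likewise a plain store, no ordering
//   }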