CGAtomic.cpp
1//===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGCall.h"
14#include "CGRecordLayout.h"
15#include "CodeGenFunction.h"
16#include "CodeGenModule.h"
17#include "TargetInfo.h"
18#include "clang/AST/ASTContext.h"
19#include "clang/CodeGen/CGFunctionInfo.h"
20#include "clang/Frontend/FrontendDiagnostic.h"
21#include "llvm/ADT/DenseMap.h"
22#include "llvm/IR/DataLayout.h"
23#include "llvm/IR/Intrinsics.h"
24
25using namespace clang;
26using namespace CodeGen;
27
28namespace {
29 class AtomicInfo {
30 CodeGenFunction &CGF;
31 QualType AtomicTy;
32 QualType ValueTy;
33 uint64_t AtomicSizeInBits;
34 uint64_t ValueSizeInBits;
35 CharUnits AtomicAlign;
36 CharUnits ValueAlign;
37 TypeEvaluationKind EvaluationKind;
38 bool UseLibcall;
39 LValue LVal;
40 CGBitFieldInfo BFI;
41 public:
42 AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
43 : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
44 EvaluationKind(TEK_Scalar), UseLibcall(true) {
45 assert(!lvalue.isGlobalReg());
46 ASTContext &C = CGF.getContext();
47 if (lvalue.isSimple()) {
48 AtomicTy = lvalue.getType();
49 if (auto *ATy = AtomicTy->getAs<AtomicType>())
50 ValueTy = ATy->getValueType();
51 else
52 ValueTy = AtomicTy;
53 EvaluationKind = CGF.getEvaluationKind(ValueTy);
54
55 uint64_t ValueAlignInBits;
56 uint64_t AtomicAlignInBits;
57 TypeInfo ValueTI = C.getTypeInfo(ValueTy);
58 ValueSizeInBits = ValueTI.Width;
59 ValueAlignInBits = ValueTI.Align;
60
61 TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
62 AtomicSizeInBits = AtomicTI.Width;
63 AtomicAlignInBits = AtomicTI.Align;
64
65 assert(ValueSizeInBits <= AtomicSizeInBits);
66 assert(ValueAlignInBits <= AtomicAlignInBits);
67
68 AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
69 ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
70 if (lvalue.getAlignment().isZero())
71 lvalue.setAlignment(AtomicAlign);
72
73 LVal = lvalue;
74 } else if (lvalue.isBitField()) {
75 ValueTy = lvalue.getType();
76 ValueSizeInBits = C.getTypeSize(ValueTy);
77 auto &OrigBFI = lvalue.getBitFieldInfo();
78 auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
79 AtomicSizeInBits = C.toBits(
80 C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
81 .alignTo(lvalue.getAlignment()));
82 llvm::Value *BitFieldPtr = lvalue.getRawBitFieldPointer(CGF);
83 auto OffsetInChars =
84 (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
85 lvalue.getAlignment();
86 llvm::Value *StoragePtr = CGF.Builder.CreateConstGEP1_64(
87 CGF.Int8Ty, BitFieldPtr, OffsetInChars.getQuantity());
88 StoragePtr = CGF.Builder.CreateAddrSpaceCast(
89 StoragePtr, CGF.UnqualPtrTy, "atomic_bitfield_base");
90 BFI = OrigBFI;
91 BFI.Offset = Offset;
92 BFI.StorageSize = AtomicSizeInBits;
93 BFI.StorageOffset += OffsetInChars;
94 llvm::Type *StorageTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
95 LVal = LValue::MakeBitfield(
96 Address(StoragePtr, StorageTy, lvalue.getAlignment()), BFI,
97 lvalue.getType(), lvalue.getBaseInfo(), lvalue.getTBAAInfo());
98 AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
99 if (AtomicTy.isNull()) {
100 llvm::APInt Size(
101 /*numBits=*/32,
102 C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
103 AtomicTy = C.getConstantArrayType(C.CharTy, Size, nullptr,
104 ArraySizeModifier::Normal,
105 /*IndexTypeQuals=*/0);
106 }
107 AtomicAlign = ValueAlign = lvalue.getAlignment();
108 } else if (lvalue.isVectorElt()) {
109 ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
110 ValueSizeInBits = C.getTypeSize(ValueTy);
111 AtomicTy = lvalue.getType();
112 AtomicSizeInBits = C.getTypeSize(AtomicTy);
113 AtomicAlign = ValueAlign = lvalue.getAlignment();
114 LVal = lvalue;
115 } else {
116 assert(lvalue.isExtVectorElt());
117 ValueTy = lvalue.getType();
118 ValueSizeInBits = C.getTypeSize(ValueTy);
119 AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
120 lvalue.getType(), cast<llvm::FixedVectorType>(
121 lvalue.getExtVectorAddress().getElementType())
122 ->getNumElements());
123 AtomicSizeInBits = C.getTypeSize(AtomicTy);
124 AtomicAlign = ValueAlign = lvalue.getAlignment();
125 LVal = lvalue;
126 }
127 UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
128 AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
129 }
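 // Illustrative note (added commentary, not part of the upstream source):
 // for a bit-field lvalue such as `a` in `struct S { int a : 3; }`, the
 // constructor above widens the atomic access to the whole aligned storage
 // unit containing the bit-field (here a 32-bit unit), so that the
 // compare-exchange based update loops below can operate on a plain iN value.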
130
131 QualType getAtomicType() const { return AtomicTy; }
132 QualType getValueType() const { return ValueTy; }
133 CharUnits getAtomicAlignment() const { return AtomicAlign; }
134 uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
135 uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
136 TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
137 bool shouldUseLibcall() const { return UseLibcall; }
138 const LValue &getAtomicLValue() const { return LVal; }
139 llvm::Value *getAtomicPointer() const {
140 if (LVal.isSimple())
141 return LVal.emitRawPointer(CGF);
142 else if (LVal.isBitField())
143 return LVal.getRawBitFieldPointer(CGF);
144 else if (LVal.isVectorElt())
145 return LVal.getRawVectorPointer(CGF);
146 assert(LVal.isExtVectorElt());
147 return LVal.getRawExtVectorPointer(CGF);
148 }
149 Address getAtomicAddress() const {
150 llvm::Type *ElTy;
151 if (LVal.isSimple())
152 ElTy = LVal.getAddress().getElementType();
153 else if (LVal.isBitField())
154 ElTy = LVal.getBitFieldAddress().getElementType();
155 else if (LVal.isVectorElt())
156 ElTy = LVal.getVectorAddress().getElementType();
157 else
158 ElTy = LVal.getExtVectorAddress().getElementType();
159 return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
160 }
161
162 Address getAtomicAddressAsAtomicIntPointer() const {
163 return castToAtomicIntPointer(getAtomicAddress());
164 }
165
166 /// Is the atomic size larger than the underlying value type?
167 ///
168 /// Note that the absence of padding does not mean that atomic
169 /// objects are completely interchangeable with non-atomic
170 /// objects: we might have promoted the alignment of a type
171 /// without making it bigger.
172 bool hasPadding() const {
173 return (ValueSizeInBits != AtomicSizeInBits);
174 }
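 // Illustrative note (added commentary): a struct with sizeof == 3 wrapped in
 // _Atomic is typically padded to 4 bytes so the hardware can operate on it,
 // making ValueSizeInBits (24) != AtomicSizeInBits (32) and hasPadding() true;
 // a plain `_Atomic(int)` has no such padding.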
175
176 bool emitMemSetZeroIfNecessary() const;
177
178 llvm::Value *getAtomicSizeValue() const {
179 CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
180 return CGF.CGM.getSize(size);
181 }
182
183 /// Cast the given pointer to an integer pointer suitable for atomic
184 /// operations on the full atomic width.
185 Address castToAtomicIntPointer(Address Addr) const;
186
187 /// If Addr is compatible with the iN that will be used for an atomic
188 /// operation, bitcast it. Otherwise, create a temporary that is suitable
189 /// and copy the value across.
190 Address convertToAtomicIntPointer(Address Addr) const;
191
192 /// Turn an atomic-layout object into an r-value.
193 RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
194 SourceLocation loc, bool AsValue) const;
195
196 llvm::Value *getScalarRValValueOrNull(RValue RVal) const;
197
198 /// Converts an rvalue to integer value if needed.
199 llvm::Value *convertRValueToInt(RValue RVal, bool CmpXchg = false) const;
200
201 RValue ConvertToValueOrAtomic(llvm::Value *IntVal, AggValueSlot ResultSlot,
202 SourceLocation Loc, bool AsValue,
203 bool CmpXchg = false) const;
204
205 /// Copy an atomic r-value into atomic-layout memory.
206 void emitCopyIntoMemory(RValue rvalue) const;
207
208 /// Project an l-value down to the value field.
209 LValue projectValue() const {
210 assert(LVal.isSimple());
211 Address addr = getAtomicAddress();
212 if (hasPadding())
213 addr = CGF.Builder.CreateStructGEP(addr, 0);
214
215 return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
216 LVal.getBaseInfo(), LVal.getTBAAInfo());
217 }
218
219 /// Emits atomic load.
220 /// \returns Loaded value.
221 RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
222 bool AsValue, llvm::AtomicOrdering AO,
223 bool IsVolatile);
224
225 /// Emits atomic compare-and-exchange sequence.
226 /// \param Expected Expected value.
227 /// \param Desired Desired value.
228 /// \param Success Atomic ordering for success operation.
229 /// \param Failure Atomic ordering for failed operation.
230 /// \param IsWeak true if atomic operation is weak, false otherwise.
231 /// \returns Pair of values: previous value from storage (value type) and
232 /// boolean flag (i1 type) with true if success and false otherwise.
233 std::pair<RValue, llvm::Value *>
234 EmitAtomicCompareExchange(RValue Expected, RValue Desired,
235 llvm::AtomicOrdering Success =
236 llvm::AtomicOrdering::SequentiallyConsistent,
237 llvm::AtomicOrdering Failure =
238 llvm::AtomicOrdering::SequentiallyConsistent,
239 bool IsWeak = false);
240
241 /// Emits atomic update.
242 /// \param AO Atomic ordering.
243 /// \param UpdateOp Update operation for the current lvalue.
244 void EmitAtomicUpdate(llvm::AtomicOrdering AO,
245 const llvm::function_ref<RValue(RValue)> &UpdateOp,
246 bool IsVolatile);
247 /// Emits atomic update.
248 /// \param AO Atomic ordering.
249 void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
250 bool IsVolatile);
251
252 /// Materialize an atomic r-value in atomic-layout memory.
253 Address materializeRValue(RValue rvalue) const;
254
255 /// Creates temp alloca for intermediate operations on atomic value.
256 Address CreateTempAlloca() const;
257 private:
258 bool requiresMemSetZero(llvm::Type *type) const;
259
260
261 /// Emits atomic load as a libcall.
262 void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
263 llvm::AtomicOrdering AO, bool IsVolatile);
264 /// Emits atomic load as LLVM instruction.
265 llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile,
266 bool CmpXchg = false);
267 /// Emits atomic compare-and-exchange op as a libcall.
268 llvm::Value *EmitAtomicCompareExchangeLibcall(
269 llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
270 llvm::AtomicOrdering Success =
271 llvm::AtomicOrdering::SequentiallyConsistent,
272 llvm::AtomicOrdering Failure =
273 llvm::AtomicOrdering::SequentiallyConsistent);
274 /// Emits atomic compare-and-exchange op as LLVM instruction.
275 std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
276 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
277 llvm::AtomicOrdering Success =
278 llvm::AtomicOrdering::SequentiallyConsistent,
279 llvm::AtomicOrdering Failure =
280 llvm::AtomicOrdering::SequentiallyConsistent,
281 bool IsWeak = false);
282 /// Emit atomic update as libcalls.
283 void
284 EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
285 const llvm::function_ref<RValue(RValue)> &UpdateOp,
286 bool IsVolatile);
287 /// Emit atomic update as LLVM instructions.
288 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
289 const llvm::function_ref<RValue(RValue)> &UpdateOp,
290 bool IsVolatile);
291 /// Emit atomic update as libcalls.
292 void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
293 bool IsVolatile);
294 /// Emit atomic update as LLVM instructions.
295 void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
296 bool IsVolatile);
297 };
298}
299
300Address AtomicInfo::CreateTempAlloca() const {
301 Address TempAlloca = CGF.CreateMemTemp(
302 (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
303 : AtomicTy,
304 getAtomicAlignment(),
305 "atomic-temp");
306 // Cast to pointer to value type for bitfields.
307 if (LVal.isBitField())
308 return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
309 TempAlloca, getAtomicAddress().getType(),
310 getAtomicAddress().getElementType());
311 return TempAlloca;
312}
313
314static RValue emitAtomicLibcall(CodeGenFunction &CGF,
315 StringRef fnName,
316 QualType resultType,
317 CallArgList &args) {
318 const CGFunctionInfo &fnInfo =
319 CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
320 llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
321 llvm::AttrBuilder fnAttrB(CGF.getLLVMContext());
322 fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
323 fnAttrB.addAttribute(llvm::Attribute::WillReturn);
324 llvm::AttributeList fnAttrs = llvm::AttributeList::get(
325 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
326
327 llvm::FunctionCallee fn =
328 CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
329 auto callee = CGCallee::forDirect(fn);
330 return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
331}
332
333/// Does a store of the given IR type modify the full expected width?
334static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
335 uint64_t expectedSize) {
336 return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
337}
338
339/// Does the atomic type require memsetting to zero before initialization?
340///
341/// The IR type is provided as a way of making certain queries faster.
342bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
343 // If the atomic type has size padding, we definitely need a memset.
344 if (hasPadding()) return true;
345
346 // Otherwise, do some simple heuristics to try to avoid it:
347 switch (getEvaluationKind()) {
348 // For scalars and complexes, check whether the store size of the
349 // type uses the full size.
350 case TEK_Scalar:
351 return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
352 case TEK_Complex:
353 return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
354 AtomicSizeInBits / 2);
355
356 // Padding in structs has an undefined bit pattern. User beware.
357 case TEK_Aggregate:
358 return false;
359 }
360 llvm_unreachable("bad evaluation kind");
361}
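// Illustrative note (added commentary): on x86-64, `_Atomic(long double)` is
// a 16-byte object while an x86_fp80 store only writes 10 bytes, so
// isFullSizeType() is false and requiresMemSetZero() requests zero-filling
// the temporary before the value is copied in; `_Atomic(int)` needs no memset
// because a 4-byte store covers the full 4-byte atomic object.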
362
363bool AtomicInfo::emitMemSetZeroIfNecessary() const {
364 assert(LVal.isSimple());
365 Address addr = LVal.getAddress();
366 if (!requiresMemSetZero(addr.getElementType()))
367 return false;
368
369 CGF.Builder.CreateMemSet(
370 addr.emitRawPointer(CGF), llvm::ConstantInt::get(CGF.Int8Ty, 0),
371 CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
372 LVal.getAlignment().getAsAlign());
373 return true;
374}
375
376static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
377 Address Dest, Address Ptr,
378 Address Val1, Address Val2,
379 uint64_t Size,
380 llvm::AtomicOrdering SuccessOrder,
381 llvm::AtomicOrdering FailureOrder,
382 llvm::SyncScope::ID Scope) {
383 // Load the expected and desired values; IsWeak is propagated via setWeak.
384 llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
385 llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
386
387 llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
388 Ptr, Expected, Desired, SuccessOrder, FailureOrder, Scope);
389 Pair->setVolatile(E->isVolatile());
390 Pair->setWeak(IsWeak);
391 CGF.getTargetHooks().setTargetAtomicMetadata(CGF, *Pair, E);
392
393 // Cmp holds the result of the compare-exchange operation: true on success,
394 // false on failure.
395 llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
396 llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
397
398 // This basic block is used to hold the store instruction if the operation
399 // failed.
400 llvm::BasicBlock *StoreExpectedBB =
401 CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
402
403 // This basic block is the exit point of the operation, we should end up
404 // here regardless of whether or not the operation succeeded.
405 llvm::BasicBlock *ContinueBB =
406 CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
407
408 // Update Expected if Expected isn't equal to Old, otherwise branch to the
409 // exit point.
410 CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
411
412 CGF.Builder.SetInsertPoint(StoreExpectedBB);
413 // Update the memory at Expected with Old's value.
414 CGF.Builder.CreateStore(Old, Val1);
415 // Finally, branch to the exit point.
416 CGF.Builder.CreateBr(ContinueBB);
417
418 CGF.Builder.SetInsertPoint(ContinueBB);
419 // Update the memory at Dest with Cmp's value.
420 CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
421}
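// Illustrative note (added commentary): for a call such as
//   _Bool ok = __c11_atomic_compare_exchange_strong(p, &expected, desired,
//                                                   succ, fail);
// the helper above emits the cmpxchg, stores the observed old value back into
// `expected` only on failure (the cmpxchg.store_expected block), and stores
// the i1 success flag into Dest, which becomes the _Bool result.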
422
423/// Given an ordering required on success, emit all possible cmpxchg
424/// instructions to cope with the provided (but possibly only dynamically known)
425/// FailureOrder.
426static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
427 bool IsWeak, Address Dest, Address Ptr,
428 Address Val1, Address Val2,
429 llvm::Value *FailureOrderVal,
430 uint64_t Size,
431 llvm::AtomicOrdering SuccessOrder,
432 llvm::SyncScope::ID Scope) {
433 llvm::AtomicOrdering FailureOrder;
434 if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
435 auto FOS = FO->getSExtValue();
436 if (!llvm::isValidAtomicOrderingCABI(FOS))
437 FailureOrder = llvm::AtomicOrdering::Monotonic;
438 else
439 switch ((llvm::AtomicOrderingCABI)FOS) {
440 case llvm::AtomicOrderingCABI::relaxed:
441 // 31.7.2.18: "The failure argument shall not be memory_order_release
442 // nor memory_order_acq_rel". Fall back to monotonic.
443 case llvm::AtomicOrderingCABI::release:
444 case llvm::AtomicOrderingCABI::acq_rel:
445 FailureOrder = llvm::AtomicOrdering::Monotonic;
446 break;
447 case llvm::AtomicOrderingCABI::consume:
448 case llvm::AtomicOrderingCABI::acquire:
449 FailureOrder = llvm::AtomicOrdering::Acquire;
450 break;
451 case llvm::AtomicOrderingCABI::seq_cst:
452 FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
453 break;
454 }
455 // Prior to C++17, "the failure argument shall be no stronger than the
456 // success argument". This condition has been lifted and the only
457 // precondition is 31.7.2.18. Effectively treat this as a DR and skip
458 // language version checks.
459 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
460 FailureOrder, Scope);
461 return;
462 }
463
464 // Create all the relevant BB's
465 auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
466 auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
467 auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
468 auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
469
470 // MonotonicBB is arbitrarily chosen as the default case; in practice, this
471 // doesn't matter unless someone is crazy enough to use something that
472 // doesn't fold to a constant for the ordering.
473 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
474 // Implemented as acquire, since it's the closest in LLVM.
475 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
476 AcquireBB);
477 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
478 AcquireBB);
479 SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
480 SeqCstBB);
481
482 // Emit all the different atomics
483 CGF.Builder.SetInsertPoint(MonotonicBB);
484 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
485 Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
486 CGF.Builder.CreateBr(ContBB);
487
488 CGF.Builder.SetInsertPoint(AcquireBB);
489 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
490 llvm::AtomicOrdering::Acquire, Scope);
491 CGF.Builder.CreateBr(ContBB);
492
493 CGF.Builder.SetInsertPoint(SeqCstBB);
494 emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
495 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
496 CGF.Builder.CreateBr(ContBB);
497
498 CGF.Builder.SetInsertPoint(ContBB);
499}
500
501/// Duplicate the atomic min/max operation in conventional IR for the builtin
502/// variants that return the new rather than the original value.
503static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
504 AtomicExpr::AtomicOp Op,
505 bool IsSigned,
506 llvm::Value *OldVal,
507 llvm::Value *RHS) {
508 llvm::CmpInst::Predicate Pred;
509 switch (Op) {
510 default:
511 llvm_unreachable("Unexpected min/max operation");
512 case AtomicExpr::AO__atomic_max_fetch:
513 case AtomicExpr::AO__scoped_atomic_max_fetch:
514 Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
515 break;
516 case AtomicExpr::AO__atomic_min_fetch:
517 case AtomicExpr::AO__scoped_atomic_min_fetch:
518 Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
519 break;
520 }
521 llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
522 return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
523}
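// Illustrative note (added commentary): __atomic_max_fetch(p, v, order) must
// return the *new* value, i.e. max(old, v), but atomicrmw max returns the old
// value, so the helper above recomputes the result, e.g. for a signed max:
//   %tst    = icmp sgt i32 %old, %v
//   %newval = select i1 %tst, i32 %old, i32 %v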
524
525static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
526 Address Ptr, Address Val1, Address Val2,
527 llvm::Value *IsWeak, llvm::Value *FailureOrder,
528 uint64_t Size, llvm::AtomicOrdering Order,
529 llvm::SyncScope::ID Scope) {
530 llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
531 bool PostOpMinMax = false;
532 unsigned PostOp = 0;
533
534 switch (E->getOp()) {
535 case AtomicExpr::AO__c11_atomic_init:
536 case AtomicExpr::AO__opencl_atomic_init:
537 llvm_unreachable("Already handled!");
538
539 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
540 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
541 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
542 emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
543 FailureOrder, Size, Order, Scope);
544 return;
545 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
546 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
547 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
548 emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
549 FailureOrder, Size, Order, Scope);
550 return;
551 case AtomicExpr::AO__atomic_compare_exchange:
552 case AtomicExpr::AO__atomic_compare_exchange_n:
553 case AtomicExpr::AO__scoped_atomic_compare_exchange:
554 case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
555 if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
556 emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
557 Val1, Val2, FailureOrder, Size, Order, Scope);
558 } else {
559 // Create all the relevant BB's
560 llvm::BasicBlock *StrongBB =
561 CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
562 llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
563 llvm::BasicBlock *ContBB =
564 CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
565
566 llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
567 SI->addCase(CGF.Builder.getInt1(false), StrongBB);
568
569 CGF.Builder.SetInsertPoint(StrongBB);
570 emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
571 FailureOrder, Size, Order, Scope);
572 CGF.Builder.CreateBr(ContBB);
573
574 CGF.Builder.SetInsertPoint(WeakBB);
575 emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
576 FailureOrder, Size, Order, Scope);
577 CGF.Builder.CreateBr(ContBB);
578
579 CGF.Builder.SetInsertPoint(ContBB);
580 }
581 return;
582 }
583 case AtomicExpr::AO__c11_atomic_load:
584 case AtomicExpr::AO__opencl_atomic_load:
585 case AtomicExpr::AO__hip_atomic_load:
586 case AtomicExpr::AO__atomic_load_n:
587 case AtomicExpr::AO__atomic_load:
588 case AtomicExpr::AO__scoped_atomic_load_n:
589 case AtomicExpr::AO__scoped_atomic_load: {
590 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
591 Load->setAtomic(Order, Scope);
592 Load->setVolatile(E->isVolatile());
593 CGF.Builder.CreateStore(Load, Dest);
594 return;
595 }
596
597 case AtomicExpr::AO__c11_atomic_store:
598 case AtomicExpr::AO__opencl_atomic_store:
599 case AtomicExpr::AO__hip_atomic_store:
600 case AtomicExpr::AO__atomic_store:
601 case AtomicExpr::AO__atomic_store_n:
602 case AtomicExpr::AO__scoped_atomic_store:
603 case AtomicExpr::AO__scoped_atomic_store_n: {
604 llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
605 llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
606 Store->setAtomic(Order, Scope);
607 Store->setVolatile(E->isVolatile());
608 return;
609 }
610
611 case AtomicExpr::AO__c11_atomic_exchange:
612 case AtomicExpr::AO__hip_atomic_exchange:
613 case AtomicExpr::AO__opencl_atomic_exchange:
614 case AtomicExpr::AO__atomic_exchange_n:
615 case AtomicExpr::AO__atomic_exchange:
616 case AtomicExpr::AO__scoped_atomic_exchange_n:
617 case AtomicExpr::AO__scoped_atomic_exchange:
618 Op = llvm::AtomicRMWInst::Xchg;
619 break;
620
621 case AtomicExpr::AO__atomic_add_fetch:
622 case AtomicExpr::AO__scoped_atomic_add_fetch:
623 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
624 : llvm::Instruction::Add;
625 [[fallthrough]];
626 case AtomicExpr::AO__c11_atomic_fetch_add:
627 case AtomicExpr::AO__hip_atomic_fetch_add:
628 case AtomicExpr::AO__opencl_atomic_fetch_add:
629 case AtomicExpr::AO__atomic_fetch_add:
630 case AtomicExpr::AO__scoped_atomic_fetch_add:
631 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
632 : llvm::AtomicRMWInst::Add;
633 break;
634
635 case AtomicExpr::AO__atomic_sub_fetch:
636 case AtomicExpr::AO__scoped_atomic_sub_fetch:
637 PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
638 : llvm::Instruction::Sub;
639 [[fallthrough]];
640 case AtomicExpr::AO__c11_atomic_fetch_sub:
641 case AtomicExpr::AO__hip_atomic_fetch_sub:
642 case AtomicExpr::AO__opencl_atomic_fetch_sub:
643 case AtomicExpr::AO__atomic_fetch_sub:
644 case AtomicExpr::AO__scoped_atomic_fetch_sub:
645 Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
646 : llvm::AtomicRMWInst::Sub;
647 break;
648
649 case AtomicExpr::AO__atomic_min_fetch:
650 case AtomicExpr::AO__scoped_atomic_min_fetch:
651 PostOpMinMax = true;
652 [[fallthrough]];
653 case AtomicExpr::AO__c11_atomic_fetch_min:
654 case AtomicExpr::AO__hip_atomic_fetch_min:
655 case AtomicExpr::AO__opencl_atomic_fetch_min:
656 case AtomicExpr::AO__atomic_fetch_min:
657 case AtomicExpr::AO__scoped_atomic_fetch_min:
658 Op = E->getValueType()->isFloatingType()
659 ? llvm::AtomicRMWInst::FMin
660 : (E->getValueType()->isSignedIntegerType()
661 ? llvm::AtomicRMWInst::Min
662 : llvm::AtomicRMWInst::UMin);
663 break;
664
665 case AtomicExpr::AO__atomic_max_fetch:
666 case AtomicExpr::AO__scoped_atomic_max_fetch:
667 PostOpMinMax = true;
668 [[fallthrough]];
669 case AtomicExpr::AO__c11_atomic_fetch_max:
670 case AtomicExpr::AO__hip_atomic_fetch_max:
671 case AtomicExpr::AO__opencl_atomic_fetch_max:
672 case AtomicExpr::AO__atomic_fetch_max:
673 case AtomicExpr::AO__scoped_atomic_fetch_max:
674 Op = E->getValueType()->isFloatingType()
675 ? llvm::AtomicRMWInst::FMax
676 : (E->getValueType()->isSignedIntegerType()
677 ? llvm::AtomicRMWInst::Max
678 : llvm::AtomicRMWInst::UMax);
679 break;
680
681 case AtomicExpr::AO__atomic_and_fetch:
682 case AtomicExpr::AO__scoped_atomic_and_fetch:
683 PostOp = llvm::Instruction::And;
684 [[fallthrough]];
685 case AtomicExpr::AO__c11_atomic_fetch_and:
686 case AtomicExpr::AO__hip_atomic_fetch_and:
687 case AtomicExpr::AO__opencl_atomic_fetch_and:
688 case AtomicExpr::AO__atomic_fetch_and:
689 case AtomicExpr::AO__scoped_atomic_fetch_and:
690 Op = llvm::AtomicRMWInst::And;
691 break;
692
693 case AtomicExpr::AO__atomic_or_fetch:
694 case AtomicExpr::AO__scoped_atomic_or_fetch:
695 PostOp = llvm::Instruction::Or;
696 [[fallthrough]];
697 case AtomicExpr::AO__c11_atomic_fetch_or:
698 case AtomicExpr::AO__hip_atomic_fetch_or:
699 case AtomicExpr::AO__opencl_atomic_fetch_or:
700 case AtomicExpr::AO__atomic_fetch_or:
701 case AtomicExpr::AO__scoped_atomic_fetch_or:
702 Op = llvm::AtomicRMWInst::Or;
703 break;
704
705 case AtomicExpr::AO__atomic_xor_fetch:
706 case AtomicExpr::AO__scoped_atomic_xor_fetch:
707 PostOp = llvm::Instruction::Xor;
708 [[fallthrough]];
709 case AtomicExpr::AO__c11_atomic_fetch_xor:
710 case AtomicExpr::AO__hip_atomic_fetch_xor:
711 case AtomicExpr::AO__opencl_atomic_fetch_xor:
712 case AtomicExpr::AO__atomic_fetch_xor:
713 case AtomicExpr::AO__scoped_atomic_fetch_xor:
714 Op = llvm::AtomicRMWInst::Xor;
715 break;
716
717 case AtomicExpr::AO__atomic_nand_fetch:
718 case AtomicExpr::AO__scoped_atomic_nand_fetch:
719 PostOp = llvm::Instruction::And; // the NOT is special cased below
720 [[fallthrough]];
721 case AtomicExpr::AO__c11_atomic_fetch_nand:
722 case AtomicExpr::AO__atomic_fetch_nand:
723 case AtomicExpr::AO__scoped_atomic_fetch_nand:
724 Op = llvm::AtomicRMWInst::Nand;
725 break;
726 }
727
728 llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
729 llvm::AtomicRMWInst *RMWI =
730 CGF.emitAtomicRMWInst(Op, Ptr, LoadVal1, Order, Scope, E);
731 RMWI->setVolatile(E->isVolatile());
732
733 // For __atomic_*_fetch operations, perform the operation again to
734 // determine the value which was written.
735 llvm::Value *Result = RMWI;
736 if (PostOpMinMax)
737 Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
738 E->getValueType()->isSignedIntegerType(),
739 RMWI, LoadVal1);
740 else if (PostOp)
741 Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
742 LoadVal1);
743 if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch ||
744 E->getOp() == AtomicExpr::AO__scoped_atomic_nand_fetch)
745 Result = CGF.Builder.CreateNot(Result);
746 CGF.Builder.CreateStore(Result, Dest);
747}
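// Illustrative note (added commentary): for the *_fetch forms, the RMW result
// (the old value) is combined with the operand once more to produce the value
// that was actually written; e.g. __atomic_nand_fetch(p, v, order) becomes
//   %old = atomicrmw nand ptr %p, i32 %v <order>
//   %and = and i32 %old, %v
//   %res = xor i32 %and, -1        ; i.e. ~(%old & %v)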
748
749// This function emits any expression (scalar, complex, or aggregate)
750// into a temporary alloca.
751static Address
752EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
753 Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
754 CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
755 /*Init*/ true);
756 return DeclPtr;
757}
758
759static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
760 Address Ptr, Address Val1, Address Val2,
761 llvm::Value *IsWeak, llvm::Value *FailureOrder,
762 uint64_t Size, llvm::AtomicOrdering Order,
763 llvm::Value *Scope) {
764 auto ScopeModel = Expr->getScopeModel();
765
766 // LLVM atomic instructions always have a sync scope. If the clang atomic
767 // expression has no scope operand, use the default LLVM sync scope.
768 if (!ScopeModel) {
769 llvm::SyncScope::ID SS;
770 if (CGF.getLangOpts().OpenCL)
771 // OpenCL approach is: "The functions that do not have memory_scope
772 // argument have the same semantics as the corresponding functions with
773 // the memory_scope argument set to memory_scope_device." See ref.:
774 // https://registry.khronos.org/OpenCL/specs/3.0-unified/html/OpenCL_C.html#atomic-functions
775 SS = CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
776 SyncScope::OpenCLDevice,
777 Order, CGF.getLLVMContext());
778 else
779 SS = llvm::SyncScope::System;
780 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
781 Order, SS);
782 return;
783 }
784
785 // Handle constant scope.
786 if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
787 auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
788 CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
789 Order, CGF.CGM.getLLVMContext());
790 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
791 Order, SCID);
792 return;
793 }
794
795 // Handle non-constant scope.
796 auto &Builder = CGF.Builder;
797 auto Scopes = ScopeModel->getRuntimeValues();
798 llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
799 for (auto S : Scopes)
800 BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
801
802 llvm::BasicBlock *ContBB =
803 CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
804
805 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
806 // If an unsupported sync scope is encountered at run time, fall back to a
807 // default sync scope value.
808 auto FallBack = ScopeModel->getFallBackValue();
809 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
810 for (auto S : Scopes) {
811 auto *B = BB[S];
812 if (S != FallBack)
813 SI->addCase(Builder.getInt32(S), B);
814
815 Builder.SetInsertPoint(B);
816 EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
817 Order,
818 CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
819 ScopeModel->map(S),
820 Order,
821 CGF.getLLVMContext()));
822 Builder.CreateBr(ContBB);
823 }
824
825 Builder.SetInsertPoint(ContBB);
826}
827
828RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
829 QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
830 QualType MemTy = AtomicTy;
831 if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
832 MemTy = AT->getValueType();
833 llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
834
835 Address Val1 = Address::invalid();
836 Address Val2 = Address::invalid();
837 Address Dest = Address::invalid();
838 Address Ptr = EmitPointerWithAlignment(E->getPtr());
839
840 if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
841 E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
842 LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
843 EmitAtomicInit(E->getVal1(), lvalue);
844 return RValue::get(nullptr);
845 }
846
847 auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
848 uint64_t Size = TInfo.Width.getQuantity();
849 unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
850
851 CharUnits MaxInlineWidth =
852 getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
853 DiagnosticsEngine &Diags = CGM.getDiags();
854 bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
855 bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
856 if (Misaligned) {
857 Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
858 << (int)TInfo.Width.getQuantity()
859 << (int)Ptr.getAlignment().getQuantity();
860 }
861 if (Oversized) {
862 Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
863 << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
864 }
865
866 llvm::Value *Order = EmitScalarExpr(E->getOrder());
867 llvm::Value *Scope =
868 E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
869 bool ShouldCastToIntPtrTy = true;
870
871 switch (E->getOp()) {
872 case AtomicExpr::AO__c11_atomic_init:
873 case AtomicExpr::AO__opencl_atomic_init:
874 llvm_unreachable("Already handled above with EmitAtomicInit!");
875
876 case AtomicExpr::AO__atomic_load_n:
877 case AtomicExpr::AO__scoped_atomic_load_n:
878 case AtomicExpr::AO__c11_atomic_load:
879 case AtomicExpr::AO__opencl_atomic_load:
880 case AtomicExpr::AO__hip_atomic_load:
881 break;
882
883 case AtomicExpr::AO__atomic_load:
884 case AtomicExpr::AO__scoped_atomic_load:
885 Dest = EmitPointerWithAlignment(E->getVal1());
886 break;
887
888 case AtomicExpr::AO__atomic_store:
889 case AtomicExpr::AO__scoped_atomic_store:
890 Val1 = EmitPointerWithAlignment(E->getVal1());
891 break;
892
893 case AtomicExpr::AO__atomic_exchange:
894 case AtomicExpr::AO__scoped_atomic_exchange:
895 Val1 = EmitPointerWithAlignment(E->getVal1());
896 Dest = EmitPointerWithAlignment(E->getVal2());
897 break;
898
899 case AtomicExpr::AO__atomic_compare_exchange:
900 case AtomicExpr::AO__atomic_compare_exchange_n:
901 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
902 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
903 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
904 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
905 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
906 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
907 case AtomicExpr::AO__scoped_atomic_compare_exchange:
908 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
909 Val1 = EmitPointerWithAlignment(E->getVal1());
910 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
911 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
912 Val2 = EmitPointerWithAlignment(E->getVal2());
913 else
914 Val2 = EmitValToTemp(*this, E->getVal2());
915 OrderFail = EmitScalarExpr(E->getOrderFail());
916 if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
917 E->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
918 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
919 E->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
920 IsWeak = EmitScalarExpr(E->getWeak());
921 break;
922
923 case AtomicExpr::AO__c11_atomic_fetch_add:
924 case AtomicExpr::AO__c11_atomic_fetch_sub:
925 case AtomicExpr::AO__hip_atomic_fetch_add:
926 case AtomicExpr::AO__hip_atomic_fetch_sub:
927 case AtomicExpr::AO__opencl_atomic_fetch_add:
928 case AtomicExpr::AO__opencl_atomic_fetch_sub:
929 if (MemTy->isPointerType()) {
930 // For pointer arithmetic, we're required to do a bit of math:
931 // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
932 // ... but only for the C11 builtins. The GNU builtins expect the
933 // user to multiply by sizeof(T).
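 // For example (added commentary): with `_Atomic(int *) p`,
 // __c11_atomic_fetch_add(&p, 1, order) advances the pointer by
 // sizeof(int) == 4 bytes, whereas the GNU __atomic_fetch_add(&p, 1, order)
 // adds exactly 1 byte unless the caller scales the operand themselves.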
934 QualType Val1Ty = E->getVal1()->getType();
935 llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
936 CharUnits PointeeIncAmt =
937 getContext().getTypeSizeInChars(Val1Ty->getPointeeType());
938 Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
939 auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
940 Val1 = Temp;
941 EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
942 break;
943 }
944 [[fallthrough]];
945 case AtomicExpr::AO__atomic_fetch_add:
946 case AtomicExpr::AO__atomic_fetch_max:
947 case AtomicExpr::AO__atomic_fetch_min:
948 case AtomicExpr::AO__atomic_fetch_sub:
949 case AtomicExpr::AO__atomic_add_fetch:
950 case AtomicExpr::AO__atomic_max_fetch:
951 case AtomicExpr::AO__atomic_min_fetch:
952 case AtomicExpr::AO__atomic_sub_fetch:
953 case AtomicExpr::AO__c11_atomic_fetch_max:
954 case AtomicExpr::AO__c11_atomic_fetch_min:
955 case AtomicExpr::AO__opencl_atomic_fetch_max:
956 case AtomicExpr::AO__opencl_atomic_fetch_min:
957 case AtomicExpr::AO__hip_atomic_fetch_max:
958 case AtomicExpr::AO__hip_atomic_fetch_min:
959 case AtomicExpr::AO__scoped_atomic_fetch_add:
960 case AtomicExpr::AO__scoped_atomic_fetch_max:
961 case AtomicExpr::AO__scoped_atomic_fetch_min:
962 case AtomicExpr::AO__scoped_atomic_fetch_sub:
963 case AtomicExpr::AO__scoped_atomic_add_fetch:
964 case AtomicExpr::AO__scoped_atomic_max_fetch:
965 case AtomicExpr::AO__scoped_atomic_min_fetch:
966 case AtomicExpr::AO__scoped_atomic_sub_fetch:
967 ShouldCastToIntPtrTy = !MemTy->isFloatingType();
968 [[fallthrough]];
969
970 case AtomicExpr::AO__atomic_fetch_and:
971 case AtomicExpr::AO__atomic_fetch_nand:
972 case AtomicExpr::AO__atomic_fetch_or:
973 case AtomicExpr::AO__atomic_fetch_xor:
974 case AtomicExpr::AO__atomic_and_fetch:
975 case AtomicExpr::AO__atomic_nand_fetch:
976 case AtomicExpr::AO__atomic_or_fetch:
977 case AtomicExpr::AO__atomic_xor_fetch:
978 case AtomicExpr::AO__atomic_store_n:
979 case AtomicExpr::AO__atomic_exchange_n:
980 case AtomicExpr::AO__c11_atomic_fetch_and:
981 case AtomicExpr::AO__c11_atomic_fetch_nand:
982 case AtomicExpr::AO__c11_atomic_fetch_or:
983 case AtomicExpr::AO__c11_atomic_fetch_xor:
984 case AtomicExpr::AO__c11_atomic_store:
985 case AtomicExpr::AO__c11_atomic_exchange:
986 case AtomicExpr::AO__hip_atomic_fetch_and:
987 case AtomicExpr::AO__hip_atomic_fetch_or:
988 case AtomicExpr::AO__hip_atomic_fetch_xor:
989 case AtomicExpr::AO__hip_atomic_store:
990 case AtomicExpr::AO__hip_atomic_exchange:
991 case AtomicExpr::AO__opencl_atomic_fetch_and:
992 case AtomicExpr::AO__opencl_atomic_fetch_or:
993 case AtomicExpr::AO__opencl_atomic_fetch_xor:
994 case AtomicExpr::AO__opencl_atomic_store:
995 case AtomicExpr::AO__opencl_atomic_exchange:
996 case AtomicExpr::AO__scoped_atomic_fetch_and:
997 case AtomicExpr::AO__scoped_atomic_fetch_nand:
998 case AtomicExpr::AO__scoped_atomic_fetch_or:
999 case AtomicExpr::AO__scoped_atomic_fetch_xor:
1000 case AtomicExpr::AO__scoped_atomic_and_fetch:
1001 case AtomicExpr::AO__scoped_atomic_nand_fetch:
1002 case AtomicExpr::AO__scoped_atomic_or_fetch:
1003 case AtomicExpr::AO__scoped_atomic_xor_fetch:
1004 case AtomicExpr::AO__scoped_atomic_store_n:
1005 case AtomicExpr::AO__scoped_atomic_exchange_n:
1006 Val1 = EmitValToTemp(*this, E->getVal1());
1007 break;
1008 }
1009
1010 QualType RValTy = E->getType().getUnqualifiedType();
1011
1012 // The inlined atomics only function on iN types, where N is a power of 2. We
1013 // need to make sure (via temporaries if necessary) that all incoming values
1014 // are compatible.
1015 LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
1016 AtomicInfo Atomics(*this, AtomicVal);
1017
1018 if (ShouldCastToIntPtrTy) {
1019 Ptr = Atomics.castToAtomicIntPointer(Ptr);
1020 if (Val1.isValid())
1021 Val1 = Atomics.convertToAtomicIntPointer(Val1);
1022 if (Val2.isValid())
1023 Val2 = Atomics.convertToAtomicIntPointer(Val2);
1024 }
1025 if (Dest.isValid()) {
1026 if (ShouldCastToIntPtrTy)
1027 Dest = Atomics.castToAtomicIntPointer(Dest);
1028 } else if (E->isCmpXChg())
1029 Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
1030 else if (!RValTy->isVoidType()) {
1031 Dest = Atomics.CreateTempAlloca();
1032 if (ShouldCastToIntPtrTy)
1033 Dest = Atomics.castToAtomicIntPointer(Dest);
1034 }
1035
1036 bool PowerOf2Size = (Size & (Size - 1)) == 0;
1037 bool UseLibcall = !PowerOf2Size || (Size > 16);
1038
1039 // For atomics larger than 16 bytes, emit a libcall from the frontend. This
1040 // avoids the overhead of dealing with excessively-large value types in IR.
1041 // Non-power-of-2 values also lower to libcall here, as they are not currently
1042 // permitted in IR instructions (although that constraint could be relaxed in
1043 // the future). For other cases where a libcall is required on a given
1044 // platform, we let the backend handle it (this includes handling for all of
1045 // the size-optimized libcall variants, which are only valid up to 16 bytes.)
1046 //
1047 // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
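 // For example (added commentary): a 24-byte or a 6-byte _Atomic struct takes
 // the UseLibcall path below and becomes a generic __atomic_load/__atomic_store
 // style call, while a 16-byte power-of-two atomic is emitted as an LLVM
 // instruction and, if necessary, lowered to a libcall later by the backend.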
1048 if (UseLibcall) {
1049 CallArgList Args;
1050 // For non-optimized library calls, the size is the first parameter.
1051 Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1052 getContext().getSizeType());
1053
1054 // The atomic address is the second parameter.
1055 // The OpenCL atomic library functions only accept pointer arguments to
1056 // generic address space.
1057 auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1058 if (!E->isOpenCL())
1059 return V;
1060 auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1061 if (AS == LangAS::opencl_generic)
1062 return V;
1063 auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1064 auto *DestType = llvm::PointerType::get(getLLVMContext(), DestAS);
1065
1066 return getTargetHooks().performAddrSpaceCast(
1067 *this, V, AS, LangAS::opencl_generic, DestType, false);
1068 };
1069
1070 Args.add(RValue::get(CastToGenericAddrSpace(Ptr.emitRawPointer(*this),
1071 E->getPtr()->getType())),
1072 getContext().VoidPtrTy);
1073
1074 // The next 1-3 parameters are op-dependent.
1075 std::string LibCallName;
1076 QualType RetTy;
1077 bool HaveRetTy = false;
1078 switch (E->getOp()) {
1079 case AtomicExpr::AO__c11_atomic_init:
1080 case AtomicExpr::AO__opencl_atomic_init:
1081 llvm_unreachable("Already handled!");
1082
1083 // There is only one libcall for compare and exchange, because there is no
1084 // optimisation benefit possible from a libcall version of a weak compare
1085 // and exchange.
1086 // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1087 // void *desired, int success, int failure)
1088 case AtomicExpr::AO__atomic_compare_exchange:
1089 case AtomicExpr::AO__atomic_compare_exchange_n:
1090 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1091 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1092 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
1093 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
1094 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1095 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1096 case AtomicExpr::AO__scoped_atomic_compare_exchange:
1097 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
1098 LibCallName = "__atomic_compare_exchange";
1099 RetTy = getContext().BoolTy;
1100 HaveRetTy = true;
1101 Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1102 E->getVal1()->getType())),
1103 getContext().VoidPtrTy);
1104 Args.add(RValue::get(CastToGenericAddrSpace(Val2.emitRawPointer(*this),
1105 E->getVal2()->getType())),
1106 getContext().VoidPtrTy);
1107 Args.add(RValue::get(Order), getContext().IntTy);
1108 Order = OrderFail;
1109 break;
1110 // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1111 // int order)
1112 case AtomicExpr::AO__atomic_exchange:
1113 case AtomicExpr::AO__atomic_exchange_n:
1114 case AtomicExpr::AO__c11_atomic_exchange:
1115 case AtomicExpr::AO__hip_atomic_exchange:
1116 case AtomicExpr::AO__opencl_atomic_exchange:
1117 case AtomicExpr::AO__scoped_atomic_exchange:
1118 case AtomicExpr::AO__scoped_atomic_exchange_n:
1119 LibCallName = "__atomic_exchange";
1120 Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1121 E->getVal1()->getType())),
1122 getContext().VoidPtrTy);
1123 break;
1124 // void __atomic_store(size_t size, void *mem, void *val, int order)
1125 case AtomicExpr::AO__atomic_store:
1126 case AtomicExpr::AO__atomic_store_n:
1127 case AtomicExpr::AO__c11_atomic_store:
1128 case AtomicExpr::AO__hip_atomic_store:
1129 case AtomicExpr::AO__opencl_atomic_store:
1130 case AtomicExpr::AO__scoped_atomic_store:
1131 case AtomicExpr::AO__scoped_atomic_store_n:
1132 LibCallName = "__atomic_store";
1133 RetTy = getContext().VoidTy;
1134 HaveRetTy = true;
1135 Args.add(RValue::get(CastToGenericAddrSpace(Val1.emitRawPointer(*this),
1136 E->getVal1()->getType())),
1137 getContext().VoidPtrTy);
1138 break;
1139 // void __atomic_load(size_t size, void *mem, void *return, int order)
1140 case AtomicExpr::AO__atomic_load:
1141 case AtomicExpr::AO__atomic_load_n:
1142 case AtomicExpr::AO__c11_atomic_load:
1143 case AtomicExpr::AO__hip_atomic_load:
1144 case AtomicExpr::AO__opencl_atomic_load:
1145 case AtomicExpr::AO__scoped_atomic_load:
1146 case AtomicExpr::AO__scoped_atomic_load_n:
1147 LibCallName = "__atomic_load";
1148 break;
1149 case AtomicExpr::AO__atomic_add_fetch:
1150 case AtomicExpr::AO__scoped_atomic_add_fetch:
1151 case AtomicExpr::AO__atomic_fetch_add:
1152 case AtomicExpr::AO__c11_atomic_fetch_add:
1153 case AtomicExpr::AO__hip_atomic_fetch_add:
1154 case AtomicExpr::AO__opencl_atomic_fetch_add:
1155 case AtomicExpr::AO__scoped_atomic_fetch_add:
1156 case AtomicExpr::AO__atomic_and_fetch:
1157 case AtomicExpr::AO__scoped_atomic_and_fetch:
1158 case AtomicExpr::AO__atomic_fetch_and:
1159 case AtomicExpr::AO__c11_atomic_fetch_and:
1160 case AtomicExpr::AO__hip_atomic_fetch_and:
1161 case AtomicExpr::AO__opencl_atomic_fetch_and:
1162 case AtomicExpr::AO__scoped_atomic_fetch_and:
1163 case AtomicExpr::AO__atomic_or_fetch:
1164 case AtomicExpr::AO__scoped_atomic_or_fetch:
1165 case AtomicExpr::AO__atomic_fetch_or:
1166 case AtomicExpr::AO__c11_atomic_fetch_or:
1167 case AtomicExpr::AO__hip_atomic_fetch_or:
1168 case AtomicExpr::AO__opencl_atomic_fetch_or:
1169 case AtomicExpr::AO__scoped_atomic_fetch_or:
1170 case AtomicExpr::AO__atomic_sub_fetch:
1171 case AtomicExpr::AO__scoped_atomic_sub_fetch:
1172 case AtomicExpr::AO__atomic_fetch_sub:
1173 case AtomicExpr::AO__c11_atomic_fetch_sub:
1174 case AtomicExpr::AO__hip_atomic_fetch_sub:
1175 case AtomicExpr::AO__opencl_atomic_fetch_sub:
1176 case AtomicExpr::AO__scoped_atomic_fetch_sub:
1177 case AtomicExpr::AO__atomic_xor_fetch:
1178 case AtomicExpr::AO__scoped_atomic_xor_fetch:
1179 case AtomicExpr::AO__atomic_fetch_xor:
1180 case AtomicExpr::AO__c11_atomic_fetch_xor:
1181 case AtomicExpr::AO__hip_atomic_fetch_xor:
1182 case AtomicExpr::AO__opencl_atomic_fetch_xor:
1183 case AtomicExpr::AO__scoped_atomic_fetch_xor:
1184 case AtomicExpr::AO__atomic_nand_fetch:
1185 case AtomicExpr::AO__atomic_fetch_nand:
1186 case AtomicExpr::AO__c11_atomic_fetch_nand:
1187 case AtomicExpr::AO__scoped_atomic_fetch_nand:
1188 case AtomicExpr::AO__scoped_atomic_nand_fetch:
1189 case AtomicExpr::AO__atomic_min_fetch:
1190 case AtomicExpr::AO__atomic_fetch_min:
1191 case AtomicExpr::AO__c11_atomic_fetch_min:
1192 case AtomicExpr::AO__hip_atomic_fetch_min:
1193 case AtomicExpr::AO__opencl_atomic_fetch_min:
1194 case AtomicExpr::AO__scoped_atomic_fetch_min:
1195 case AtomicExpr::AO__scoped_atomic_min_fetch:
1196 case AtomicExpr::AO__atomic_max_fetch:
1197 case AtomicExpr::AO__atomic_fetch_max:
1198 case AtomicExpr::AO__c11_atomic_fetch_max:
1199 case AtomicExpr::AO__hip_atomic_fetch_max:
1200 case AtomicExpr::AO__opencl_atomic_fetch_max:
1201 case AtomicExpr::AO__scoped_atomic_fetch_max:
1202 case AtomicExpr::AO__scoped_atomic_max_fetch:
1203 llvm_unreachable("Integral atomic operations always become atomicrmw!");
1204 }
1205
1206 if (E->isOpenCL()) {
1207 LibCallName =
1208 std::string("__opencl") + StringRef(LibCallName).drop_front(1).str();
1209 }
1210 // By default, assume we return a value of the atomic type.
1211 if (!HaveRetTy) {
1212 // Value is returned through parameter before the order.
1213 RetTy = getContext().VoidTy;
1214 Args.add(RValue::get(
1215 CastToGenericAddrSpace(Dest.emitRawPointer(*this), RetTy)),
1216 getContext().VoidPtrTy);
1217 }
1218 // Order is always the last parameter.
1219 Args.add(RValue::get(Order),
1220 getContext().IntTy);
1221 if (E->isOpenCL())
1222 Args.add(RValue::get(Scope), getContext().IntTy);
1223
1224 RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1225 // The value is returned directly from the libcall.
1226 if (E->isCmpXChg())
1227 return Res;
1228
1229 if (RValTy->isVoidType())
1230 return RValue::get(nullptr);
1231
1232 return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1233 RValTy, E->getExprLoc());
1234 }
1235
1236 bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1237 E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1238 E->getOp() == AtomicExpr::AO__hip_atomic_store ||
1239 E->getOp() == AtomicExpr::AO__atomic_store ||
1240 E->getOp() == AtomicExpr::AO__atomic_store_n ||
1241 E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
1242 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
1243 bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1244 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1245 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
1246 E->getOp() == AtomicExpr::AO__atomic_load ||
1247 E->getOp() == AtomicExpr::AO__atomic_load_n ||
1248 E->getOp() == AtomicExpr::AO__scoped_atomic_load ||
1249 E->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
1250
1251 if (isa<llvm::ConstantInt>(Order)) {
1252 auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1253 // We should not ever get to a case where the ordering isn't a valid C ABI
1254 // value, but it's hard to enforce that in general.
1255 if (llvm::isValidAtomicOrderingCABI(ord))
1256 switch ((llvm::AtomicOrderingCABI)ord) {
1257 case llvm::AtomicOrderingCABI::relaxed:
1258 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1259 llvm::AtomicOrdering::Monotonic, Scope);
1260 break;
1261 case llvm::AtomicOrderingCABI::consume:
1262 case llvm::AtomicOrderingCABI::acquire:
1263 if (IsStore)
1264 break; // Avoid crashing on code with undefined behavior
1265 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1266 llvm::AtomicOrdering::Acquire, Scope);
1267 break;
1268 case llvm::AtomicOrderingCABI::release:
1269 if (IsLoad)
1270 break; // Avoid crashing on code with undefined behavior
1271 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1272 llvm::AtomicOrdering::Release, Scope);
1273 break;
1274 case llvm::AtomicOrderingCABI::acq_rel:
1275 if (IsLoad || IsStore)
1276 break; // Avoid crashing on code with undefined behavior
1277 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1278 llvm::AtomicOrdering::AcquireRelease, Scope);
1279 break;
1280 case llvm::AtomicOrderingCABI::seq_cst:
1281 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1282 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1283 break;
1284 }
1285 if (RValTy->isVoidType())
1286 return RValue::get(nullptr);
1287
1288 return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1289 RValTy, E->getExprLoc());
1290 }
1291
1292 // Long case, when Order isn't obviously constant.
1293
1294 // Create all the relevant BB's
1295 llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1296 *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1297 *SeqCstBB = nullptr;
1298 MonotonicBB = createBasicBlock("monotonic", CurFn);
1299 if (!IsStore)
1300 AcquireBB = createBasicBlock("acquire", CurFn);
1301 if (!IsLoad)
1302 ReleaseBB = createBasicBlock("release", CurFn);
1303 if (!IsLoad && !IsStore)
1304 AcqRelBB = createBasicBlock("acqrel", CurFn);
1305 SeqCstBB = createBasicBlock("seqcst", CurFn);
1306 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1307
1308 // Create the switch for the split
1309 // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1310 // doesn't matter unless someone is crazy enough to use something that
1311 // doesn't fold to a constant for the ordering.
1312 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1313 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1314
1315 // Emit all the different atomics
1316 Builder.SetInsertPoint(MonotonicBB);
1317 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1318 llvm::AtomicOrdering::Monotonic, Scope);
1319 Builder.CreateBr(ContBB);
1320 if (!IsStore) {
1321 Builder.SetInsertPoint(AcquireBB);
1322 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1323 llvm::AtomicOrdering::Acquire, Scope);
1324 Builder.CreateBr(ContBB);
1325 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1326 AcquireBB);
1327 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1328 AcquireBB);
1329 }
1330 if (!IsLoad) {
1331 Builder.SetInsertPoint(ReleaseBB);
1332 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1333 llvm::AtomicOrdering::Release, Scope);
1334 Builder.CreateBr(ContBB);
1335 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1336 ReleaseBB);
1337 }
1338 if (!IsLoad && !IsStore) {
1339 Builder.SetInsertPoint(AcqRelBB);
1340 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1341 llvm::AtomicOrdering::AcquireRelease, Scope);
1342 Builder.CreateBr(ContBB);
1343 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1344 AcqRelBB);
1345 }
1346 Builder.SetInsertPoint(SeqCstBB);
1347 EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1348 llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1349 Builder.CreateBr(ContBB);
1350 SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1351 SeqCstBB);
1352
1353 // Cleanup and return
1354 Builder.SetInsertPoint(ContBB);
1355 if (RValTy->isVoidType())
1356 return RValue::get(nullptr);
1357
1358 assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1359 return convertTempToRValue(Dest.withElementType(ConvertTypeForMem(RValTy)),
1360 RValTy, E->getExprLoc());
1361}
1362
1363Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
1364 llvm::IntegerType *ty =
1365 llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1366 return addr.withElementType(ty);
1367}
1368
1369Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1370 llvm::Type *Ty = Addr.getElementType();
1371 uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1372 if (SourceSizeInBits != AtomicSizeInBits) {
1373 Address Tmp = CreateTempAlloca();
1374 CGF.Builder.CreateMemCpy(Tmp, Addr,
1375 std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1376 Addr = Tmp;
1377 }
1378
1379 return castToAtomicIntPointer(Addr);
1380}
1381
1382RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1383 AggValueSlot resultSlot,
1384 SourceLocation loc,
1385 bool asValue) const {
1386 if (LVal.isSimple()) {
1387 if (EvaluationKind == TEK_Aggregate)
1388 return resultSlot.asRValue();
1389
1390 // Drill into the padding structure if we have one.
1391 if (hasPadding())
1392 addr = CGF.Builder.CreateStructGEP(addr, 0);
1393
1394 // Otherwise, just convert the temporary to an r-value using the
1395 // normal conversion routine.
1396 return CGF.convertTempToRValue(addr, getValueType(), loc);
1397 }
1398 if (!asValue)
1399 // Get RValue from temp memory as atomic for non-simple lvalues
1400 return RValue::get(CGF.Builder.CreateLoad(addr));
1401 if (LVal.isBitField())
1402 return CGF.EmitLoadOfBitfieldLValue(
1403 LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1404 LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1405 if (LVal.isVectorElt())
1406 return CGF.EmitLoadOfLValue(
1407 LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1408 LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1409 assert(LVal.isExtVectorElt());
1410 return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1411 addr, LVal.getExtVectorElts(), LVal.getType(),
1412 LVal.getBaseInfo(), TBAAAccessInfo()));
1413}
1414
1415/// Return true if \param ValTy is a type that should be cast to an integer
1416/// around the atomic memory operation. If \param CmpXchg is true, floating
1417/// point types are also cast, since the cmpxchg instruction cannot take
1418/// floating point operands. TODO: Allow compare-and-exchange on FP - see
1419/// comment in AtomicExpandPass.cpp.
1420static bool shouldCastToInt(llvm::Type *ValTy, bool CmpXchg) {
1421 if (ValTy->isFloatingPointTy())
1422 return ValTy->isX86_FP80Ty() || CmpXchg;
1423 return !ValTy->isIntegerTy() && !ValTy->isPointerTy();
1424}
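// Illustrative note (added commentary): under this predicate a plain
// `_Atomic(float)` load or store can stay in FP form (`load atomic float`),
// while x86_fp80 values, aggregates, and any value feeding a cmpxchg are first
// copied through an iN of the atomic width via castToAtomicIntPointer().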
1425
1426RValue AtomicInfo::ConvertToValueOrAtomic(llvm::Value *Val,
1427 AggValueSlot ResultSlot,
1428 SourceLocation Loc, bool AsValue,
1429 bool CmpXchg) const {
1430 // Try to avoid a round trip through memory in the easy cases.
1431 assert((Val->getType()->isIntegerTy() || Val->getType()->isPointerTy() ||
1432 Val->getType()->isIEEELikeFPTy()) &&
1433 "Expected integer, pointer or floating point value when converting "
1434 "result.");
1435 if (getEvaluationKind() == TEK_Scalar &&
1436 (((!LVal.isBitField() ||
1437 LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1438 !hasPadding()) ||
1439 !AsValue)) {
1440 auto *ValTy = AsValue
1441 ? CGF.ConvertTypeForMem(ValueTy)
1442 : getAtomicAddress().getElementType();
1443 if (!shouldCastToInt(ValTy, CmpXchg)) {
1444 assert((!ValTy->isIntegerTy() || Val->getType() == ValTy) &&
1445 "Different integer types.");
1446 return RValue::get(CGF.EmitFromMemory(Val, ValueTy));
1447 }
1448 if (llvm::CastInst::isBitCastable(Val->getType(), ValTy))
1449 return RValue::get(CGF.Builder.CreateBitCast(Val, ValTy));
1450 }
1451
1452 // Create a temporary. This needs to be big enough to hold the
1453 // atomic integer.
1454 Address Temp = Address::invalid();
1455 bool TempIsVolatile = false;
1456 if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1457 assert(!ResultSlot.isIgnored());
1458 Temp = ResultSlot.getAddress();
1459 TempIsVolatile = ResultSlot.isVolatile();
1460 } else {
1461 Temp = CreateTempAlloca();
1462 }
1463
1464 // Slam the integer into the temporary.
1465 Address CastTemp = castToAtomicIntPointer(Temp);
1466 CGF.Builder.CreateStore(Val, CastTemp)->setVolatile(TempIsVolatile);
1467
1468 return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1469}
1470
1471void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1472 llvm::AtomicOrdering AO, bool) {
1473  // void __atomic_load(size_t size, void *mem, void *ret, int order);
1474 CallArgList Args;
1475 Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1476 Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1477 Args.add(RValue::get(AddForLoaded), CGF.getContext().VoidPtrTy);
1478 Args.add(
1479 RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1480 CGF.getContext().IntTy);
1481 emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1482}
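// Roughly, the call built above matches the generic libatomic entry point. A
// minimal sketch of the equivalent source-level usage, assuming a hypothetical
// oversized (non-lock-free) payload named Big:
//
//   #include <cstddef>
//   extern "C" void __atomic_load(size_t size, void *mem, void *ret, int order);
//
//   struct Big { char bytes[32]; };               // hypothetical payload
//   Big load_seq_cst(Big *obj) {
//     Big tmp;
//     __atomic_load(sizeof(Big), obj, &tmp, 5 /* __ATOMIC_SEQ_CST */);
//     return tmp;
//   }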
1483
1484llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1485 bool IsVolatile, bool CmpXchg) {
1486 // Okay, we're doing this natively.
1487 Address Addr = getAtomicAddress();
1488 if (shouldCastToInt(Addr.getElementType(), CmpXchg))
1489 Addr = castToAtomicIntPointer(Addr);
1490 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1491 Load->setAtomic(AO);
1492
1493 // Other decoration.
1494 if (IsVolatile)
1495    Load->setVolatile(true);
1496  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1497 return Load;
1498}
1499
1500/// An LValue is a candidate for having its loads and stores be made atomic if
1501/// we are operating under /volatile:ms *and* the LValue itself is volatile and
1502/// such an operation can be performed without a libcall.
1503bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1504 if (!CGM.getLangOpts().MSVolatile) return false;
1505 AtomicInfo AI(*this, LV);
1506 bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1507 // An atomic is inline if we don't need to use a libcall.
1508 bool AtomicIsInline = !AI.shouldUseLibcall();
1509 // MSVC doesn't seem to do this for types wider than a pointer.
1510 if (getContext().getTypeSize(LV.getType()) >
1511 getContext().getTypeSize(getContext().getIntPtrType()))
1512 return false;
1513 return IsVolatile && AtomicIsInline;
1514}
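// A minimal sketch of what this enables under -fms-volatile (/volatile:ms),
// assuming a pointer-sized, lock-free volatile object:
//
//   volatile int Flag;
//   int  read_flag()     { return Flag; }  // load  becomes 'load atomic acquire'
//   void set_flag(int v) { Flag = v; }     // store becomes 'store atomic release'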
1515
1516RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1517 AggValueSlot Slot) {
1518 llvm::AtomicOrdering AO;
1519 bool IsVolatile = LV.isVolatileQualified();
1520 if (LV.getType()->isAtomicType()) {
1521 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1522 } else {
1523 AO = llvm::AtomicOrdering::Acquire;
1524 IsVolatile = true;
1525 }
1526 return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1527}
1528
1529RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1530 bool AsValue, llvm::AtomicOrdering AO,
1531 bool IsVolatile) {
1532 // Check whether we should use a library call.
1533 if (shouldUseLibcall()) {
1534 Address TempAddr = Address::invalid();
1535 if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1536 assert(getEvaluationKind() == TEK_Aggregate);
1537 TempAddr = ResultSlot.getAddress();
1538 } else
1539 TempAddr = CreateTempAlloca();
1540
1541 EmitAtomicLoadLibcall(TempAddr.emitRawPointer(CGF), AO, IsVolatile);
1542
1543 // Okay, turn that back into the original value or whole atomic (for
1544 // non-simple lvalues) type.
1545 return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1546 }
1547
1548 // Okay, we're doing this natively.
1549 auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1550
1551 // If we're ignoring an aggregate return, don't do anything.
1552 if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1553 return RValue::getAggregate(Address::invalid(), false);
1554
1555 // Okay, turn that back into the original value or atomic (for non-simple
1556 // lvalues) type.
1557 return ConvertToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1558}
1559
1560/// Emit a load from an l-value of atomic type. Note that the r-value
1561/// we produce is an r-value of the atomic *value* type.
1562RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1563 llvm::AtomicOrdering AO, bool IsVolatile,
1564 AggValueSlot resultSlot) {
1565 AtomicInfo Atomics(*this, src);
1566 return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1567 IsVolatile);
1568}
1569
1570/// Copy an r-value into memory as part of storing to an atomic type.
1571/// This needs to create a bit-pattern suitable for atomic operations.
1572void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1573 assert(LVal.isSimple());
1574 // If we have an r-value, the rvalue should be of the atomic type,
1575 // which means that the caller is responsible for having zeroed
1576 // any padding. Just do an aggregate copy of that type.
1577 if (rvalue.isAggregate()) {
1578 LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1579 LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1580 getAtomicType());
1581 bool IsVolatile = rvalue.isVolatileQualified() ||
1582 LVal.isVolatileQualified();
1583 CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1584 AggValueSlot::DoesNotOverlap, IsVolatile);
1585 return;
1586 }
1587
1588 // Okay, otherwise we're copying stuff.
1589
1590 // Zero out the buffer if necessary.
1591 emitMemSetZeroIfNecessary();
1592
1593 // Drill past the padding if present.
1594 LValue TempLVal = projectValue();
1595
1596 // Okay, store the rvalue in.
1597 if (rvalue.isScalar()) {
1598 CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1599 } else {
1600 CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1601 }
1602}
1603
1604
1605/// Materialize an r-value into memory for the purposes of storing it
1606/// to an atomic type.
1607Address AtomicInfo::materializeRValue(RValue rvalue) const {
1608 // Aggregate r-values are already in memory, and EmitAtomicStore
1609 // requires them to be values of the atomic type.
1610 if (rvalue.isAggregate())
1611 return rvalue.getAggregateAddress();
1612
1613 // Otherwise, make a temporary and materialize into it.
1614 LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1615 AtomicInfo Atomics(CGF, TempLV);
1616 Atomics.emitCopyIntoMemory(rvalue);
1617 return TempLV.getAddress();
1618}
1619
1620llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
1621 if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple()))
1622 return RVal.getScalarVal();
1623 return nullptr;
1624}
1625
1626llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal, bool CmpXchg) const {
1627 // If we've got a scalar value of the right size, try to avoid going
1628  // through memory. Floats get cast if needed by AtomicExpandPass.
1629 if (llvm::Value *Value = getScalarRValValueOrNull(RVal)) {
1630 if (!shouldCastToInt(Value->getType(), CmpXchg))
1631 return CGF.EmitToMemory(Value, ValueTy);
1632 else {
1633 llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1634 CGF.getLLVMContext(),
1635 LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1636 if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1637 return CGF.Builder.CreateBitCast(Value, InputIntTy);
1638 }
1639 }
1640 // Otherwise, we need to go through memory.
1641 // Put the r-value in memory.
1642 Address Addr = materializeRValue(RVal);
1643
1644 // Cast the temporary to the atomic int type and pull a value out.
1645 Addr = castToAtomicIntPointer(Addr);
1646 return CGF.Builder.CreateLoad(Addr);
1647}
1648
1649std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1650 llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1651 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1652  // Do the atomic compare-and-exchange.
1653 Address Addr = getAtomicAddressAsAtomicIntPointer();
1654 auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr, ExpectedVal, DesiredVal,
1655 Success, Failure);
1656 // Other decoration.
1657 Inst->setVolatile(LVal.isVolatileQualified());
1658 Inst->setWeak(IsWeak);
1659
1660 // Okay, turn that back into the original value type.
1661 auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1662 auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1663 return std::make_pair(PreviousVal, SuccessFailureVal);
1664}
1665
1666llvm::Value *
1667AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1668 llvm::Value *DesiredAddr,
1669 llvm::AtomicOrdering Success,
1670 llvm::AtomicOrdering Failure) {
1671 // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1672 // void *desired, int success, int failure);
1673 CallArgList Args;
1674 Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1675 Args.add(RValue::get(getAtomicPointer()), CGF.getContext().VoidPtrTy);
1676 Args.add(RValue::get(ExpectedAddr), CGF.getContext().VoidPtrTy);
1677 Args.add(RValue::get(DesiredAddr), CGF.getContext().VoidPtrTy);
1678 Args.add(RValue::get(
1679 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1680 CGF.getContext().IntTy);
1681 Args.add(RValue::get(
1682 llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1683 CGF.getContext().IntTy);
1684 auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1685 CGF.getContext().BoolTy, Args);
1686
1687 return SuccessFailureRVal.getScalarVal();
1688}
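// Roughly, the libcall built above follows the generic libatomic ABI. A
// minimal sketch of the equivalent source-level usage, again assuming a
// hypothetical oversized payload named Big:
//
//   #include <cstddef>
//   extern "C" bool __atomic_compare_exchange(size_t size, void *obj,
//                                             void *expected, void *desired,
//                                             int success, int failure);
//
//   struct Big { char bytes[32]; };               // hypothetical payload
//   bool cas_seq_cst(Big *obj, Big *expected, Big *desired) {
//     return __atomic_compare_exchange(sizeof(Big), obj, expected, desired,
//                                      5 /* seq_cst */, 5 /* seq_cst */);
//   }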
1689
1690std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1691 RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1692 llvm::AtomicOrdering Failure, bool IsWeak) {
1693 // Check whether we should use a library call.
1694 if (shouldUseLibcall()) {
1695 // Produce a source address.
1696 Address ExpectedAddr = materializeRValue(Expected);
1697 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1698 llvm::Value *DesiredPtr = materializeRValue(Desired).emitRawPointer(CGF);
1699 auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr,
1700 Success, Failure);
1701 return std::make_pair(
1702 convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1703 SourceLocation(), /*AsValue=*/false),
1704 Res);
1705 }
1706
1707 // If we've got a scalar value of the right size, try to avoid going
1708 // through memory.
1709 auto *ExpectedVal = convertRValueToInt(Expected, /*CmpXchg=*/true);
1710 auto *DesiredVal = convertRValueToInt(Desired, /*CmpXchg=*/true);
1711 auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1712 Failure, IsWeak);
1713 return std::make_pair(
1714 ConvertToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1715 SourceLocation(), /*AsValue=*/false,
1716 /*CmpXchg=*/true),
1717 Res.second);
1718}
1719
1720static void
1721EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1722 const llvm::function_ref<RValue(RValue)> &UpdateOp,
1723 Address DesiredAddr) {
1724 RValue UpRVal;
1725 LValue AtomicLVal = Atomics.getAtomicLValue();
1726 LValue DesiredLVal;
1727 if (AtomicLVal.isSimple()) {
1728 UpRVal = OldRVal;
1729 DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1730 } else {
1731 // Build new lvalue for temp address.
1732 Address Ptr = Atomics.materializeRValue(OldRVal);
1733 LValue UpdateLVal;
1734 if (AtomicLVal.isBitField()) {
1735 UpdateLVal =
1736 LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1737 AtomicLVal.getType(),
1738 AtomicLVal.getBaseInfo(),
1739 AtomicLVal.getTBAAInfo());
1740 DesiredLVal =
1741 LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1742 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1743 AtomicLVal.getTBAAInfo());
1744 } else if (AtomicLVal.isVectorElt()) {
1745 UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1746 AtomicLVal.getType(),
1747 AtomicLVal.getBaseInfo(),
1748 AtomicLVal.getTBAAInfo());
1749 DesiredLVal = LValue::MakeVectorElt(
1750 DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1751 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1752 } else {
1753 assert(AtomicLVal.isExtVectorElt());
1754 UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1755 AtomicLVal.getType(),
1756 AtomicLVal.getBaseInfo(),
1757 AtomicLVal.getTBAAInfo());
1758 DesiredLVal = LValue::MakeExtVectorElt(
1759 DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1760 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1761 }
1762 UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1763 }
1764 // Store new value in the corresponding memory area.
1765 RValue NewRVal = UpdateOp(UpRVal);
1766 if (NewRVal.isScalar()) {
1767 CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1768 } else {
1769 assert(NewRVal.isComplex());
1770 CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1771 /*isInit=*/false);
1772 }
1773}
1774
1775void AtomicInfo::EmitAtomicUpdateLibcall(
1776 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1777 bool IsVolatile) {
1778 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1779
1780 Address ExpectedAddr = CreateTempAlloca();
1781
1782 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1783 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1784 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1785 CGF.EmitBlock(ContBB);
1786 Address DesiredAddr = CreateTempAlloca();
1787 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1788 requiresMemSetZero(getAtomicAddress().getElementType())) {
1789 auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1790 CGF.Builder.CreateStore(OldVal, DesiredAddr);
1791 }
1792  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1793                                           AggValueSlot::ignored(),
1794 SourceLocation(), /*AsValue=*/false);
1795 EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1796 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1797 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1798 auto *Res =
1799 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1800 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1801 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1802}
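// The blocks emitted above form the usual compare-and-swap retry loop. A
// minimal sketch of the same shape in portable C++ (an analogue of the
// emitted control flow, not the code this function produces):
//
//   #include <atomic>
//   template <class T, class F>
//   void atomic_update(std::atomic<T> &Obj, F UpdateOp, std::memory_order AO) {
//     T Expected = Obj.load(AO);                        // initial atomic load
//     while (!Obj.compare_exchange_strong(Expected, UpdateOp(Expected), AO))
//       ;  // on failure, Expected is refreshed with the current value; retry
//   }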
1803
1804void AtomicInfo::EmitAtomicUpdateOp(
1805 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1806 bool IsVolatile) {
1807 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1808
1809 // Do the atomic load.
1810 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1811  // For non-simple lvalues, perform the compare-and-swap procedure.
1812 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1813 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1814 auto *CurBB = CGF.Builder.GetInsertBlock();
1815 CGF.EmitBlock(ContBB);
1816 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1817 /*NumReservedValues=*/2);
1818 PHI->addIncoming(OldVal, CurBB);
1819 Address NewAtomicAddr = CreateTempAlloca();
1820 Address NewAtomicIntAddr =
1821 shouldCastToInt(NewAtomicAddr.getElementType(), /*CmpXchg=*/true)
1822 ? castToAtomicIntPointer(NewAtomicAddr)
1823 : NewAtomicAddr;
1824
1825 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1826 requiresMemSetZero(getAtomicAddress().getElementType())) {
1827 CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1828 }
1829 auto OldRVal = ConvertToValueOrAtomic(PHI, AggValueSlot::ignored(),
1830 SourceLocation(), /*AsValue=*/false,
1831 /*CmpXchg=*/true);
1832 EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1833 auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1834 // Try to write new value using cmpxchg operation.
1835 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1836 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1837 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1838 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1839}
1840
1841static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1842 RValue UpdateRVal, Address DesiredAddr) {
1843 LValue AtomicLVal = Atomics.getAtomicLValue();
1844 LValue DesiredLVal;
1845 // Build new lvalue for temp address.
1846 if (AtomicLVal.isBitField()) {
1847 DesiredLVal =
1848 LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1849 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1850 AtomicLVal.getTBAAInfo());
1851 } else if (AtomicLVal.isVectorElt()) {
1852 DesiredLVal =
1853 LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1854 AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1855 AtomicLVal.getTBAAInfo());
1856 } else {
1857 assert(AtomicLVal.isExtVectorElt());
1858 DesiredLVal = LValue::MakeExtVectorElt(
1859 DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1860 AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1861 }
1862 // Store new value in the corresponding memory area.
1863 assert(UpdateRVal.isScalar());
1864 CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1865}
1866
1867void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1868 RValue UpdateRVal, bool IsVolatile) {
1869 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1870
1871 Address ExpectedAddr = CreateTempAlloca();
1872
1873 EmitAtomicLoadLibcall(ExpectedAddr.emitRawPointer(CGF), AO, IsVolatile);
1874 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1875 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1876 CGF.EmitBlock(ContBB);
1877 Address DesiredAddr = CreateTempAlloca();
1878 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1879 requiresMemSetZero(getAtomicAddress().getElementType())) {
1880 auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1881 CGF.Builder.CreateStore(OldVal, DesiredAddr);
1882 }
1883 EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1884 llvm::Value *ExpectedPtr = ExpectedAddr.emitRawPointer(CGF);
1885 llvm::Value *DesiredPtr = DesiredAddr.emitRawPointer(CGF);
1886 auto *Res =
1887 EmitAtomicCompareExchangeLibcall(ExpectedPtr, DesiredPtr, AO, Failure);
1888 CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1889 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1890}
1891
1892void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1893 bool IsVolatile) {
1894 auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1895
1896 // Do the atomic load.
1897 auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile, /*CmpXchg=*/true);
1898  // For non-simple lvalues, perform the compare-and-swap procedure.
1899 auto *ContBB = CGF.createBasicBlock("atomic_cont");
1900 auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1901 auto *CurBB = CGF.Builder.GetInsertBlock();
1902 CGF.EmitBlock(ContBB);
1903 llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1904 /*NumReservedValues=*/2);
1905 PHI->addIncoming(OldVal, CurBB);
1906 Address NewAtomicAddr = CreateTempAlloca();
1907 Address NewAtomicIntAddr = castToAtomicIntPointer(NewAtomicAddr);
1908 if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1909 requiresMemSetZero(getAtomicAddress().getElementType())) {
1910 CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1911 }
1912 EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1913 auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1914 // Try to write new value using cmpxchg operation.
1915 auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1916 PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1917 CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1918 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1919}
1920
1921void AtomicInfo::EmitAtomicUpdate(
1922 llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1923 bool IsVolatile) {
1924 if (shouldUseLibcall()) {
1925 EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1926 } else {
1927 EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1928 }
1929}
1930
1931void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1932 bool IsVolatile) {
1933 if (shouldUseLibcall()) {
1934 EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1935 } else {
1936 EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1937 }
1938}
1939
1940void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1941 bool isInit) {
1942 bool IsVolatile = lvalue.isVolatileQualified();
1943 llvm::AtomicOrdering AO;
1944 if (lvalue.getType()->isAtomicType()) {
1945 AO = llvm::AtomicOrdering::SequentiallyConsistent;
1946 } else {
1947 AO = llvm::AtomicOrdering::Release;
1948 IsVolatile = true;
1949 }
1950 return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1951}
1952
1953/// Emit a store to an l-value of atomic type.
1954///
1955/// Note that the r-value is expected to be an r-value *of the atomic
1956/// type*; this means that for aggregate r-values, it should include
1957/// storage for any padding that was necessary.
1958void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1959 llvm::AtomicOrdering AO, bool IsVolatile,
1960 bool isInit) {
1961 // If this is an aggregate r-value, it should agree in type except
1962 // maybe for address-space qualification.
1963  assert(!rvalue.isAggregate() ||
1964         rvalue.getAggregateAddress().getElementType() ==
1965 dest.getAddress().getElementType());
1966
1967 AtomicInfo atomics(*this, dest);
1968 LValue LVal = atomics.getAtomicLValue();
1969
1970 // If this is an initialization, just put the value there normally.
1971 if (LVal.isSimple()) {
1972 if (isInit) {
1973 atomics.emitCopyIntoMemory(rvalue);
1974 return;
1975 }
1976
1977 // Check whether we should use a library call.
1978 if (atomics.shouldUseLibcall()) {
1979 // Produce a source address.
1980 Address srcAddr = atomics.materializeRValue(rvalue);
1981
1982 // void __atomic_store(size_t size, void *mem, void *val, int order)
1983 CallArgList args;
1984 args.add(RValue::get(atomics.getAtomicSizeValue()),
1985 getContext().getSizeType());
1986 args.add(RValue::get(atomics.getAtomicPointer()), getContext().VoidPtrTy);
1987      args.add(RValue::get(srcAddr.emitRawPointer(*this)),
1988               getContext().VoidPtrTy);
1989 args.add(
1990 RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1991 getContext().IntTy);
1992 emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1993 return;
1994 }
1995
1996 // Okay, we're doing this natively.
1997 llvm::Value *ValToStore = atomics.convertRValueToInt(rvalue);
1998
1999 // Do the atomic store.
2000 Address Addr = atomics.getAtomicAddress();
2001 if (llvm::Value *Value = atomics.getScalarRValValueOrNull(rvalue))
2002 if (shouldCastToInt(Value->getType(), /*CmpXchg=*/false)) {
2003 Addr = atomics.castToAtomicIntPointer(Addr);
2004 ValToStore = Builder.CreateIntCast(ValToStore, Addr.getElementType(),
2005 /*isSigned=*/false);
2006 }
2007 llvm::StoreInst *store = Builder.CreateStore(ValToStore, Addr);
2008
2009 if (AO == llvm::AtomicOrdering::Acquire)
2010 AO = llvm::AtomicOrdering::Monotonic;
2011 else if (AO == llvm::AtomicOrdering::AcquireRelease)
2012 AO = llvm::AtomicOrdering::Release;
2013 // Initializations don't need to be atomic.
2014 if (!isInit)
2015 store->setAtomic(AO);
2016
2017 // Other decoration.
2018 if (IsVolatile)
2019      store->setVolatile(true);
2020    CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2021 return;
2022 }
2023
2024 // Emit simple atomic update operation.
2025 atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2026}
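// A minimal sketch of the common native path above: assignment to an _Atomic
// lvalue arrives here with seq_cst ordering, and because an LLVM store can
// only carry a release-side ordering, a requested acquire (or acq_rel)
// ordering is weakened to monotonic (or release) first.
//
//   _Atomic long long Counter;              // C11 atomics (clang extension in C++)
//   void set(long long v) { Counter = v; }  // 'store atomic ... seq_cst' where
//                                           // 64-bit atomics are lock-free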
2027
2028/// Emit a compare-and-exchange op for an atomic type.
2029///
2030std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2031    LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2032 llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2033 AggValueSlot Slot) {
2034 // If this is an aggregate r-value, it should agree in type except
2035 // maybe for address-space qualification.
2036 assert(!Expected.isAggregate() ||
2037 Expected.getAggregateAddress().getElementType() ==
2038 Obj.getAddress().getElementType());
2039 assert(!Desired.isAggregate() ||
2040 Desired.getAggregateAddress().getElementType() ==
2041 Obj.getAddress().getElementType());
2042 AtomicInfo Atomics(*this, Obj);
2043
2044 return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2045 IsWeak);
2046}
2047
2048llvm::AtomicRMWInst *
2049CodeGenFunction::emitAtomicRMWInst(llvm::AtomicRMWInst::BinOp Op, Address Addr,
2050 llvm::Value *Val, llvm::AtomicOrdering Order,
2051 llvm::SyncScope::ID SSID,
2052 const AtomicExpr *AE) {
2053 llvm::AtomicRMWInst *RMW =
2054 Builder.CreateAtomicRMW(Op, Addr, Val, Order, SSID);
2055 getTargetHooks().setTargetAtomicMetadata(*this, *RMW, AE);
2056 return RMW;
2057}
2058
2059void CodeGenFunction::EmitAtomicUpdate(
2060 LValue LVal, llvm::AtomicOrdering AO,
2061 const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2062 AtomicInfo Atomics(*this, LVal);
2063 Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2064}
2065
2066void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2067 AtomicInfo atomics(*this, dest);
2068
2069 switch (atomics.getEvaluationKind()) {
2070 case TEK_Scalar: {
2071 llvm::Value *value = EmitScalarExpr(init);
2072 atomics.emitCopyIntoMemory(RValue::get(value));
2073 return;
2074 }
2075
2076 case TEK_Complex: {
2077 ComplexPairTy value = EmitComplexExpr(init);
2078 atomics.emitCopyIntoMemory(RValue::getComplex(value));
2079 return;
2080 }
2081
2082 case TEK_Aggregate: {
2083 // Fix up the destination if the initializer isn't an expression
2084 // of atomic type.
2085 bool Zeroed = false;
2086 if (!init->getType()->isAtomicType()) {
2087 Zeroed = atomics.emitMemSetZeroIfNecessary();
2088 dest = atomics.projectValue();
2089 }
2090
2091    // Evaluate the expression directly into the destination.
2092    AggValueSlot slot = AggValueSlot::forLValue(
2093        dest, AggValueSlot::IsNotDestructed,
2094        AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2095        AggValueSlot::DoesNotOverlap,
2096        Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2097
2098 EmitAggExpr(init, slot);
2099 return;
2100 }
2101 }
2102 llvm_unreachable("bad evaluation kind");
2103}
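// A minimal sketch of the aggregate case above (C11), assuming a hypothetical
// three-byte struct whose _Atomic form is padded to 4 bytes:
//
//   struct Tag { char bytes[3]; };
//   void init(_Atomic struct Tag *p, struct Tag v) {
//     __c11_atomic_init(p, v);   // TEK_Aggregate: the padding byte is zeroed,
//   }                            // then the 3-byte value is copied in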
Definition: ASTContext.h:160