CGAtomic.cpp
1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30  class AtomicInfo {
31  CodeGenFunction &CGF;
32  QualType AtomicTy;
33  QualType ValueTy;
34  uint64_t AtomicSizeInBits;
35  uint64_t ValueSizeInBits;
36  CharUnits AtomicAlign;
37  CharUnits ValueAlign;
38  TypeEvaluationKind EvaluationKind;
39  bool UseLibcall;
40  LValue LVal;
41  CGBitFieldInfo BFI;
42  public:
43  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44  : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45  EvaluationKind(TEK_Scalar), UseLibcall(true) {
46  assert(!lvalue.isGlobalReg());
47  ASTContext &C = CGF.getContext();
48  if (lvalue.isSimple()) {
49  AtomicTy = lvalue.getType();
50  if (auto *ATy = AtomicTy->getAs<AtomicType>())
51  ValueTy = ATy->getValueType();
52  else
53  ValueTy = AtomicTy;
54  EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56  uint64_t ValueAlignInBits;
57  uint64_t AtomicAlignInBits;
58  TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59  ValueSizeInBits = ValueTI.Width;
60  ValueAlignInBits = ValueTI.Align;
61 
62  TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63  AtomicSizeInBits = AtomicTI.Width;
64  AtomicAlignInBits = AtomicTI.Align;
65 
66  assert(ValueSizeInBits <= AtomicSizeInBits);
67  assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69  AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70  ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71  if (lvalue.getAlignment().isZero())
72  lvalue.setAlignment(AtomicAlign);
73 
74  LVal = lvalue;
75  } else if (lvalue.isBitField()) {
76  ValueTy = lvalue.getType();
77  ValueSizeInBits = C.getTypeSize(ValueTy);
78  auto &OrigBFI = lvalue.getBitFieldInfo();
79  auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80  AtomicSizeInBits = C.toBits(
81  C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82  .alignTo(lvalue.getAlignment()));
83  auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84  auto OffsetInChars =
85  (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86  lvalue.getAlignment();
87  VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
88  CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
89  auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
90  VoidPtrAddr,
91  CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92  "atomic_bitfield_base");
93  BFI = OrigBFI;
94  BFI.Offset = Offset;
95  BFI.StorageSize = AtomicSizeInBits;
96  BFI.StorageOffset += OffsetInChars;
97  LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98  BFI, lvalue.getType(), lvalue.getBaseInfo(),
99  lvalue.getTBAAInfo());
100  AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101  if (AtomicTy.isNull()) {
102  llvm::APInt Size(
103  /*numBits=*/32,
104  C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105  AtomicTy =
106  C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107  /*IndexTypeQuals=*/0);
108  }
109  AtomicAlign = ValueAlign = lvalue.getAlignment();
110  } else if (lvalue.isVectorElt()) {
111  ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112  ValueSizeInBits = C.getTypeSize(ValueTy);
113  AtomicTy = lvalue.getType();
114  AtomicSizeInBits = C.getTypeSize(AtomicTy);
115  AtomicAlign = ValueAlign = lvalue.getAlignment();
116  LVal = lvalue;
117  } else {
118  assert(lvalue.isExtVectorElt());
119  ValueTy = lvalue.getType();
120  ValueSizeInBits = C.getTypeSize(ValueTy);
121  AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122  lvalue.getType(), cast<llvm::FixedVectorType>(
123  lvalue.getExtVectorAddress().getElementType())
124  ->getNumElements());
125  AtomicSizeInBits = C.getTypeSize(AtomicTy);
126  AtomicAlign = ValueAlign = lvalue.getAlignment();
127  LVal = lvalue;
128  }
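  // Whatever the l-value kind, ask the target whether an atomic of this size
  // and alignment can be lowered inline; otherwise the __atomic_* libcalls
  // are used (see UseLibcall below).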
129  UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130  AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131  }
132 
133  QualType getAtomicType() const { return AtomicTy; }
134  QualType getValueType() const { return ValueTy; }
135  CharUnits getAtomicAlignment() const { return AtomicAlign; }
136  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
137  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
138  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
139  bool shouldUseLibcall() const { return UseLibcall; }
140  const LValue &getAtomicLValue() const { return LVal; }
141  llvm::Value *getAtomicPointer() const {
142  if (LVal.isSimple())
143  return LVal.getPointer(CGF);
144  else if (LVal.isBitField())
145  return LVal.getBitFieldPointer();
146  else if (LVal.isVectorElt())
147  return LVal.getVectorPointer();
148  assert(LVal.isExtVectorElt());
149  return LVal.getExtVectorPointer();
150  }
151  Address getAtomicAddress() const {
152  return Address(getAtomicPointer(), getAtomicAlignment());
153  }
154 
155  Address getAtomicAddressAsAtomicIntPointer() const {
156  return emitCastToAtomicIntPointer(getAtomicAddress());
157  }
158 
159  /// Is the atomic size larger than the underlying value type?
160  ///
161  /// Note that the absence of padding does not mean that atomic
162  /// objects are completely interchangeable with non-atomic
163  /// objects: we might have promoted the alignment of a type
164  /// without making it bigger.
165  bool hasPadding() const {
166  return (ValueSizeInBits != AtomicSizeInBits);
167  }
168 
169  bool emitMemSetZeroIfNecessary() const;
170 
171  llvm::Value *getAtomicSizeValue() const {
172  CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
173  return CGF.CGM.getSize(size);
174  }
175 
176  /// Cast the given pointer to an integer pointer suitable for atomic
177  /// operations.
178  Address emitCastToAtomicIntPointer(Address Addr) const;
179 
180  /// If Addr is compatible with the iN that will be used for an atomic
181  /// operation, bitcast it. Otherwise, create a temporary that is suitable
182  /// and copy the value across.
183  Address convertToAtomicIntPointer(Address Addr) const;
184 
185  /// Turn an atomic-layout object into an r-value.
186  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
187  SourceLocation loc, bool AsValue) const;
188 
189  /// Converts an r-value to an integer value.
190  llvm::Value *convertRValueToInt(RValue RVal) const;
191 
192  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
193  AggValueSlot ResultSlot,
194  SourceLocation Loc, bool AsValue) const;
195 
196  /// Copy an atomic r-value into atomic-layout memory.
197  void emitCopyIntoMemory(RValue rvalue) const;
198 
199  /// Project an l-value down to the value field.
200  LValue projectValue() const {
201  assert(LVal.isSimple());
202  Address addr = getAtomicAddress();
203  if (hasPadding())
204  addr = CGF.Builder.CreateStructGEP(addr, 0);
205 
206  return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
207  LVal.getBaseInfo(), LVal.getTBAAInfo());
208  }
209 
210  /// Emits atomic load.
211  /// \returns Loaded value.
212  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
213  bool AsValue, llvm::AtomicOrdering AO,
214  bool IsVolatile);
215 
216  /// Emits atomic compare-and-exchange sequence.
217  /// \param Expected Expected value.
218  /// \param Desired Desired value.
219  /// \param Success Atomic ordering for success operation.
220  /// \param Failure Atomic ordering for failed operation.
221  /// \param IsWeak true if atomic operation is weak, false otherwise.
222  /// \returns Pair of values: previous value from storage (value type) and
223  /// boolean flag (i1 type) with true if success and false otherwise.
224  std::pair<RValue, llvm::Value *>
225  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
226  llvm::AtomicOrdering Success =
227  llvm::AtomicOrdering::SequentiallyConsistent,
228  llvm::AtomicOrdering Failure =
229  llvm::AtomicOrdering::SequentiallyConsistent,
230  bool IsWeak = false);
231 
232  /// Emits atomic update.
233  /// \param AO Atomic ordering.
234  /// \param UpdateOp Update operation for the current lvalue.
235  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
236  const llvm::function_ref<RValue(RValue)> &UpdateOp,
237  bool IsVolatile);
238  /// Emits atomic update.
239  /// \param AO Atomic ordering.
240  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
241  bool IsVolatile);
242 
243  /// Materialize an atomic r-value in atomic-layout memory.
244  Address materializeRValue(RValue rvalue) const;
245 
246  /// Creates temp alloca for intermediate operations on atomic value.
247  Address CreateTempAlloca() const;
248  private:
249  bool requiresMemSetZero(llvm::Type *type) const;
250 
251 
252  /// Emits atomic load as a libcall.
253  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
254  llvm::AtomicOrdering AO, bool IsVolatile);
255  /// Emits atomic load as LLVM instruction.
256  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
257  /// Emits atomic compare-and-exchange op as a libcall.
258  llvm::Value *EmitAtomicCompareExchangeLibcall(
259  llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
260  llvm::AtomicOrdering Success =
261  llvm::AtomicOrdering::SequentiallyConsistent,
262  llvm::AtomicOrdering Failure =
263  llvm::AtomicOrdering::SequentiallyConsistent);
264  /// Emits atomic compare-and-exchange op as LLVM instruction.
265  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
266  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
267  llvm::AtomicOrdering Success =
268  llvm::AtomicOrdering::SequentiallyConsistent,
269  llvm::AtomicOrdering Failure =
270  llvm::AtomicOrdering::SequentiallyConsistent,
271  bool IsWeak = false);
272  /// Emit atomic update as libcalls.
273  void
274  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
275  const llvm::function_ref<RValue(RValue)> &UpdateOp,
276  bool IsVolatile);
277  /// Emit atomic update as LLVM instructions.
278  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
279  const llvm::function_ref<RValue(RValue)> &UpdateOp,
280  bool IsVolatile);
281  /// Emit atomic update as libcalls.
282  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
283  bool IsVolatile);
284  /// Emit atomic update as LLVM instructions.
285  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRal,
286  bool IsVolatile);
287  };
288 }
289 
290 Address AtomicInfo::CreateTempAlloca() const {
291  Address TempAlloca = CGF.CreateMemTemp(
292  (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
293  : AtomicTy,
294  getAtomicAlignment(),
295  "atomic-temp");
296  // Cast to pointer to value type for bitfields.
297  if (LVal.isBitField())
298  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
299  TempAlloca, getAtomicAddress().getType());
300  return TempAlloca;
301 }
302 
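// Helper for emitting a call to one of the __atomic_* runtime functions;
// the callee is marked nounwind and willreturn.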
303 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
304  StringRef fnName,
305  QualType resultType,
306  CallArgList &args) {
307  const CGFunctionInfo &fnInfo =
308  CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
309  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
310  llvm::AttrBuilder fnAttrB;
311  fnAttrB.addAttribute(llvm::Attribute::NoUnwind);
312  fnAttrB.addAttribute(llvm::Attribute::WillReturn);
313  llvm::AttributeList fnAttrs = llvm::AttributeList::get(
314  CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex, fnAttrB);
315 
316  llvm::FunctionCallee fn =
317  CGF.CGM.CreateRuntimeFunction(fnTy, fnName, fnAttrs);
318  auto callee = CGCallee::forDirect(fn);
319  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
320 }
321 
322 /// Does a store of the given IR type modify the full expected width?
323 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
324  uint64_t expectedSize) {
325  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
326 }
327 
328 /// Does the atomic type require memsetting to zero before initialization?
329 ///
330 /// The IR type is provided as a way of making certain queries faster.
331 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
332  // If the atomic type has size padding, we definitely need a memset.
333  if (hasPadding()) return true;
334 
335  // Otherwise, do some simple heuristics to try to avoid it:
336  switch (getEvaluationKind()) {
337  // For scalars and complexes, check whether the store size of the
338  // type uses the full size.
339  case TEK_Scalar:
340  return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
341  case TEK_Complex:
342  return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
343  AtomicSizeInBits / 2);
344 
345  // Padding in structs has an undefined bit pattern. User beware.
346  case TEK_Aggregate:
347  return false;
348  }
349  llvm_unreachable("bad evaluation kind");
350 }
351 
352 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
353  assert(LVal.isSimple());
354  llvm::Value *addr = LVal.getPointer(CGF);
355  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
356  return false;
357 
358  CGF.Builder.CreateMemSet(
359  addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
360  CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
361  LVal.getAlignment().getAsAlign());
362  return true;
363 }
364 
365 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
366  Address Dest, Address Ptr,
367  Address Val1, Address Val2,
368  uint64_t Size,
369  llvm::AtomicOrdering SuccessOrder,
370  llvm::AtomicOrdering FailureOrder,
371  llvm::SyncScope::ID Scope) {
372  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
373  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
374  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
375 
376  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
377  Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
378  Scope);
379  Pair->setVolatile(E->isVolatile());
380  Pair->setWeak(IsWeak);
381 
382  // Cmp holds the result of the compare-exchange operation: true on success,
383  // false on failure.
384  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
385  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
386 
387  // This basic block is used to hold the store instruction if the operation
388  // failed.
389  llvm::BasicBlock *StoreExpectedBB =
390  CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
391 
392  // This basic block is the exit point of the operation, we should end up
393  // here regardless of whether or not the operation succeeded.
394  llvm::BasicBlock *ContinueBB =
395  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
396 
397  // Update Expected if Expected isn't equal to Old, otherwise branch to the
398  // exit point.
399  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
400 
401  CGF.Builder.SetInsertPoint(StoreExpectedBB);
402  // Update the memory at Expected with Old's value.
403  CGF.Builder.CreateStore(Old, Val1);
404  // Finally, branch to the exit point.
405  CGF.Builder.CreateBr(ContinueBB);
406 
407  CGF.Builder.SetInsertPoint(ContinueBB);
408  // Update the memory at Dest with Cmp's value.
409  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
410 }
411 
412 /// Given an ordering required on success, emit all possible cmpxchg
413 /// instructions to cope with the provided (but possibly only dynamically known)
414 /// FailureOrder.
415 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
416  bool IsWeak, Address Dest, Address Ptr,
417  Address Val1, Address Val2,
418  llvm::Value *FailureOrderVal,
419  uint64_t Size,
420  llvm::AtomicOrdering SuccessOrder,
421  llvm::SyncScope::ID Scope) {
422  llvm::AtomicOrdering FailureOrder;
423  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
424  auto FOS = FO->getSExtValue();
425  if (!llvm::isValidAtomicOrderingCABI(FOS))
426  FailureOrder = llvm::AtomicOrdering::Monotonic;
427  else
428  switch ((llvm::AtomicOrderingCABI)FOS) {
429  case llvm::AtomicOrderingCABI::relaxed:
430  // 31.7.2.18: "The failure argument shall not be memory_order_release
431  // nor memory_order_acq_rel". Fallback to monotonic.
432  case llvm::AtomicOrderingCABI::release:
433  case llvm::AtomicOrderingCABI::acq_rel:
434  FailureOrder = llvm::AtomicOrdering::Monotonic;
435  break;
436  case llvm::AtomicOrderingCABI::consume:
437  case llvm::AtomicOrderingCABI::acquire:
438  FailureOrder = llvm::AtomicOrdering::Acquire;
439  break;
440  case llvm::AtomicOrderingCABI::seq_cst:
441  FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
442  break;
443  }
444  // Prior to C++17, "the failure argument shall be no stronger than the
445  // success argument". This condition has been lifted and the only
446  // precondition is 31.7.2.18. Effectively treat this as a DR and skip
447  // language version checks.
448  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
449  FailureOrder, Scope);
450  return;
451  }
452 
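  // The failure ordering is only known at run time here, so emit a switch over
  // the valid orderings and a separate cmpxchg for each one.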
453  // Create all the relevant BB's
454  auto *MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
455  auto *AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
456  auto *SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
457  auto *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458 
459  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
460  // doesn't matter unless someone is crazy enough to use something that
461  // doesn't fold to a constant for the ordering.
462  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
463  // Implemented as acquire, since it's the closest in LLVM.
464  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
465  AcquireBB);
466  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
467  AcquireBB);
468  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
469  SeqCstBB);
470 
471  // Emit all the different atomics
472  CGF.Builder.SetInsertPoint(MonotonicBB);
473  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474  Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
475  CGF.Builder.CreateBr(ContBB);
476 
477  CGF.Builder.SetInsertPoint(AcquireBB);
478  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
479  llvm::AtomicOrdering::Acquire, Scope);
480  CGF.Builder.CreateBr(ContBB);
481 
482  CGF.Builder.SetInsertPoint(SeqCstBB);
483  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
484  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485  CGF.Builder.CreateBr(ContBB);
486 
487  CGF.Builder.SetInsertPoint(ContBB);
488 }
489 
490 /// Duplicate the atomic min/max operation in conventional IR for the builtin
491 /// variants that return the new rather than the original value.
492 static llvm::Value *EmitPostAtomicMinMax(CGBuilderTy &Builder,
493  AtomicExpr::AtomicOp Op,
494  bool IsSigned,
495  llvm::Value *OldVal,
496  llvm::Value *RHS) {
497  llvm::CmpInst::Predicate Pred;
498  switch (Op) {
499  default:
500  llvm_unreachable("Unexpected min/max operation");
501  case AtomicExpr::AO__atomic_max_fetch:
502  Pred = IsSigned ? llvm::CmpInst::ICMP_SGT : llvm::CmpInst::ICMP_UGT;
503  break;
504  case AtomicExpr::AO__atomic_min_fetch:
505  Pred = IsSigned ? llvm::CmpInst::ICMP_SLT : llvm::CmpInst::ICMP_ULT;
506  break;
507  }
508  llvm::Value *Cmp = Builder.CreateICmp(Pred, OldVal, RHS, "tst");
509  return Builder.CreateSelect(Cmp, OldVal, RHS, "newval");
510 }
511 
512 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
513  Address Ptr, Address Val1, Address Val2,
514  llvm::Value *IsWeak, llvm::Value *FailureOrder,
515  uint64_t Size, llvm::AtomicOrdering Order,
516  llvm::SyncScope::ID Scope) {
517  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
518  bool PostOpMinMax = false;
519  unsigned PostOp = 0;
520 
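  // Compare-exchange, load, and store forms are emitted directly below and
  // return early; the remaining opcodes select an atomicrmw operation and
  // fall through to the common code after the switch.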
521  switch (E->getOp()) {
522  case AtomicExpr::AO__c11_atomic_init:
523  case AtomicExpr::AO__opencl_atomic_init:
524  llvm_unreachable("Already handled!");
525 
526  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
527  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
528  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
529  FailureOrder, Size, Order, Scope);
530  return;
531  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
532  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
533  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
534  FailureOrder, Size, Order, Scope);
535  return;
536  case AtomicExpr::AO__atomic_compare_exchange:
537  case AtomicExpr::AO__atomic_compare_exchange_n: {
538  if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
539  emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
540  Val1, Val2, FailureOrder, Size, Order, Scope);
541  } else {
542  // Create all the relevant BB's
543  llvm::BasicBlock *StrongBB =
544  CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
545  llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
546  llvm::BasicBlock *ContBB =
547  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
548 
549  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
550  SI->addCase(CGF.Builder.getInt1(false), StrongBB);
551 
552  CGF.Builder.SetInsertPoint(StrongBB);
553  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
554  FailureOrder, Size, Order, Scope);
555  CGF.Builder.CreateBr(ContBB);
556 
557  CGF.Builder.SetInsertPoint(WeakBB);
558  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
559  FailureOrder, Size, Order, Scope);
560  CGF.Builder.CreateBr(ContBB);
561 
562  CGF.Builder.SetInsertPoint(ContBB);
563  }
564  return;
565  }
566  case AtomicExpr::AO__c11_atomic_load:
567  case AtomicExpr::AO__opencl_atomic_load:
568  case AtomicExpr::AO__atomic_load_n:
569  case AtomicExpr::AO__atomic_load: {
570  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
571  Load->setAtomic(Order, Scope);
572  Load->setVolatile(E->isVolatile());
573  CGF.Builder.CreateStore(Load, Dest);
574  return;
575  }
576 
577  case AtomicExpr::AO__c11_atomic_store:
578  case AtomicExpr::AO__opencl_atomic_store:
579  case AtomicExpr::AO__atomic_store:
580  case AtomicExpr::AO__atomic_store_n: {
581  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
582  llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
583  Store->setAtomic(Order, Scope);
584  Store->setVolatile(E->isVolatile());
585  return;
586  }
587 
588  case AtomicExpr::AO__c11_atomic_exchange:
589  case AtomicExpr::AO__opencl_atomic_exchange:
590  case AtomicExpr::AO__atomic_exchange_n:
591  case AtomicExpr::AO__atomic_exchange:
592  Op = llvm::AtomicRMWInst::Xchg;
593  break;
594 
595  case AtomicExpr::AO__atomic_add_fetch:
596  PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FAdd
597  : llvm::Instruction::Add;
598  LLVM_FALLTHROUGH;
599  case AtomicExpr::AO__c11_atomic_fetch_add:
600  case AtomicExpr::AO__opencl_atomic_fetch_add:
601  case AtomicExpr::AO__atomic_fetch_add:
602  Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FAdd
603  : llvm::AtomicRMWInst::Add;
604  break;
605 
606  case AtomicExpr::AO__atomic_sub_fetch:
607  PostOp = E->getValueType()->isFloatingType() ? llvm::Instruction::FSub
608  : llvm::Instruction::Sub;
609  LLVM_FALLTHROUGH;
610  case AtomicExpr::AO__c11_atomic_fetch_sub:
611  case AtomicExpr::AO__opencl_atomic_fetch_sub:
612  case AtomicExpr::AO__atomic_fetch_sub:
613  Op = E->getValueType()->isFloatingType() ? llvm::AtomicRMWInst::FSub
614  : llvm::AtomicRMWInst::Sub;
615  break;
616 
617  case AtomicExpr::AO__atomic_min_fetch:
618  PostOpMinMax = true;
619  LLVM_FALLTHROUGH;
620  case AtomicExpr::AO__c11_atomic_fetch_min:
621  case AtomicExpr::AO__opencl_atomic_fetch_min:
622  case AtomicExpr::AO__atomic_fetch_min:
623  Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
624  : llvm::AtomicRMWInst::UMin;
625  break;
626 
627  case AtomicExpr::AO__atomic_max_fetch:
628  PostOpMinMax = true;
629  LLVM_FALLTHROUGH;
630  case AtomicExpr::AO__c11_atomic_fetch_max:
631  case AtomicExpr::AO__opencl_atomic_fetch_max:
632  case AtomicExpr::AO__atomic_fetch_max:
633  Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
634  : llvm::AtomicRMWInst::UMax;
635  break;
636 
637  case AtomicExpr::AO__atomic_and_fetch:
638  PostOp = llvm::Instruction::And;
639  LLVM_FALLTHROUGH;
640  case AtomicExpr::AO__c11_atomic_fetch_and:
641  case AtomicExpr::AO__opencl_atomic_fetch_and:
642  case AtomicExpr::AO__atomic_fetch_and:
643  Op = llvm::AtomicRMWInst::And;
644  break;
645 
646  case AtomicExpr::AO__atomic_or_fetch:
647  PostOp = llvm::Instruction::Or;
648  LLVM_FALLTHROUGH;
649  case AtomicExpr::AO__c11_atomic_fetch_or:
650  case AtomicExpr::AO__opencl_atomic_fetch_or:
651  case AtomicExpr::AO__atomic_fetch_or:
652  Op = llvm::AtomicRMWInst::Or;
653  break;
654 
655  case AtomicExpr::AO__atomic_xor_fetch:
656  PostOp = llvm::Instruction::Xor;
657  LLVM_FALLTHROUGH;
658  case AtomicExpr::AO__c11_atomic_fetch_xor:
659  case AtomicExpr::AO__opencl_atomic_fetch_xor:
660  case AtomicExpr::AO__atomic_fetch_xor:
661  Op = llvm::AtomicRMWInst::Xor;
662  break;
663 
664  case AtomicExpr::AO__atomic_nand_fetch:
665  PostOp = llvm::Instruction::And; // the NOT is special cased below
666  LLVM_FALLTHROUGH;
667  case AtomicExpr::AO__atomic_fetch_nand:
668  Op = llvm::AtomicRMWInst::Nand;
669  break;
670  }
671 
672  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
673  llvm::AtomicRMWInst *RMWI =
674  CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
675  RMWI->setVolatile(E->isVolatile());
676 
677  // For __atomic_*_fetch operations, perform the operation again to
678  // determine the value which was written.
679  llvm::Value *Result = RMWI;
680  if (PostOpMinMax)
681  Result = EmitPostAtomicMinMax(CGF.Builder, E->getOp(),
682  E->getValueType()->isSignedIntegerType(),
683  RMWI, LoadVal1);
684  else if (PostOp)
685  Result = CGF.Builder.CreateBinOp((llvm::Instruction::BinaryOps)PostOp, RMWI,
686  LoadVal1);
687  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
688  Result = CGF.Builder.CreateNot(Result);
689  CGF.Builder.CreateStore(Result, Dest);
690 }
691 
692 // This function emits any expression (scalar, complex, or aggregate)
693 // into a temporary alloca.
694 static Address
695 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
696  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
697  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
698  /*Init*/ true);
699  return DeclPtr;
700 }
701 
702 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
703  Address Ptr, Address Val1, Address Val2,
704  llvm::Value *IsWeak, llvm::Value *FailureOrder,
705  uint64_t Size, llvm::AtomicOrdering Order,
706  llvm::Value *Scope) {
707  auto ScopeModel = Expr->getScopeModel();
708 
709  // LLVM atomic instructions always have a sync scope. If the clang atomic
710  // expression has no scope operand, use the default LLVM sync scope.
711  if (!ScopeModel) {
712  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
713  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
714  return;
715  }
716 
717  // Handle constant scope.
718  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
719  auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
720  CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
721  Order, CGF.CGM.getLLVMContext());
722  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
723  Order, SCID);
724  return;
725  }
726 
727  // Handle non-constant scope.
728  auto &Builder = CGF.Builder;
729  auto Scopes = ScopeModel->getRuntimeValues();
730  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
731  for (auto S : Scopes)
732  BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
733 
734  llvm::BasicBlock *ContBB =
735  CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
736 
737  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
738  // If an unsupported sync scope is encountered at run time, assume a
739  // fallback sync scope value.
740  auto FallBack = ScopeModel->getFallBackValue();
741  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
742  for (auto S : Scopes) {
743  auto *B = BB[S];
744  if (S != FallBack)
745  SI->addCase(Builder.getInt32(S), B);
746 
747  Builder.SetInsertPoint(B);
748  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
749  Order,
750  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
751  ScopeModel->map(S),
752  Order,
753  CGF.getLLVMContext()));
754  Builder.CreateBr(ContBB);
755  }
756 
757  Builder.SetInsertPoint(ContBB);
758 }
759 
760 static void
761 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
762  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
763  SourceLocation Loc, CharUnits SizeInChars) {
764  if (UseOptimizedLibcall) {
765  // Load value and pass it to the function directly.
766  CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
767  int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
768  ValTy =
769  CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
770  llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
771  SizeInBits)->getPointerTo();
772  Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
773  Val = CGF.EmitLoadOfScalar(Ptr, false,
774  CGF.getContext().getPointerType(ValTy),
775  Loc);
776  // Coerce the value into an appropriately sized integer type.
777  Args.add(RValue::get(Val), ValTy);
778  } else {
779  // Non-optimized functions always take a reference.
780  Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
781  CGF.getContext().VoidPtrTy);
782  }
783 }
784 
785 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
786  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
787  QualType MemTy = AtomicTy;
788  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
789  MemTy = AT->getValueType();
790  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
791 
792  Address Val1 = Address::invalid();
793  Address Val2 = Address::invalid();
794  Address Dest = Address::invalid();
795  Address Ptr = EmitPointerWithAlignment(E->getPtr());
796 
797  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
798  E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
799  LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
800  EmitAtomicInit(E->getVal1(), lvalue);
801  return RValue::get(nullptr);
802  }
803 
804  auto TInfo = getContext().getTypeInfoInChars(AtomicTy);
805  uint64_t Size = TInfo.Width.getQuantity();
806  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
807 
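  // Atomics that are wider than the target's maximum inline atomic width, or
  // whose object is insufficiently aligned, cannot be lowered to native
  // instructions and take the libcall path below.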
808  bool Oversized = getContext().toBits(TInfo.Width) > MaxInlineWidthInBits;
809  bool Misaligned = (Ptr.getAlignment() % TInfo.Width) != 0;
810  bool UseLibcall = Misaligned | Oversized;
811  bool ShouldCastToIntPtrTy = true;
812 
813  CharUnits MaxInlineWidth =
814  getContext().toCharUnitsFromBits(MaxInlineWidthInBits);
815 
816  DiagnosticsEngine &Diags = CGM.getDiags();
817 
818  if (Misaligned) {
819  Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
820  << (int)TInfo.Width.getQuantity()
821  << (int)Ptr.getAlignment().getQuantity();
822  }
823 
824  if (Oversized) {
825  Diags.Report(E->getBeginLoc(), diag::warn_atomic_op_oversized)
826  << (int)TInfo.Width.getQuantity() << (int)MaxInlineWidth.getQuantity();
827  }
828 
829  llvm::Value *Order = EmitScalarExpr(E->getOrder());
830  llvm::Value *Scope =
831  E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
832 
833  switch (E->getOp()) {
834  case AtomicExpr::AO__c11_atomic_init:
835  case AtomicExpr::AO__opencl_atomic_init:
836  llvm_unreachable("Already handled above with EmitAtomicInit!");
837 
838  case AtomicExpr::AO__c11_atomic_load:
839  case AtomicExpr::AO__opencl_atomic_load:
840  case AtomicExpr::AO__atomic_load_n:
841  break;
842 
843  case AtomicExpr::AO__atomic_load:
844  Dest = EmitPointerWithAlignment(E->getVal1());
845  break;
846 
847  case AtomicExpr::AO__atomic_store:
848  Val1 = EmitPointerWithAlignment(E->getVal1());
849  break;
850 
851  case AtomicExpr::AO__atomic_exchange:
852  Val1 = EmitPointerWithAlignment(E->getVal1());
853  Dest = EmitPointerWithAlignment(E->getVal2());
854  break;
855 
856  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
857  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
858  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
859  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
860  case AtomicExpr::AO__atomic_compare_exchange_n:
861  case AtomicExpr::AO__atomic_compare_exchange:
862  Val1 = EmitPointerWithAlignment(E->getVal1());
863  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
864  Val2 = EmitPointerWithAlignment(E->getVal2());
865  else
866  Val2 = EmitValToTemp(*this, E->getVal2());
867  OrderFail = EmitScalarExpr(E->getOrderFail());
868  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
869  E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
870  IsWeak = EmitScalarExpr(E->getWeak());
871  break;
872 
873  case AtomicExpr::AO__c11_atomic_fetch_add:
874  case AtomicExpr::AO__c11_atomic_fetch_sub:
875  case AtomicExpr::AO__opencl_atomic_fetch_add:
876  case AtomicExpr::AO__opencl_atomic_fetch_sub:
877  if (MemTy->isPointerType()) {
878  // For pointer arithmetic, we're required to do a bit of math:
879  // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
880  // ... but only for the C11 builtins. The GNU builtins expect the
881  // user to multiply by sizeof(T).
882  QualType Val1Ty = E->getVal1()->getType();
883  llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
884  CharUnits PointeeIncAmt =
885  getContext().getTypeSizeInChars(MemTy->getPointeeType());
886  Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
887  auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
888  Val1 = Temp;
889  EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
890  break;
891  }
892  LLVM_FALLTHROUGH;
893  case AtomicExpr::AO__atomic_fetch_add:
894  case AtomicExpr::AO__atomic_fetch_sub:
895  case AtomicExpr::AO__atomic_add_fetch:
896  case AtomicExpr::AO__atomic_sub_fetch:
897  ShouldCastToIntPtrTy = !MemTy->isFloatingType();
898  LLVM_FALLTHROUGH;
899 
900  case AtomicExpr::AO__c11_atomic_store:
901  case AtomicExpr::AO__c11_atomic_exchange:
902  case AtomicExpr::AO__opencl_atomic_store:
903  case AtomicExpr::AO__opencl_atomic_exchange:
904  case AtomicExpr::AO__atomic_store_n:
905  case AtomicExpr::AO__atomic_exchange_n:
906  case AtomicExpr::AO__c11_atomic_fetch_and:
907  case AtomicExpr::AO__c11_atomic_fetch_or:
908  case AtomicExpr::AO__c11_atomic_fetch_xor:
909  case AtomicExpr::AO__c11_atomic_fetch_max:
910  case AtomicExpr::AO__c11_atomic_fetch_min:
911  case AtomicExpr::AO__opencl_atomic_fetch_and:
912  case AtomicExpr::AO__opencl_atomic_fetch_or:
913  case AtomicExpr::AO__opencl_atomic_fetch_xor:
914  case AtomicExpr::AO__opencl_atomic_fetch_min:
915  case AtomicExpr::AO__opencl_atomic_fetch_max:
916  case AtomicExpr::AO__atomic_fetch_and:
917  case AtomicExpr::AO__atomic_fetch_or:
918  case AtomicExpr::AO__atomic_fetch_xor:
919  case AtomicExpr::AO__atomic_fetch_nand:
920  case AtomicExpr::AO__atomic_and_fetch:
921  case AtomicExpr::AO__atomic_or_fetch:
922  case AtomicExpr::AO__atomic_xor_fetch:
923  case AtomicExpr::AO__atomic_nand_fetch:
924  case AtomicExpr::AO__atomic_max_fetch:
925  case AtomicExpr::AO__atomic_min_fetch:
926  case AtomicExpr::AO__atomic_fetch_max:
927  case AtomicExpr::AO__atomic_fetch_min:
928  Val1 = EmitValToTemp(*this, E->getVal1());
929  break;
930  }
931 
932  QualType RValTy = E->getType().getUnqualifiedType();
933 
934  // The inlined atomics only function on iN types, where N is a power of 2. We
935  // need to make sure (via temporaries if necessary) that all incoming values
936  // are compatible.
937  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
938  AtomicInfo Atomics(*this, AtomicVal);
939 
940  if (ShouldCastToIntPtrTy) {
941  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
942  if (Val1.isValid())
943  Val1 = Atomics.convertToAtomicIntPointer(Val1);
944  if (Val2.isValid())
945  Val2 = Atomics.convertToAtomicIntPointer(Val2);
946  }
947  if (Dest.isValid()) {
948  if (ShouldCastToIntPtrTy)
949  Dest = Atomics.emitCastToAtomicIntPointer(Dest);
950  } else if (E->isCmpXChg())
951  Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
952  else if (!RValTy->isVoidType()) {
953  Dest = Atomics.CreateTempAlloca();
954  if (ShouldCastToIntPtrTy)
955  Dest = Atomics.emitCastToAtomicIntPointer(Dest);
956  }
957 
958  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
959  if (UseLibcall) {
960  bool UseOptimizedLibcall = false;
961  switch (E->getOp()) {
962  case AtomicExpr::AO__c11_atomic_init:
963  case AtomicExpr::AO__opencl_atomic_init:
964  llvm_unreachable("Already handled above with EmitAtomicInit!");
965 
966  case AtomicExpr::AO__c11_atomic_fetch_add:
967  case AtomicExpr::AO__opencl_atomic_fetch_add:
968  case AtomicExpr::AO__atomic_fetch_add:
969  case AtomicExpr::AO__c11_atomic_fetch_and:
970  case AtomicExpr::AO__opencl_atomic_fetch_and:
971  case AtomicExpr::AO__atomic_fetch_and:
972  case AtomicExpr::AO__c11_atomic_fetch_or:
973  case AtomicExpr::AO__opencl_atomic_fetch_or:
974  case AtomicExpr::AO__atomic_fetch_or:
975  case AtomicExpr::AO__atomic_fetch_nand:
976  case AtomicExpr::AO__c11_atomic_fetch_sub:
977  case AtomicExpr::AO__opencl_atomic_fetch_sub:
978  case AtomicExpr::AO__atomic_fetch_sub:
979  case AtomicExpr::AO__c11_atomic_fetch_xor:
980  case AtomicExpr::AO__opencl_atomic_fetch_xor:
981  case AtomicExpr::AO__opencl_atomic_fetch_min:
982  case AtomicExpr::AO__opencl_atomic_fetch_max:
983  case AtomicExpr::AO__atomic_fetch_xor:
984  case AtomicExpr::AO__c11_atomic_fetch_max:
985  case AtomicExpr::AO__c11_atomic_fetch_min:
986  case AtomicExpr::AO__atomic_add_fetch:
987  case AtomicExpr::AO__atomic_and_fetch:
988  case AtomicExpr::AO__atomic_nand_fetch:
989  case AtomicExpr::AO__atomic_or_fetch:
990  case AtomicExpr::AO__atomic_sub_fetch:
991  case AtomicExpr::AO__atomic_xor_fetch:
992  case AtomicExpr::AO__atomic_fetch_max:
993  case AtomicExpr::AO__atomic_fetch_min:
994  case AtomicExpr::AO__atomic_max_fetch:
995  case AtomicExpr::AO__atomic_min_fetch:
996  // For these, only library calls for certain sizes exist.
997  UseOptimizedLibcall = true;
998  break;
999 
1000  case AtomicExpr::AO__atomic_load:
1001  case AtomicExpr::AO__atomic_store:
1002  case AtomicExpr::AO__atomic_exchange:
1003  case AtomicExpr::AO__atomic_compare_exchange:
1004  // Use the generic version if we don't know that the operand will be
1005  // suitably aligned for the optimized version.
1006  if (Misaligned)
1007  break;
1008  LLVM_FALLTHROUGH;
1009  case AtomicExpr::AO__c11_atomic_load:
1010  case AtomicExpr::AO__c11_atomic_store:
1011  case AtomicExpr::AO__c11_atomic_exchange:
1012  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1013  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1014  case AtomicExpr::AO__opencl_atomic_load:
1015  case AtomicExpr::AO__opencl_atomic_store:
1016  case AtomicExpr::AO__opencl_atomic_exchange:
1017  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1018  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1019  case AtomicExpr::AO__atomic_load_n:
1020  case AtomicExpr::AO__atomic_store_n:
1021  case AtomicExpr::AO__atomic_exchange_n:
1022  case AtomicExpr::AO__atomic_compare_exchange_n:
1023  // Only use optimized library calls for sizes for which they exist.
1024  // FIXME: Size == 16 optimized library functions exist too.
1025  if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
1026  UseOptimizedLibcall = true;
1027  break;
1028  }
1029 
1030  CallArgList Args;
1031  if (!UseOptimizedLibcall) {
1032  // For non-optimized library calls, the size is the first parameter
1033  Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
1034  getContext().getSizeType());
1035  }
1036  // The atomic address is the first or second parameter.
1037  // The OpenCL atomic library functions only accept pointer arguments to
1038  // the generic address space.
1039  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
1040  if (!E->isOpenCL())
1041  return V;
1042  auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
1043  if (AS == LangAS::opencl_generic)
1044  return V;
1045  auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
1046  auto T = V->getType();
1047  auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
1048 
1049  return getTargetHooks().performAddrSpaceCast(
1050  *this, V, AS, LangAS::opencl_generic, DestType, false);
1051  };
1052 
1053  Args.add(RValue::get(CastToGenericAddrSpace(
1054  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
1055  getContext().VoidPtrTy);
1056 
1057  std::string LibCallName;
1058  QualType LoweredMemTy =
1059  MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
1060  QualType RetTy;
1061  bool HaveRetTy = false;
1062  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
1063  bool PostOpMinMax = false;
1064  switch (E->getOp()) {
1065  case AtomicExpr::AO__c11_atomic_init:
1066  case AtomicExpr::AO__opencl_atomic_init:
1067  llvm_unreachable("Already handled!");
1068 
1069  // There is only one libcall for compare and exchange, because there is no
1070  // optimisation benefit possible from a libcall version of a weak compare
1071  // and exchange.
1072  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1073  // void *desired, int success, int failure)
1074  // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1075  // int success, int failure)
1076  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1077  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1078  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1079  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1080  case AtomicExpr::AO__atomic_compare_exchange:
1081  case AtomicExpr::AO__atomic_compare_exchange_n:
1082  LibCallName = "__atomic_compare_exchange";
1083  RetTy = getContext().BoolTy;
1084  HaveRetTy = true;
1085  Args.add(
1086  RValue::get(CastToGenericAddrSpace(
1087  EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1088  getContext().VoidPtrTy);
1089  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1090  MemTy, E->getExprLoc(), TInfo.Width);
1091  Args.add(RValue::get(Order), getContext().IntTy);
1092  Order = OrderFail;
1093  break;
1094  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1095  // int order)
1096  // T __atomic_exchange_N(T *mem, T val, int order)
1097  case AtomicExpr::AO__c11_atomic_exchange:
1098  case AtomicExpr::AO__opencl_atomic_exchange:
1099  case AtomicExpr::AO__atomic_exchange_n:
1100  case AtomicExpr::AO__atomic_exchange:
1101  LibCallName = "__atomic_exchange";
1102  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1103  MemTy, E->getExprLoc(), TInfo.Width);
1104  break;
1105  // void __atomic_store(size_t size, void *mem, void *val, int order)
1106  // void __atomic_store_N(T *mem, T val, int order)
1107  case AtomicExpr::AO__c11_atomic_store:
1108  case AtomicExpr::AO__opencl_atomic_store:
1109  case AtomicExpr::AO__atomic_store:
1110  case AtomicExpr::AO__atomic_store_n:
1111  LibCallName = "__atomic_store";
1112  RetTy = getContext().VoidTy;
1113  HaveRetTy = true;
1114  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1115  MemTy, E->getExprLoc(), TInfo.Width);
1116  break;
1117  // void __atomic_load(size_t size, void *mem, void *return, int order)
1118  // T __atomic_load_N(T *mem, int order)
1119  case AtomicExpr::AO__c11_atomic_load:
1120  case AtomicExpr::AO__opencl_atomic_load:
1121  case AtomicExpr::AO__atomic_load:
1122  case AtomicExpr::AO__atomic_load_n:
1123  LibCallName = "__atomic_load";
1124  break;
1125  // T __atomic_add_fetch_N(T *mem, T val, int order)
1126  // T __atomic_fetch_add_N(T *mem, T val, int order)
1127  case AtomicExpr::AO__atomic_add_fetch:
1128  PostOp = llvm::Instruction::Add;
1129  LLVM_FALLTHROUGH;
1130  case AtomicExpr::AO__c11_atomic_fetch_add:
1131  case AtomicExpr::AO__opencl_atomic_fetch_add:
1132  case AtomicExpr::AO__atomic_fetch_add:
1133  LibCallName = "__atomic_fetch_add";
1134  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1135  LoweredMemTy, E->getExprLoc(), TInfo.Width);
1136  break;
1137  // T __atomic_and_fetch_N(T *mem, T val, int order)
1138  // T __atomic_fetch_and_N(T *mem, T val, int order)
1139  case AtomicExpr::AO__atomic_and_fetch:
1140  PostOp = llvm::Instruction::And;
1141  LLVM_FALLTHROUGH;
1142  case AtomicExpr::AO__c11_atomic_fetch_and:
1143  case AtomicExpr::AO__opencl_atomic_fetch_and:
1144  case AtomicExpr::AO__atomic_fetch_and:
1145  LibCallName = "__atomic_fetch_and";
1146  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1147  MemTy, E->getExprLoc(), TInfo.Width);
1148  break;
1149  // T __atomic_or_fetch_N(T *mem, T val, int order)
1150  // T __atomic_fetch_or_N(T *mem, T val, int order)
1151  case AtomicExpr::AO__atomic_or_fetch:
1152  PostOp = llvm::Instruction::Or;
1153  LLVM_FALLTHROUGH;
1154  case AtomicExpr::AO__c11_atomic_fetch_or:
1155  case AtomicExpr::AO__opencl_atomic_fetch_or:
1156  case AtomicExpr::AO__atomic_fetch_or:
1157  LibCallName = "__atomic_fetch_or";
1158  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1159  MemTy, E->getExprLoc(), TInfo.Width);
1160  break;
1161  // T __atomic_sub_fetch_N(T *mem, T val, int order)
1162  // T __atomic_fetch_sub_N(T *mem, T val, int order)
1163  case AtomicExpr::AO__atomic_sub_fetch:
1164  PostOp = llvm::Instruction::Sub;
1165  LLVM_FALLTHROUGH;
1166  case AtomicExpr::AO__c11_atomic_fetch_sub:
1167  case AtomicExpr::AO__opencl_atomic_fetch_sub:
1168  case AtomicExpr::AO__atomic_fetch_sub:
1169  LibCallName = "__atomic_fetch_sub";
1170  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1171  LoweredMemTy, E->getExprLoc(), TInfo.Width);
1172  break;
1173  // T __atomic_xor_fetch_N(T *mem, T val, int order)
1174  // T __atomic_fetch_xor_N(T *mem, T val, int order)
1175  case AtomicExpr::AO__atomic_xor_fetch:
1176  PostOp = llvm::Instruction::Xor;
1177  LLVM_FALLTHROUGH;
1178  case AtomicExpr::AO__c11_atomic_fetch_xor:
1179  case AtomicExpr::AO__opencl_atomic_fetch_xor:
1180  case AtomicExpr::AO__atomic_fetch_xor:
1181  LibCallName = "__atomic_fetch_xor";
1182  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1183  MemTy, E->getExprLoc(), TInfo.Width);
1184  break;
1185  case AtomicExpr::AO__atomic_min_fetch:
1186  PostOpMinMax = true;
1187  LLVM_FALLTHROUGH;
1188  case AtomicExpr::AO__c11_atomic_fetch_min:
1189  case AtomicExpr::AO__atomic_fetch_min:
1190  case AtomicExpr::AO__opencl_atomic_fetch_min:
1191  LibCallName = E->getValueType()->isSignedIntegerType()
1192  ? "__atomic_fetch_min"
1193  : "__atomic_fetch_umin";
1194  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1195  LoweredMemTy, E->getExprLoc(), TInfo.Width);
1196  break;
1197  case AtomicExpr::AO__atomic_max_fetch:
1198  PostOpMinMax = true;
1199  LLVM_FALLTHROUGH;
1200  case AtomicExpr::AO__c11_atomic_fetch_max:
1201  case AtomicExpr::AO__atomic_fetch_max:
1202  case AtomicExpr::AO__opencl_atomic_fetch_max:
1203  LibCallName = E->getValueType()->isSignedIntegerType()
1204  ? "__atomic_fetch_max"
1205  : "__atomic_fetch_umax";
1206  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1207  LoweredMemTy, E->getExprLoc(), TInfo.Width);
1208  break;
1209  // T __atomic_nand_fetch_N(T *mem, T val, int order)
1210  // T __atomic_fetch_nand_N(T *mem, T val, int order)
1211  case AtomicExpr::AO__atomic_nand_fetch:
1212  PostOp = llvm::Instruction::And; // the NOT is special cased below
1213  LLVM_FALLTHROUGH;
1214  case AtomicExpr::AO__atomic_fetch_nand:
1215  LibCallName = "__atomic_fetch_nand";
1216  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1217  MemTy, E->getExprLoc(), TInfo.Width);
1218  break;
1219  }
1220 
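  // OpenCL atomics call the __opencl_atomic_* flavor of the runtime functions.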
1221  if (E->isOpenCL()) {
1222  LibCallName = std::string("__opencl") +
1223  StringRef(LibCallName).drop_front(1).str();
1224 
1225  }
1226  // Optimized functions have the size in their name.
1227  if (UseOptimizedLibcall)
1228  LibCallName += "_" + llvm::utostr(Size);
1229  // By default, assume we return a value of the atomic type.
1230  if (!HaveRetTy) {
1231  if (UseOptimizedLibcall) {
1232  // Value is returned directly.
1233  // The function returns an appropriately sized integer type.
1234  RetTy = getContext().getIntTypeForBitwidth(
1235  getContext().toBits(TInfo.Width), /*Signed=*/false);
1236  } else {
1237  // Value is returned through parameter before the order.
1238  RetTy = getContext().VoidTy;
1239  Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1240  getContext().VoidPtrTy);
1241  }
1242  }
1243  // order is always the last parameter
1244  Args.add(RValue::get(Order),
1245  getContext().IntTy);
1246  if (E->isOpenCL())
1247  Args.add(RValue::get(Scope), getContext().IntTy);
1248 
1249  // PostOp is only needed for the atomic_*_fetch operations, and
1250  // thus is only needed for and implemented in the
1251  // UseOptimizedLibcall codepath.
1252  assert(UseOptimizedLibcall || (!PostOp && !PostOpMinMax));
1253 
1254  RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1255  // The value is returned directly from the libcall.
1256  if (E->isCmpXChg())
1257  return Res;
1258 
1259  // The value is returned directly for optimized libcalls but the expr
1260  // provided an out-param.
1261  if (UseOptimizedLibcall && Res.getScalarVal()) {
1262  llvm::Value *ResVal = Res.getScalarVal();
1263  if (PostOpMinMax) {
1264  llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1265  ResVal = EmitPostAtomicMinMax(Builder, E->getOp(),
1266  E->getValueType()->isSignedIntegerType(),
1267  ResVal, LoadVal1);
1268  } else if (PostOp) {
1269  llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1270  ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1271  }
1272  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1273  ResVal = Builder.CreateNot(ResVal);
1274 
1275  Builder.CreateStore(
1276  ResVal,
1277  Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1278  }
1279 
1280  if (RValTy->isVoidType())
1281  return RValue::get(nullptr);
1282 
1283  return convertTempToRValue(
1284  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1285  RValTy, E->getExprLoc());
1286  }
1287 
1288  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1289  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1290  E->getOp() == AtomicExpr::AO__atomic_store ||
1291  E->getOp() == AtomicExpr::AO__atomic_store_n;
1292  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1293  E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1294  E->getOp() == AtomicExpr::AO__atomic_load ||
1295  E->getOp() == AtomicExpr::AO__atomic_load_n;
1296 
1297  if (isa<llvm::ConstantInt>(Order)) {
1298  auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1299  // We should not ever get to a case where the ordering isn't a valid C ABI
1300  // value, but it's hard to enforce that in general.
1301  if (llvm::isValidAtomicOrderingCABI(ord))
1302  switch ((llvm::AtomicOrderingCABI)ord) {
1303  case llvm::AtomicOrderingCABI::relaxed:
1304  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1305  llvm::AtomicOrdering::Monotonic, Scope);
1306  break;
1307  case llvm::AtomicOrderingCABI::consume:
1308  case llvm::AtomicOrderingCABI::acquire:
1309  if (IsStore)
1310  break; // Avoid crashing on code with undefined behavior
1311  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1312  llvm::AtomicOrdering::Acquire, Scope);
1313  break;
1314  case llvm::AtomicOrderingCABI::release:
1315  if (IsLoad)
1316  break; // Avoid crashing on code with undefined behavior
1317  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1318  llvm::AtomicOrdering::Release, Scope);
1319  break;
1320  case llvm::AtomicOrderingCABI::acq_rel:
1321  if (IsLoad || IsStore)
1322  break; // Avoid crashing on code with undefined behavior
1323  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1324  llvm::AtomicOrdering::AcquireRelease, Scope);
1325  break;
1326  case llvm::AtomicOrderingCABI::seq_cst:
1327  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1328  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1329  break;
1330  }
1331  if (RValTy->isVoidType())
1332  return RValue::get(nullptr);
1333 
1334  return convertTempToRValue(
1335  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1336  Dest.getAddressSpace())),
1337  RValTy, E->getExprLoc());
1338  }
1339 
1340  // Long case, when Order isn't obviously constant.
1341 
1342  // Create all the relevant BB's
1343  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1344  *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1345  *SeqCstBB = nullptr;
1346  MonotonicBB = createBasicBlock("monotonic", CurFn);
1347  if (!IsStore)
1348  AcquireBB = createBasicBlock("acquire", CurFn);
1349  if (!IsLoad)
1350  ReleaseBB = createBasicBlock("release", CurFn);
1351  if (!IsLoad && !IsStore)
1352  AcqRelBB = createBasicBlock("acqrel", CurFn);
1353  SeqCstBB = createBasicBlock("seqcst", CurFn);
1354  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1355 
1356  // Create the switch for the split
1357  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1358  // doesn't matter unless someone is crazy enough to use something that
1359  // doesn't fold to a constant for the ordering.
1360  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1361  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1362 
1363  // Emit all the different atomics
1364  Builder.SetInsertPoint(MonotonicBB);
1365  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1366  llvm::AtomicOrdering::Monotonic, Scope);
1367  Builder.CreateBr(ContBB);
1368  if (!IsStore) {
1369  Builder.SetInsertPoint(AcquireBB);
1370  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1371  llvm::AtomicOrdering::Acquire, Scope);
1372  Builder.CreateBr(ContBB);
1373  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1374  AcquireBB);
1375  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1376  AcquireBB);
1377  }
1378  if (!IsLoad) {
1379  Builder.SetInsertPoint(ReleaseBB);
1380  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1381  llvm::AtomicOrdering::Release, Scope);
1382  Builder.CreateBr(ContBB);
1383  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1384  ReleaseBB);
1385  }
1386  if (!IsLoad && !IsStore) {
1387  Builder.SetInsertPoint(AcqRelBB);
1388  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1389  llvm::AtomicOrdering::AcquireRelease, Scope);
1390  Builder.CreateBr(ContBB);
1391  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1392  AcqRelBB);
1393  }
1394  Builder.SetInsertPoint(SeqCstBB);
1395  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1396  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1397  Builder.CreateBr(ContBB);
1398  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1399  SeqCstBB);
1400 
1401  // Cleanup and return
1402  Builder.SetInsertPoint(ContBB);
1403  if (RValTy->isVoidType())
1404  return RValue::get(nullptr);
1405 
1406  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1407  return convertTempToRValue(
1408  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1409  Dest.getAddressSpace())),
1410  RValTy, E->getExprLoc());
1411 }
1412 
1413 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1414  unsigned addrspace =
1415  cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1416  llvm::IntegerType *ty =
1417  llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1418  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1419 }
1420 
1421 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1422  llvm::Type *Ty = Addr.getElementType();
1423  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
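  // If the source is not already the padded atomic width, copy it into a
  // temporary of the right size first.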
1424  if (SourceSizeInBits != AtomicSizeInBits) {
1425  Address Tmp = CreateTempAlloca();
1426  CGF.Builder.CreateMemCpy(Tmp, Addr,
1427  std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1428  Addr = Tmp;
1429  }
1430 
1431  return emitCastToAtomicIntPointer(Addr);
1432 }
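// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// The memcpy path above handles values narrower than the padded atomic cell.
// Assumed example: on typical targets a 3-byte payload is widened to a 4-byte
// (i32) atomic representation, so bitcasting the source address alone would
// read one byte past the value; copying into a temporary first avoids that.
struct ThreeBytes { char c[3]; };                // ValueSizeInBits == 24
typedef _Atomic(struct ThreeBytes) AtomicCell;   // AtomicSizeInBits padded to 32
// ---- [End editorial note] ----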
1433 
1434 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1435  AggValueSlot resultSlot,
1436  SourceLocation loc,
1437  bool asValue) const {
1438  if (LVal.isSimple()) {
1439  if (EvaluationKind == TEK_Aggregate)
1440  return resultSlot.asRValue();
1441 
1442  // Drill into the padding structure if we have one.
1443  if (hasPadding())
1444  addr = CGF.Builder.CreateStructGEP(addr, 0);
1445 
1446  // Otherwise, just convert the temporary to an r-value using the
1447  // normal conversion routine.
1448  return CGF.convertTempToRValue(addr, getValueType(), loc);
1449  }
1450  if (!asValue)
1451  // Get RValue from temp memory as atomic for non-simple lvalues
1452  return RValue::get(CGF.Builder.CreateLoad(addr));
1453  if (LVal.isBitField())
1454  return CGF.EmitLoadOfBitfieldLValue(
1455  LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1456  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1457  if (LVal.isVectorElt())
1458  return CGF.EmitLoadOfLValue(
1459  LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1460  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1461  assert(LVal.isExtVectorElt());
1462  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1463  addr, LVal.getExtVectorElts(), LVal.getType(),
1464  LVal.getBaseInfo(), TBAAAccessInfo()));
1465 }
1466 
1467 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1468  AggValueSlot ResultSlot,
1469  SourceLocation Loc,
1470  bool AsValue) const {
1471  // Try not to go through memory in some easy cases.
1472  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1473  if (getEvaluationKind() == TEK_Scalar &&
1474  (((!LVal.isBitField() ||
1475  LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1476  !hasPadding()) ||
1477  !AsValue)) {
1478  auto *ValTy = AsValue
1479  ? CGF.ConvertTypeForMem(ValueTy)
1480  : getAtomicAddress().getType()->getPointerElementType();
1481  if (ValTy->isIntegerTy()) {
1482  assert(IntVal->getType() == ValTy && "Different integer types.");
1483  return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1484  } else if (ValTy->isPointerTy())
1485  return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1486  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1487  return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1488  }
1489 
1490  // Create a temporary. This needs to be big enough to hold the
1491  // atomic integer.
1492  Address Temp = Address::invalid();
1493  bool TempIsVolatile = false;
1494  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1495  assert(!ResultSlot.isIgnored());
1496  Temp = ResultSlot.getAddress();
1497  TempIsVolatile = ResultSlot.isVolatile();
1498  } else {
1499  Temp = CreateTempAlloca();
1500  }
1501 
1502  // Slam the integer into the temporary.
1503  Address CastTemp = emitCastToAtomicIntPointer(Temp);
1504  CGF.Builder.CreateStore(IntVal, CastTemp)
1505  ->setVolatile(TempIsVolatile);
1506 
1507  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1508 }
1509 
1510 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1511  llvm::AtomicOrdering AO, bool) {
1512  // void __atomic_load(size_t size, void *mem, void *return, int order);
1513  CallArgList Args;
1514  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1515  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1516  CGF.getContext().VoidPtrTy);
1517  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1518  CGF.getContext().VoidPtrTy);
1519  Args.add(
1520  RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1521  CGF.getContext().IntTy);
1522  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1523 }
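// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// The CallArgList assembled above targets the generic libatomic entry point
// declared in the comment ("__atomic_load(size, mem, return, order)"). In
// rough C terms the emitted call amounts to (names illustrative only):
//
//   __atomic_load(sizeof(*obj),            // getAtomicSizeValue()
//                 (void *)obj,             // the atomic object
//                 (void *)ret,             // AddForLoaded: destination buffer
//                 (int)llvm::toCABI(AO));  // C ABI ordering constant
// ---- [End editorial note] ----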
1524 
1525 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1526  bool IsVolatile) {
1527  // Okay, we're doing this natively.
1528  Address Addr = getAtomicAddressAsAtomicIntPointer();
1529  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1530  Load->setAtomic(AO);
1531 
1532  // Other decoration.
1533  if (IsVolatile)
1534  Load->setVolatile(true);
1535  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1536  return Load;
1537 }
1538 
1539 /// An LValue is a candidate for having its loads and stores be made atomic if
1540 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1541 /// such an operation can be performed without a libcall.
1542 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1543  if (!CGM.getCodeGenOpts().MSVolatile) return false;
1544  AtomicInfo AI(*this, LV);
1545  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1546  // An atomic is inline if we don't need to use a libcall.
1547  bool AtomicIsInline = !AI.shouldUseLibcall();
1548  // MSVC doesn't seem to do this for types wider than a pointer.
1549  if (getContext().getTypeSize(LV.getType()) >
1550  getContext().getTypeSize(getContext().getIntPtrType()))
1551  return false;
1552  return IsVolatile && AtomicIsInline;
1553 }
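// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// Assumed example of what the predicate above admits when clang is invoked
// with /volatile:ms (-fms-volatile): accesses to 'flag' may be emitted as
// atomic acquire loads / release stores, while 'wide' stays a plain volatile
// access because it is wider than a pointer.
namespace ms_volatile_example {
  volatile long flag;                 // pointer-sized or smaller: eligible
  struct Wide { long long a, b; };
  volatile Wide wide;                 // wider than intptr_t: not eligible
}
// ---- [End editorial note] ----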
1554 
1555 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1556  AggValueSlot Slot) {
1557  llvm::AtomicOrdering AO;
1558  bool IsVolatile = LV.isVolatileQualified();
1559  if (LV.getType()->isAtomicType()) {
1560  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1561  } else {
1562  AO = llvm::AtomicOrdering::Acquire;
1563  IsVolatile = true;
1564  }
1565  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1566 }
1567 
1568 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1569  bool AsValue, llvm::AtomicOrdering AO,
1570  bool IsVolatile) {
1571  // Check whether we should use a library call.
1572  if (shouldUseLibcall()) {
1573  Address TempAddr = Address::invalid();
1574  if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1575  assert(getEvaluationKind() == TEK_Aggregate);
1576  TempAddr = ResultSlot.getAddress();
1577  } else
1578  TempAddr = CreateTempAlloca();
1579 
1580  EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1581 
1582  // Okay, turn that back into the original value or whole atomic (for
1583  // non-simple lvalues) type.
1584  return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1585  }
1586 
1587  // Okay, we're doing this natively.
1588  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1589 
1590  // If we're ignoring an aggregate return, don't do anything.
1591  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1592  return RValue::getAggregate(Address::invalid(), false);
1593 
1594  // Okay, turn that back into the original value or atomic (for non-simple
1595  // lvalues) type.
1596  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1597 }
1598 
1599 /// Emit a load from an l-value of atomic type. Note that the r-value
1600 /// we produce is an r-value of the atomic *value* type.
1601 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1602  llvm::AtomicOrdering AO, bool IsVolatile,
1603  AggValueSlot resultSlot) {
1604  AtomicInfo Atomics(*this, src);
1605  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1606  IsVolatile);
1607 }
1608 
1609 /// Copy an r-value into memory as part of storing to an atomic type.
1610 /// This needs to create a bit-pattern suitable for atomic operations.
1611 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1612  assert(LVal.isSimple());
1613  // If we have an r-value, the rvalue should be of the atomic type,
1614  // which means that the caller is responsible for having zeroed
1615  // any padding. Just do an aggregate copy of that type.
1616  if (rvalue.isAggregate()) {
1617  LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1618  LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1619  getAtomicType());
1620  bool IsVolatile = rvalue.isVolatileQualified() ||
1621  LVal.isVolatileQualified();
1622  CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1623  AggValueSlot::DoesNotOverlap, IsVolatile);
1624  return;
1625  }
1626 
1627  // Okay, otherwise we're copying stuff.
1628 
1629  // Zero out the buffer if necessary.
1630  emitMemSetZeroIfNecessary();
1631 
1632  // Drill past the padding if present.
1633  LValue TempLVal = projectValue();
1634 
1635  // Okay, store the rvalue in.
1636  if (rvalue.isScalar()) {
1637  CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1638  } else {
1639  CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1640  }
1641 }
1642 
1643 
1644 /// Materialize an r-value into memory for the purposes of storing it
1645 /// to an atomic type.
1646 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1647  // Aggregate r-values are already in memory, and EmitAtomicStore
1648  // requires them to be values of the atomic type.
1649  if (rvalue.isAggregate())
1650  return rvalue.getAggregateAddress();
1651 
1652  // Otherwise, make a temporary and materialize into it.
1653  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1654  AtomicInfo Atomics(CGF, TempLV);
1655  Atomics.emitCopyIntoMemory(rvalue);
1656  return TempLV.getAddress(CGF);
1657 }
1658 
1659 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1660  // If we've got a scalar value of the right size, try to avoid going
1661  // through memory.
1662  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1663  llvm::Value *Value = RVal.getScalarVal();
1664  if (isa<llvm::IntegerType>(Value->getType()))
1665  return CGF.EmitToMemory(Value, ValueTy);
1666  else {
1667  llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1668  CGF.getLLVMContext(),
1669  LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1670  if (isa<llvm::PointerType>(Value->getType()))
1671  return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1672  else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1673  return CGF.Builder.CreateBitCast(Value, InputIntTy);
1674  }
1675  }
1676  // Otherwise, we need to go through memory.
1677  // Put the r-value in memory.
1678  Address Addr = materializeRValue(RVal);
1679 
1680  // Cast the temporary to the atomic int type and pull a value out.
1681  Addr = emitCastToAtomicIntPointer(Addr);
1682  return CGF.Builder.CreateLoad(Addr);
1683 }
1684 
1685 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1686  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1687  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1688  // Do the atomic store.
1689  Address Addr = getAtomicAddressAsAtomicIntPointer();
1690  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1691  ExpectedVal, DesiredVal,
1692  Success, Failure);
1693  // Other decoration.
1694  Inst->setVolatile(LVal.isVolatileQualified());
1695  Inst->setWeak(IsWeak);
1696 
1697  // Okay, turn that back into the original value type.
1698  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1699  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1700  return std::make_pair(PreviousVal, SuccessFailureVal);
1701 }
1702 
1703 llvm::Value *
1704 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1705  llvm::Value *DesiredAddr,
1706  llvm::AtomicOrdering Success,
1707  llvm::AtomicOrdering Failure) {
1708  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1709  // void *desired, int success, int failure);
1710  CallArgList Args;
1711  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1712  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1713  CGF.getContext().VoidPtrTy);
1714  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1715  CGF.getContext().VoidPtrTy);
1716  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1717  CGF.getContext().VoidPtrTy);
1718  Args.add(RValue::get(
1719  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1720  CGF.getContext().IntTy);
1721  Args.add(RValue::get(
1722  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1723  CGF.getContext().IntTy);
1724  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1725  CGF.getContext().BoolTy, Args);
1726 
1727  return SuccessFailureRVal.getScalarVal();
1728 }
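// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// Contract of the generic libcall built above (see the signature in the
// comment on lines 1708-1709), expressed as pseudo-C comments only:
//
//   bool ok = __atomic_compare_exchange(size, obj, expected, desired,
//                                       success_order, failure_order);
//   // ok == true  : *obj matched *expected and now holds *desired
//   // ok == false : *expected has been overwritten with the current *obj,
//   //               which is why the caller re-reads ExpectedAddr afterwards
// ---- [End editorial note] ----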
1729 
1730 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1731  RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1732  llvm::AtomicOrdering Failure, bool IsWeak) {
1733  // Check whether we should use a library call.
1734  if (shouldUseLibcall()) {
1735  // Produce a source address.
1736  Address ExpectedAddr = materializeRValue(Expected);
1737  Address DesiredAddr = materializeRValue(Desired);
1738  auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1739  DesiredAddr.getPointer(),
1740  Success, Failure);
1741  return std::make_pair(
1742  convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1743  SourceLocation(), /*AsValue=*/false),
1744  Res);
1745  }
1746 
1747  // If we've got a scalar value of the right size, try to avoid going
1748  // through memory.
1749  auto *ExpectedVal = convertRValueToInt(Expected);
1750  auto *DesiredVal = convertRValueToInt(Desired);
1751  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1752  Failure, IsWeak);
1753  return std::make_pair(
1754  ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1755  SourceLocation(), /*AsValue=*/false),
1756  Res.second);
1757 }
1758 
1759 static void
1760 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1761  const llvm::function_ref<RValue(RValue)> &UpdateOp,
1762  Address DesiredAddr) {
1763  RValue UpRVal;
1764  LValue AtomicLVal = Atomics.getAtomicLValue();
1765  LValue DesiredLVal;
1766  if (AtomicLVal.isSimple()) {
1767  UpRVal = OldRVal;
1768  DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1769  } else {
1770  // Build new lvalue for temp address.
1771  Address Ptr = Atomics.materializeRValue(OldRVal);
1772  LValue UpdateLVal;
1773  if (AtomicLVal.isBitField()) {
1774  UpdateLVal =
1775  LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1776  AtomicLVal.getType(),
1777  AtomicLVal.getBaseInfo(),
1778  AtomicLVal.getTBAAInfo());
1779  DesiredLVal =
1780  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1781  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1782  AtomicLVal.getTBAAInfo());
1783  } else if (AtomicLVal.isVectorElt()) {
1784  UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1785  AtomicLVal.getType(),
1786  AtomicLVal.getBaseInfo(),
1787  AtomicLVal.getTBAAInfo());
1788  DesiredLVal = LValue::MakeVectorElt(
1789  DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1790  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1791  } else {
1792  assert(AtomicLVal.isExtVectorElt());
1793  UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1794  AtomicLVal.getType(),
1795  AtomicLVal.getBaseInfo(),
1796  AtomicLVal.getTBAAInfo());
1797  DesiredLVal = LValue::MakeExtVectorElt(
1798  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1799  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1800  }
1801  UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1802  }
1803  // Store new value in the corresponding memory area.
1804  RValue NewRVal = UpdateOp(UpRVal);
1805  if (NewRVal.isScalar()) {
1806  CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1807  } else {
1808  assert(NewRVal.isComplex());
1809  CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1810  /*isInit=*/false);
1811  }
1812 }
1813 
1814 void AtomicInfo::EmitAtomicUpdateLibcall(
1815  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1816  bool IsVolatile) {
1817  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1818 
1819  Address ExpectedAddr = CreateTempAlloca();
1820 
1821  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1822  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1823  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1824  CGF.EmitBlock(ContBB);
1825  Address DesiredAddr = CreateTempAlloca();
1826  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1827  requiresMemSetZero(getAtomicAddress().getElementType())) {
1828  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1829  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1830  }
1831  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1832  AggValueSlot::ignored(),
1833  SourceLocation(), /*AsValue=*/false);
1834  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1835  auto *Res =
1836  EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1837  DesiredAddr.getPointer(),
1838  AO, Failure);
1839  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1840  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1841 }
1842 
1843 void AtomicInfo::EmitAtomicUpdateOp(
1844  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1845  bool IsVolatile) {
1846  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1847 
1848  // Do the atomic load.
1849  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1850  // For non-simple lvalues perform compare-and-swap procedure.
1851  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1852  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1853  auto *CurBB = CGF.Builder.GetInsertBlock();
1854  CGF.EmitBlock(ContBB);
1855  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1856  /*NumReservedValues=*/2);
1857  PHI->addIncoming(OldVal, CurBB);
1858  Address NewAtomicAddr = CreateTempAlloca();
1859  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1860  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1861  requiresMemSetZero(getAtomicAddress().getElementType())) {
1862  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1863  }
1864  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1865  SourceLocation(), /*AsValue=*/false);
1866  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1867  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1868  // Try to write new value using cmpxchg operation.
1869  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1870  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1871  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1872  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1873 }
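// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// The blocks built above ("atomic_cont" / "atomic_exit") plus the PHI form the
// usual read-modify-write retry loop. A rough std::atomic analogue of the
// emitted control flow (a sketch of the shape only, not what clang generates):
#include <atomic>
template <class T, class Fn>
static void atomic_update_sketch(std::atomic<T> &obj, Fn update,
                                 std::memory_order ao) {
  T old = obj.load(std::memory_order_relaxed);  // the real code loads with the
                                                // derived "strongest failure"
                                                // ordering instead
  while (!obj.compare_exchange_strong(old, update(old), ao))
    ;  // on failure 'old' is refreshed -- the role the PHI node plays above
}
// ---- [End editorial note] ----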
1874 
1875 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1876  RValue UpdateRVal, Address DesiredAddr) {
1877  LValue AtomicLVal = Atomics.getAtomicLValue();
1878  LValue DesiredLVal;
1879  // Build new lvalue for temp address.
1880  if (AtomicLVal.isBitField()) {
1881  DesiredLVal =
1882  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1883  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1884  AtomicLVal.getTBAAInfo());
1885  } else if (AtomicLVal.isVectorElt()) {
1886  DesiredLVal =
1887  LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1888  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1889  AtomicLVal.getTBAAInfo());
1890  } else {
1891  assert(AtomicLVal.isExtVectorElt());
1892  DesiredLVal = LValue::MakeExtVectorElt(
1893  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1894  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1895  }
1896  // Store new value in the corresponding memory area.
1897  assert(UpdateRVal.isScalar());
1898  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1899 }
1900 
1901 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1902  RValue UpdateRVal, bool IsVolatile) {
1903  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1904 
1905  Address ExpectedAddr = CreateTempAlloca();
1906 
1907  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1908  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1909  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1910  CGF.EmitBlock(ContBB);
1911  Address DesiredAddr = CreateTempAlloca();
1912  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1913  requiresMemSetZero(getAtomicAddress().getElementType())) {
1914  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1915  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1916  }
1917  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1918  auto *Res =
1919  EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1920  DesiredAddr.getPointer(),
1921  AO, Failure);
1922  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1923  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1924 }
1925 
1926 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1927  bool IsVolatile) {
1928  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1929 
1930  // Do the atomic load.
1931  auto *OldVal = EmitAtomicLoadOp(Failure, IsVolatile);
1932  // For non-simple lvalues perform compare-and-swap procedure.
1933  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1934  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1935  auto *CurBB = CGF.Builder.GetInsertBlock();
1936  CGF.EmitBlock(ContBB);
1937  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1938  /*NumReservedValues=*/2);
1939  PHI->addIncoming(OldVal, CurBB);
1940  Address NewAtomicAddr = CreateTempAlloca();
1941  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1942  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1943  requiresMemSetZero(getAtomicAddress().getElementType())) {
1944  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1945  }
1946  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1947  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1948  // Try to write new value using cmpxchg operation.
1949  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1950  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1951  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1952  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1953 }
1954 
1955 void AtomicInfo::EmitAtomicUpdate(
1956  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1957  bool IsVolatile) {
1958  if (shouldUseLibcall()) {
1959  EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1960  } else {
1961  EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1962  }
1963 }
1964 
1965 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1966  bool IsVolatile) {
1967  if (shouldUseLibcall()) {
1968  EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1969  } else {
1970  EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1971  }
1972 }
1973 
1974 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1975  bool isInit) {
1976  bool IsVolatile = lvalue.isVolatileQualified();
1977  llvm::AtomicOrdering AO;
1978  if (lvalue.getType()->isAtomicType()) {
1979  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1980  } else {
1981  AO = llvm::AtomicOrdering::Release;
1982  IsVolatile = true;
1983  }
1984  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1985 }
1986 
1987 /// Emit a store to an l-value of atomic type.
1988 ///
1989 /// Note that the r-value is expected to be an r-value *of the atomic
1990 /// type*; this means that for aggregate r-values, it should include
1991 /// storage for any padding that was necessary.
1992 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1993  llvm::AtomicOrdering AO, bool IsVolatile,
1994  bool isInit) {
1995  // If this is an aggregate r-value, it should agree in type except
1996  // maybe for address-space qualification.
1997  assert(!rvalue.isAggregate() ||
1998  rvalue.getAggregateAddress().getElementType() ==
1999  dest.getAddress(*this).getElementType());
2000 
2001  AtomicInfo atomics(*this, dest);
2002  LValue LVal = atomics.getAtomicLValue();
2003 
2004  // If this is an initialization, just put the value there normally.
2005  if (LVal.isSimple()) {
2006  if (isInit) {
2007  atomics.emitCopyIntoMemory(rvalue);
2008  return;
2009  }
2010 
2011  // Check whether we should use a library call.
2012  if (atomics.shouldUseLibcall()) {
2013  // Produce a source address.
2014  Address srcAddr = atomics.materializeRValue(rvalue);
2015 
2016  // void __atomic_store(size_t size, void *mem, void *val, int order)
2017  CallArgList args;
2018  args.add(RValue::get(atomics.getAtomicSizeValue()),
2019  getContext().getSizeType());
2020  args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
2021  getContext().VoidPtrTy);
2022  args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
2023  getContext().VoidPtrTy);
2024  args.add(
2025  RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
2026  getContext().IntTy);
2027  emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
2028  return;
2029  }
2030 
2031  // Okay, we're doing this natively.
2032  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
2033 
2034  // Do the atomic store.
2035  Address addr =
2036  atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
2037  intValue = Builder.CreateIntCast(
2038  intValue, addr.getElementType(), /*isSigned=*/false);
2039  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
2040 
2041  if (AO == llvm::AtomicOrdering::Acquire)
2042  AO = llvm::AtomicOrdering::Monotonic;
2043  else if (AO == llvm::AtomicOrdering::AcquireRelease)
2044  AO = llvm::AtomicOrdering::Release;
2045  // Initializations don't need to be atomic.
2046  if (!isInit)
2047  store->setAtomic(AO);
2048 
2049  // Other decoration.
2050  if (IsVolatile)
2051  store->setVolatile(true);
2052  CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
2053  return;
2054  }
2055 
2056  // Emit simple atomic update operation.
2057  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
2058 }
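// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// On the ordering downgrade above: a plain store cannot carry acquire
// semantics, so acquire is demoted to monotonic (relaxed) and acq_rel to
// release before the ordering is attached to the StoreInst. The C++-level
// analogue (assumed, comment only) is that the standard does not permit
//
//   a.store(1, std::memory_order_acquire);   // invalid ordering for a store
//
// so the strongest store-compatible ordering is used instead.
// ---- [End editorial note] ----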
2059 
2060 /// Emit a compare-and-exchange op for atomic type.
2061 ///
2062 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
2063  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
2064  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
2065  AggValueSlot Slot) {
2066  // If this is an aggregate r-value, it should agree in type except
2067  // maybe for address-space qualification.
2068  assert(!Expected.isAggregate() ||
2069  Expected.getAggregateAddress().getElementType() ==
2070  Obj.getAddress(*this).getElementType());
2071  assert(!Desired.isAggregate() ||
2072  Desired.getAggregateAddress().getElementType() ==
2073  Obj.getAddress(*this).getElementType());
2074  AtomicInfo Atomics(*this, Obj);
2075 
2076  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
2077  IsWeak);
2078 }
2079 
2080 void CodeGenFunction::EmitAtomicUpdate(
2081  LValue LVal, llvm::AtomicOrdering AO,
2082  const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2083  AtomicInfo Atomics(*this, LVal);
2084  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2085 }
2086 
2087 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2088  AtomicInfo atomics(*this, dest);
2089 
2090  switch (atomics.getEvaluationKind()) {
2091  case TEK_Scalar: {
2092  llvm::Value *value = EmitScalarExpr(init);
2093  atomics.emitCopyIntoMemory(RValue::get(value));
2094  return;
2095  }
2096 
2097  case TEK_Complex: {
2098  ComplexPairTy value = EmitComplexExpr(init);
2099  atomics.emitCopyIntoMemory(RValue::getComplex(value));
2100  return;
2101  }
2102 
2103  case TEK_Aggregate: {
2104  // Fix up the destination if the initializer isn't an expression
2105  // of atomic type.
2106  bool Zeroed = false;
2107  if (!init->getType()->isAtomicType()) {
2108  Zeroed = atomics.emitMemSetZeroIfNecessary();
2109  dest = atomics.projectValue();
2110  }
2111 
2112  // Evaluate the expression directly into the destination.
2113  AggValueSlot slot = AggValueSlot::forLValue(
2114  dest, *this, AggValueSlot::IsNotDestructed,
2115  AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
2116  AggValueSlot::DoesNotOverlap,
2117  Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
2118 
2119  EmitAggExpr(init, slot);
2120  return;
2121  }
2122  }
2123  llvm_unreachable("bad evaluation kind");
2124 }
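// ---- [Editorial note: illustrative sketch, not part of CGAtomic.cpp] ----
// Assumed C declarations showing which evaluation-kind case above each
// initializer reaches. Initialization can use ordinary (non-atomic) stores
// because no other thread can observe the object before it is initialized:
//
//   _Atomic int            ai = 42;       // TEK_Scalar
//   _Atomic _Complex float ac = 1.0f;     // TEK_Complex
//   struct Pair { int a, b; };
//   _Atomic struct Pair    ap = {1, 2};   // TEK_Aggregate
// ---- [End editorial note] ----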
clang::CodeGen::CodeGenFunction::ConvertTypeForMem
llvm::Type * ConvertTypeForMem(QualType T)
Definition: CodeGenFunction.cpp:207
clang::ASTContext::getTypeSizeInChars
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
Definition: ASTContext.cpp:2450
clang::AtomicExpr::getPtr
Expr * getPtr() const
Definition: Expr.h:6261
clang::AtomicExpr::getVal2
Expr * getVal2() const
Definition: Expr.h:6281
clang::CodeGen::CGBuilderTy::CreateMemCpy
llvm::CallInst * CreateMemCpy(Address Dest, Address Src, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:286
clang::AtomicExpr
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6229
clang::CodeGen::CodeGenTypeCache::SizeTy
llvm::IntegerType * SizeTy
Definition: CodeGenTypeCache.h:50
clang::CodeGen::CodeGenFunction::getTypeSize
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
Definition: CGStmtOpenMP.cpp:303
clang::CodeGen::CodeGenFunction::EmitAtomicCompareExchange
std::pair< RValue, llvm::Value * > EmitAtomicCompareExchange(LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc, llvm::AtomicOrdering Success=llvm::AtomicOrdering::SequentiallyConsistent, llvm::AtomicOrdering Failure=llvm::AtomicOrdering::SequentiallyConsistent, bool IsWeak=false, AggValueSlot Slot=AggValueSlot::ignored())
Emit a compare-and-exchange op for atomic type.
Definition: CGAtomic.cpp:2062
clang::interp::Add
bool Add(InterpState &S, CodePtr OpPC)
Definition: Interp.h:134
clang::CharUnits::getAsAlign
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition: CharUnits.h:183
clang::interp::APInt
llvm::APInt APInt
Definition: Integral.h:27
clang::CodeGen::CodeGenTypeCache::IntTy
llvm::IntegerType * IntTy
int
Definition: CodeGenTypeCache.h:42
EmitPostAtomicMinMax
static llvm::Value * EmitPostAtomicMinMax(CGBuilderTy &Builder, AtomicExpr::AtomicOp Op, bool IsSigned, llvm::Value *OldVal, llvm::Value *RHS)
Duplicate the atomic min/max operation in conventional IR for the builtin variants that return the ne...
Definition: CGAtomic.cpp:492
clang::CodeGen::TEK_Aggregate
@ TEK_Aggregate
Definition: CodeGenFunction.h:115
clang::CodeGen::RValue
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CGValue.h:39
clang::CodeGen::CodeGenModule::CreateRuntimeFunction
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
Definition: CodeGenModule.cpp:3792
type
clang::CodeGen::RValue::getAggregate
static RValue getAggregate(Address addr, bool isVolatile=false)
Definition: CGValue.h:107
CodeGenFunction.h
string
string(SUBSTRING ${CMAKE_CURRENT_BINARY_DIR} 0 ${PATH_LIB_START} PATH_HEAD) string(SUBSTRING $
Definition: CMakeLists.txt:22
clang::CodeGen::CodeGenFunction::EmitCall
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, SourceLocation Loc)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition: CGCall.cpp:4616
clang::AtomicExpr::getValueType
QualType getValueType() const
Definition: Expr.cpp:4743
clang::CodeGen::AggValueSlot::DoesNotNeedGCBarriers
@ DoesNotNeedGCBarriers
Definition: CGValue.h:528
clang::CodeGen::Address::getAlignment
CharUnits getAlignment() const
Return the alignment of this pointer.
Definition: Address.h:66
clang::CodeGen::LValue::MakeBitfield
static LValue MakeBitfield(Address Addr, const CGBitFieldInfo &Info, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Create a new object to represent a bit-field access.
Definition: CGValue.h:432
clang::ASTContext::VoidTy
CanQualType VoidTy
Definition: ASTContext.h:1075
clang::AtomicExpr::getOrder
Expr * getOrder() const
Definition: Expr.h:6264
clang::SourceLocation
Encodes a location in the source.
Definition: SourceLocation.h:88
clang::QualType::getQualifiers
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: Type.h:6451
clang::AtomicExpr::getOp
AtomicOp getOp() const
Definition: Expr.h:6293
clang::CodeGen::LValue::getAddress
Address getAddress(CodeGenFunction &CGF) const
Definition: CGValue.h:329
clang::CodeGen::CodeGenFunction::EmitAtomicStore
void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit)
Definition: CGAtomic.cpp:1974
clang::CodeGen::CodeGenModule::getSize
llvm::ConstantInt * getSize(CharUnits numChars)
Emit the given number of characters as a value of type size_t.
Definition: CodeGenModule.cpp:981
clang::CodeGen::TargetCodeGenInfo::getLLVMSyncScopeID
virtual llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts, SyncScope Scope, llvm::AtomicOrdering Ordering, llvm::LLVMContext &Ctx) const
Get the syncscope used in LLVM IR.
Definition: TargetInfo.cpp:519
CGRecordLayout.h
clang::CodeGen::LValue::getVectorIdx
llvm::Value * getVectorIdx() const
Definition: CGValue.h:346
clang::QualType
A (possibly-)qualified type.
Definition: Type.h:673
clang::CodeGen::CodeGenFunction::EmitLoadOfExtVectorElementLValue
RValue EmitLoadOfExtVectorElementLValue(LValue V)
Definition: CGExpr.cpp:1982
AttributeLangSupport::C
@ C
Definition: SemaDeclAttr.cpp:54
clang::CodeGen::Address::isValid
bool isValid() const
Definition: Address.h:35
clang::Type::isFloatingType
bool isFloatingType() const
Definition: Type.cpp:2104
clang::DiagnosticsEngine
Concrete class used by the front-end to report problems and issues.
Definition: Diagnostic.h:191
int
__device__ int
Definition: __clang_hip_libdevice_declares.h:63
clang::TypeInfo::Width
uint64_t Width
Definition: ASTContext.h:183
clang::CodeGen::CodeGenFunction::createBasicBlock
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
Definition: CodeGenFunction.h:2394
clang::CodeGen::CGBuilderTy::CreateStore
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:95
clang::AtomicExpr::getScopeModel
static std::unique_ptr< AtomicScopeModel > getScopeModel(AtomicOp Op)
Get atomic scope model for the atomic op code.
Definition: Expr.h:6340
TargetInfo.h
clang::CodeGen::CodeGenModule::getLangOpts
const LangOptions & getLangOpts() const
Definition: CodeGenModule.h:703
clang::Type::isVoidType
bool isVoidType() const
Definition: Type.h:6955
llvm::Expected
Definition: LLVM.h:41
clang::CodeGen::CodeGenFunction::EmitStoreOfScalar
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
Definition: CodeGenFunction.h:3802
clang::CodeGen::CGBitFieldInfo::Size
unsigned Size
The total size of the bit-field, in bits.
Definition: CGRecordLayout.h:71
clang::AtomicExpr::isOpenCL
bool isOpenCL() const
Definition: Expr.h:6314
clang::ento::Store
const void * Store
Store - This opaque type encapsulates an immutable mapping from locations to values.
Definition: StoreRef.h:27
clang::CodeGen::CGBuilderTy
Definition: CGBuilder.h:43
clang::CodeGen::CGBuilderTy::CreateStructGEP
Address CreateStructGEP(Address Addr, unsigned Index, const llvm::Twine &Name="")
Definition: CGBuilder.h:188
clang::ASTContext::toBits
int64_t toBits(CharUnits CharSize) const
Convert a size in characters to a size in bits.
Definition: ASTContext.cpp:2444
clang::AtomicExpr::getBeginLoc
SourceLocation getBeginLoc() const LLVM_READONLY
Definition: Expr.h:6322
clang::AtomicExpr::getOrderFail
Expr * getOrderFail() const
Definition: Expr.h:6277
clang::ArrayType::Normal
@ Normal
Definition: Type.h:2890
clang::CodeGen::CallArgList::add
void add(RValue rvalue, QualType type)
Definition: CGCall.h:288
clang::CodeGen::CodeGenFunction::EmitStoreThroughLValue
void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false)
EmitStoreThroughLValue - Store the specified rvalue into the specified lvalue, where both are guarant...
Definition: CGExpr.cpp:2053
clang::CodeGen::CodeGenFunction::Builder
CGBuilderTy Builder
Definition: CodeGenFunction.h:274
clang::CodeGen::CGBuilderTy::CreateBitCast
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:151
clang::CodeGen::CodeGenTypeCache::VoidPtrTy
llvm::PointerType * VoidPtrTy
Definition: CodeGenTypeCache.h:56
Offset
unsigned Offset
Definition: Format.cpp:2335
clang::TargetInfo::getMaxAtomicInlineWidth
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition: TargetInfo.h:717
clang::CodeGen::TypeEvaluationKind
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
Definition: CodeGenFunction.h:112
clang::ASTContext::getTypeAlignInChars
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
Definition: ASTContext.cpp:2459
clang::CodeGen::CodeGenFunction::EmitLoadOfScalar
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
Definition: CodeGenFunction.h:3780
clang::CodeGen::LValue::MakeAddr
static LValue MakeAddr(Address address, QualType type, ASTContext &Context, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:389
clang::CodeGen::CodeGenFunction::ComplexPairTy
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Definition: CodeGenFunction.h:272
clang::CodeGen::ReturnValueSlot
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition: CGCall.h:362
clang::CodeGen::AggValueSlot::IsZeroed
@ IsZeroed
Definition: CGValue.h:526
CGCall.h
clang::TypeInfo
Definition: ASTContext.h:182
clang::getAsString
llvm::StringRef getAsString(SyncScope S)
Definition: SyncScope.h:50
V
#define V(N, I)
Definition: ASTContext.h:3121
clang::CodeGen::CGBitFieldInfo::Offset
unsigned Offset
The offset within a contiguous run of bitfields that are represented as a single "field" within the L...
Definition: CGRecordLayout.h:68
clang::ASTContext::getExtVectorType
QualType getExtVectorType(QualType VectorType, unsigned NumElts) const
Return the unique reference to an extended vector type of the specified element type and size.
Definition: ASTContext.cpp:4040
min
__DEVICE__ int min(int __a, int __b)
Definition: __clang_cuda_math.h:197
emitAtomicLibcall
static RValue emitAtomicLibcall(CodeGenFunction &CGF, StringRef fnName, QualType resultType, CallArgList &args)
Definition: CGAtomic.cpp:303
clang::CodeGen::TBAAAccessInfo
Definition: CodeGenTBAA.h:42
clang::CodeGen::AggValueSlot::asRValue
RValue asRValue() const
Definition: CGValue.h:639
clang::CodeGen::LValue::MakeVectorElt
static LValue MakeVectorElt(Address vecAddress, llvm::Value *Idx, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:402
clang::CodeGen::AggValueSlot::ignored
static AggValueSlot ignored()
ignored - Returns an aggregate value slot indicating that the aggregate value is being ignored.
Definition: CGValue.h:533
clang::CodeGen::LValue::isVolatileQualified
bool isVolatileQualified() const
Definition: CGValue.h:260
clang::Scope
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:40
clang::CodeGen::CodeGenFunction::EmitCastToVoidPtr
llvm::Value * EmitCastToVoidPtr(llvm::Value *value)
Emit a cast to void* in the appropriate address space.
Definition: CGExpr.cpp:54
clang::CodeGen::CGBuilderTy::CreatePointerBitCastOrAddrSpaceCast
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:173
clang::CodeGen::LValue::isBitField
bool isBitField() const
Definition: CGValue.h:255
clang::VectorType
Represents a GCC generic vector type.
Definition: Type.h:3229
clang::CodeGen::CodeGenModule::getCodeGenOpts
const CodeGenOptions & getCodeGenOpts() const
Definition: CodeGenModule.h:708
clang::CodeGen::Address::getType
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:43
clang::CodeGen::LValue::getBitFieldInfo
const CGBitFieldInfo & getBitFieldInfo() const
Definition: CGValue.h:381
clang::ASTContext
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:212
clang::ASTContext::getSizeType
CanQualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
Definition: ASTContext.cpp:5712
CGFunctionInfo.h
clang::CodeGen::CodeGenFunction::EmitAtomicUpdate
void EmitAtomicUpdate(LValue LVal, llvm::AtomicOrdering AO, const llvm::function_ref< RValue(RValue)> &UpdateOp, bool IsVolatile)
Definition: CGAtomic.cpp:2080
clang::CodeGen::RValue::isAggregate
bool isAggregate() const
Definition: CGValue.h:54
clang::CodeGen::TEK_Complex
@ TEK_Complex
Definition: CodeGenFunction.h:114
clang::CodeGen::LValue::MakeExtVectorElt
static LValue MakeExtVectorElt(Address vecAddress, llvm::Constant *Elts, QualType type, LValueBaseInfo BaseInfo, TBAAAccessInfo TBAAInfo)
Definition: CGValue.h:414
clang::Type::getAs
const T * getAs() const
Member-template getAs<specific type>'.
Definition: Type.h:7161
clang::CodeGen::RValue::isVolatileQualified
bool isVolatileQualified() const
Definition: CGValue.h:56
emitAtomicCmpXchgFailureSet
static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *FailureOrderVal, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::SyncScope::ID Scope)
Given an ordering required on success, emit all possible cmpxchg instructions to cope with the provid...
Definition: CGAtomic.cpp:415
clang::CodeGen::CodeGenFunction::getTarget
const TargetInfo & getTarget() const
Definition: CodeGenFunction.h:1989
clang::CodeGen::CodeGenFunction::EmitAtomicExpr
RValue EmitAtomicExpr(AtomicExpr *E)
Definition: CGAtomic.cpp:785
clang::CodeGen::CodeGenTypeCache::VoidTy
llvm::Type * VoidTy
void
Definition: CodeGenTypeCache.h:34
clang::CodeGen::LValue::isVolatile
bool isVolatile() const
Definition: CGValue.h:303
clang::CodeGen::LValue::getType
QualType getType() const
Definition: CGValue.h:266
clang::interp::Load
bool Load(InterpState &S, CodePtr OpPC)
Definition: Interp.h:618
clang::CodeGen::LValue::getBitFieldPointer
llvm::Value * getBitFieldPointer() const
Definition: CGValue.h:380
clang::CodeGen::CodeGenFunction::convertTempToRValue
RValue convertTempToRValue(Address addr, QualType type, SourceLocation Loc)
Given the address of a temporary variable, produce an r-value of its type.
Definition: CGExpr.cpp:5378
ASTContext.h
clang::ASTContext::getIntTypeForBitwidth
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - sets integer QualTy according to specified details: bitwidth,...
Definition: ASTContext.cpp:11262
clang::Type::getPointeeType
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:625
clang::CodeGen::CodeGenModule::getTypes
CodeGenTypes & getTypes()
Definition: CodeGenModule.h:726
clang::CodeGen::CodeGenFunction::EmitFromMemory
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition: CGExpr.cpp:1766
clang::CodeGen::CodeGenFunction::EmitAtomicLoad
RValue EmitAtomicLoad(LValue LV, SourceLocation SL, AggValueSlot Slot=AggValueSlot::ignored())
Definition: CGAtomic.cpp:1555
emitAtomicCmpXchg
static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak, Address Dest, Address Ptr, Address Val1, Address Val2, uint64_t Size, llvm::AtomicOrdering SuccessOrder, llvm::AtomicOrdering FailureOrder, llvm::SyncScope::ID Scope)
Definition: CGAtomic.cpp:365
clang::CodeGen::CodeGenFunction::getLLVMContext
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenFunction.h:1990
clang::CodeGen::CGBitFieldInfo::StorageOffset
CharUnits StorageOffset
The offset of the bitfield storage from the start of the struct.
Definition: CGRecordLayout.h:81
clang::Type::isAtomicType
bool isAtomicType() const
Definition: Type.h:6807
clang::CodeGen::CodeGenFunction::getContext
ASTContext & getContext() const
Definition: CodeGenFunction.h:1947
clang::CodeGen::Address
An aligned address.
Definition: Address.h:24
CodeGenModule.h
clang::CodeGen::RValue::isComplex
bool isComplex() const
Definition: CGValue.h:53
clang::CodeGen::CodeGenFunction::getEvaluationKind
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
Definition: CodeGenFunction.cpp:215
clang::AtomicExpr::AtomicOp
AtomicOp
Definition: Expr.h:6231
clang::CodeGen::LValue::getBaseInfo
LValueBaseInfo getBaseInfo() const
Definition: CGValue.h:321
clang::CodeGen::Address::getAddressSpace
unsigned getAddressSpace() const
Return the address space that this address resides in.
Definition: Address.h:56
clang::CodeGen::CodeGenFunction::EmitAtomicInit
void EmitAtomicInit(Expr *E, LValue lvalue)
Definition: CGAtomic.cpp:2087
clang::CodeGen::CodeGenFunction::EmitAnyExprToMem
void EmitAnyExprToMem(const Expr *E, Address Location, Qualifiers Quals, bool IsInitializer)
EmitAnyExprToMem - Emits the code necessary to evaluate an arbitrary expression into the given memory...
Definition: CGExpr.cpp:231
clang::CodeGen::CGCallee::forDirect
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition: CGCall.h:135
clang::CodeGen::LValue::getTBAAInfo
TBAAAccessInfo getTBAAInfo() const
Definition: CGValue.h:310
clang::QualType::getUnqualifiedType
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: Type.h:6504
clang::CodeGen::CodeGenFunction::EmitLoadOfBitfieldLValue
RValue EmitLoadOfBitfieldLValue(LValue LV, SourceLocation Loc)
Definition: CGExpr.cpp:1945
clang::ASTContext::getTargetAddressSpace
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2720
clang::CodeGen::LValue::setAlignment
void setAlignment(CharUnits A)
Definition: CGValue.h:319
clang::CodeGen::CodeGenFunction::LValueIsSuitableForInlineAtomic
bool LValueIsSuitableForInlineAtomic(LValue Src)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
Definition: CGAtomic.cpp:1542
clang::CodeGen::CGBitFieldInfo
Structure with information about how a bitfield should be accessed.
Definition: CGRecordLayout.h:65
clang::CodeGen::CodeGenFunction::EmitToMemory
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition: CGExpr.cpp:1752
clang::CodeGen::CodeGenTypes::GetFunctionType
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition: CGCall.cpp:1595
clang::CodeGen::CodeGenModule::getDataLayout
const llvm::DataLayout & getDataLayout() const
Definition: CodeGenModule.h:711
clang::Type::castAs
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:7226
EmitAtomicOp
static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest, Address Ptr, Address Val1, Address Val2, llvm::Value *IsWeak, llvm::Value *FailureOrder, uint64_t Size, llvm::AtomicOrdering Order, llvm::SyncScope::ID Scope)
Definition: CGAtomic.cpp:512
clang::prec::And
@ And
Definition: OperatorPrecedence.h:35
clang::CodeGen::LValue
LValue - This represents an lvalue references.
Definition: CGValue.h:167
clang::CodeGen::LValue::isVectorElt
bool isVectorElt() const
Definition: CGValue.h:254
clang::CodeGen::LValue::isSimple
bool isSimple() const
Definition: CGValue.h:253
clang::CodeGen::CodeGenFunction::getTargetHooks
const TargetCodeGenInfo & getTargetHooks() const
Definition: CodeGenFunction.h:1991
EmitValToTemp
static Address EmitValToTemp(CodeGenFunction &CGF, Expr *E)
Definition: CGAtomic.cpp:695
clang::ASTContext::VoidPtrTy
CanQualType VoidPtrTy
Definition: ASTContext.h:1102
clang::Type::isPointerType
bool isPointerType() const
Definition: Type.h:6672
clang::AtomicExpr::isCmpXChg
bool isCmpXChg() const
Definition: Expr.h:6305
clang::syntax::NodeRole::Size
@ Size
clang::AtomicExpr::isVolatile
bool isVolatile() const
Definition: Expr.h:6301
clang::CodeGen::Address::getPointer
llvm::Value * getPointer() const
Definition: Address.h:37
clang::CodeGen::CodeGenFunction
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
Definition: CodeGenFunction.h:235
clang::ASTContext::IntTy
CanQualType IntTy
Definition: ASTContext.h:1084
clang::CharUnits::isZero
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:116
clang::CodeGen::AggValueSlot::IsNotAliased
@ IsNotAliased
Definition: CGValue.h:524
isFullSizeType
static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
Definition: CGAtomic.cpp:323
clang::Expr::getExprLoc
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:229
clang::CodeGen::CodeGenModule
This class organizes the cross-function state that is used while generating LLVM code.
Definition: CodeGenModule.h:284
clang::ASTContext::getIntPtrType
QualType getIntPtrType() const
Return a type compatible with "intptr_t" (C99 7.18.1.4), as defined by the target.
Definition: ASTContext.cpp:5746
clang::QualType::isNull
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition: Type.h:738
clang::CodeGen::LValue::getExtVectorPointer
llvm::Value * getExtVectorPointer() const
Definition: CGValue.h:367
clang::CodeGen::CodeGenFunction::EmitComplexExpr
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
Definition: CGExprComplex.cpp:1120
Value
Value
Definition: UninitializedValues.cpp:102
clang::CodeGen::CodeGenFunction::EmitAggExpr
void EmitAggExpr(const Expr *E, AggValueSlot AS)
EmitAggExpr - Emit the computation of the specified expression of aggregate type.
Definition: CGExprAgg.cpp:1989
clang::CodeGen::Address::invalid
static Address invalid()
Definition: Address.h:34
clang::CodeGen::AggValueSlot::forLValue
static AggValueSlot forLValue(const LValue &LV, CodeGenFunction &CGF, IsDestructed_t isDestructed, NeedsGCBarriers_t needsGC, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed, IsSanitizerChecked_t isChecked=IsNotSanitizerChecked)
Definition: CGValue.h:575
clang::CodeGen::LValue::getExtVectorElts
llvm::Constant * getExtVectorElts() const
Definition: CGValue.h:371
clang::AtomicExpr::getScope
Expr * getScope() const
Definition: Expr.h:6267
clang::CodeGen::AggValueSlot::isVolatile
bool isVolatile() const
Definition: CGValue.h:592
AddDirectArgument
static void AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args, bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy, SourceLocation Loc, CharUnits SizeInChars)
Definition: CGAtomic.cpp:761
clang::CodeGen::AggValueSlot
An aggregate value slot.
Definition: CGValue.h:471
clang::Type::isSignedIntegerType
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition: Type.cpp:2013
clang::CodeGen::CGBitFieldInfo::StorageSize
unsigned StorageSize
The storage size in bits which should be used when accessing this bitfield.
Definition: CGRecordLayout.h:78
clang::CodeGen::RValue::getComplexVal
std::pair< llvm::Value *, llvm::Value * > getComplexVal() const
getComplexVal - Return the real/imag components of this complex value.
Definition: CGValue.h:66
clang::CodeGen::Address::getElementType
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition: Address.h:51
clang::CodeGen::CodeGenFunction::MakeAddrLValue
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
Definition: CodeGenFunction.h:2456
clang::CodeGen::CodeGenModule::DecorateInstructionWithTBAA
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
Definition: CodeGenModule.cpp:945
clang::CodeGen::LValue::getPointer
llvm::Value * getPointer(CodeGenFunction &CGF) const
Definition: CGValue.h:325
clang::CodeGen::CodeGenFunction::EmitScalarExpr
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
Definition: CGExprScalar.cpp:4848
clang::CodeGen::CGFunctionInfo
CGFunctionInfo - Class to encapsulate the information about a function definition.
Definition: CGFunctionInfo.h:546
clang::CodeGen::CodeGenFunction::EmitLoadOfLValue
RValue EmitLoadOfLValue(LValue V, SourceLocation Loc)
EmitLoadOfLValue - Given an expression that represents a value lvalue, this method emits the address ...
Definition: CGExpr.cpp:1883
clang::CodeGen::AggValueSlot::isIgnored
bool isIgnored() const
Definition: CGValue.h:619
clang::CodeGen::RValue::getComplex
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition: CGValue.h:93
clang::PointerType
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2640
clang::CodeGen::CodeGenFunction::EmitStoreOfComplex
void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit)
EmitStoreOfComplex - Store a complex number into the specified l-value.
Definition: CGExprComplex.cpp:1139
clang::CodeGen::CodeGenFunction::CGM
CodeGenModule & CGM
Definition: CodeGenFunction.h:266
clang::CodeGen::TEK_Scalar
@ TEK_Scalar
Definition: CodeGenFunction.h:113
clang::CodeGen::CodeGenFunction::CurFn
llvm::Function * CurFn
Definition: CodeGenFunction.h:330
clang::Builtin::ID
ID
Definition: Builtins.h:48
EmitAtomicUpdateValue
static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal, const llvm::function_ref< RValue(RValue)> &UpdateOp, Address DesiredAddr)
Definition: CGAtomic.cpp:1760
clang
Definition: CalledOnceCheck.h:17
clang::TypeInfo::Align
unsigned Align
Definition: ASTContext.h:184
clang::CodeGen::CGBuilderTy::CreateAtomicCmpXchg
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(llvm::Value *Ptr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition: CGBuilder.h:131
clang::CodeGen::RValue::get
static RValue get(llvm::Value *V)
Definition: CGValue.h:86
clang::ASTContext::toCharUnitsFromBits
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
Definition: ASTContext.cpp:2439
clang::CodeGen::CodeGenFunction::EmitAggregateCopy
void EmitAggregateCopy(LValue Dest, LValue Src, QualType EltTy, AggValueSlot::Overlap_t MayOverlap, bool isVolatile=false)
EmitAggregateCopy - Emit an aggregate copy.
Definition: CGExprAgg.cpp:2052
clang::CodeGen::LValue::isGlobalReg
bool isGlobalReg() const
Definition: CGValue.h:257
FrontendDiagnostic.h
clang::CodeGen::AggValueSlot::getAddress
Address getAddress() const
Definition: CGValue.h:615
clang::CodeGen::LValue::getExtVectorAddress
Address getExtVectorAddress() const
Definition: CGValue.h:364
clang::Expr::getType
QualType getType() const
Definition: Expr.h:141
clang::CodeGen::RValue::getScalarVal
llvm::Value * getScalarVal() const
getScalarVal() - Return the Value* of this scalar value.
Definition: CGValue.h:59
clang::CodeGen::LValue::getAlignment
CharUnits getAlignment() const
Definition: CGValue.h:318
clang::CodeGen::CodeGenFunction::hasVolatileMember
bool hasVolatileMember(QualType T)
hasVolatileMember - returns true if aggregate type has a volatile member.
Definition: CodeGenFunction.h:2631
clang::AtomicExpr::getVal1
Expr * getVal1() const
Definition: Expr.h:6271
clang::CodeGen::CodeGenModule::getDiags
DiagnosticsEngine & getDiags() const
Definition: CodeGenModule.h:710
clang::CodeGen::RValue::isScalar
bool isScalar() const
Definition: CGValue.h:52
clang::CodeGen::CGBuilderTy::CreateAtomicRMW
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, llvm::Value *Ptr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition: CGBuilder.h:143
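A hedged fragment (identifiers assumed, not standalone): emitting an atomic fetch-and-add through this wrapper, with CGF, Ptr, and Val already in scope:
  llvm::AtomicRMWInst *OldVal = CGF.Builder.CreateAtomicRMW(
      llvm::AtomicRMWInst::Add, Ptr, Val,
      llvm::AtomicOrdering::SequentiallyConsistent);
  // OldVal is the value that was stored at Ptr before the addition.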
clang::ASTContext::getPointerType
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
Definition: ASTContext.cpp:3249
clang::CodeGen::TargetCodeGenInfo::performAddrSpaceCast
virtual llvm::Value * performAddrSpaceCast(CodeGen::CodeGenFunction &CGF, llvm::Value *V, LangAS SrcAddr, LangAS DestAddr, llvm::Type *DestTy, bool IsNonNull=false) const
Perform address space cast of an expression of pointer type.
Definition: TargetInfo.cpp:497
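A hedged fragment showing one plausible call site (all names are assumptions): casting a pointer value into the OpenCL generic address space before handing it to an atomic library helper, with CGF, V (an llvm::Value *), and DestTy (an llvm::Type *) already in scope:
  llvm::Value *Generic = CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(
      CGF, V, LangAS::opencl_global, LangAS::opencl_generic, DestTy);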
clang::CharUnits
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
clang::ASTContext::getTypeInfoInChars
TypeInfoChars getTypeInfoInChars(const Type *T) const
Definition: ASTContext.cpp:1874
clang::AtomicType
Definition: Type.h:6242
true
#define true
Definition: stdbool.h:16
clang::CodeGen::CodeGenFunction::CreateMemTemp
Address CreateMemTemp(QualType T, const Twine &Name="tmp", Address *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition: CGExpr.cpp:135
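A hedged fragment (not standalone; Ty, SomeValue, and CGF are assumed to be in scope, and the temporary's name is hypothetical): materializing a temporary slot so that a value has an address, as atomic libcall paths commonly need:
  Address Tmp = CGF.CreateMemTemp(Ty, "atomic-temp");  // temporary alloca with suitable alignment
  CGF.Builder.CreateStore(SomeValue, Tmp);              // spill the value to memory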
clang::CodeGen::LValue::getVectorPointer
llvm::Value * getVectorPointer() const
Definition: CGValue.h:342
clang::Expr
This represents one expression.
Definition: Expr.h:109
clang::CodeGen::AggValueSlot::IsNotDestructed
@ IsNotDestructed
Definition: CGValue.h:525
clang::CodeGen::CodeGenFunction::EmitPointerWithAlignment
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition: CGExpr.cpp:1060
clang::CodeGen::CodeGenTypeCache::Int8Ty
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Definition: CodeGenTypeCache.h:37
clang::AtomicExpr::getWeak
Expr * getWeak() const
Definition: Expr.h:6287
clang::CodeGen::CallArgList
CallArgList - Type for representing both the value and type of arguments in a call.
Definition: CGCall.h:264
clang::LangAS::opencl_generic
@ opencl_generic
clang::CodeGen::CodeGenTypes::arrangeBuiltinFunctionCall
const CGFunctionInfo & arrangeBuiltinFunctionCall(QualType resultType, const CallArgList &args)
Definition: CGCall.cpp:644
clang::ASTContext::BoolTy
CanQualType BoolTy
Definition: ASTContext.h:1076
clang::CodeGen::CGBuilderTy::CreateMemSet
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition: CGBuilder.h:315
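A hedged fragment (assumed identifiers, not standalone): zero-filling a temporary of SizeInBytes bytes before storing into it, with CGF and an Address Tmp in scope:
  CGF.Builder.CreateMemSet(Tmp, CGF.Builder.getInt8(0),
                           CGF.Builder.getInt64(SizeInBytes));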
clang::CodeGen::AggValueSlot::IsNotZeroed
@ IsNotZeroed
Definition: CGValue.h:526
clang::CodeGen::LValue::isExtVectorElt
bool isExtVectorElt() const
Definition: CGValue.h:256
clang::CodeGen::CodeGenFunction::EmitBlock
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:529
clang::interp::Sub
bool Sub(InterpState &S, CodePtr OpPC)
Definition: Interp.h:142
clang::CodeGen::CodeGenModule::getLLVMContext
llvm::LLVMContext & getLLVMContext()
Definition: CodeGenModule.h:720
clang::CharUnits::getQuantity
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
clang::CodeGen::RValue::getAggregateAddress
Address getAggregateAddress() const
getAggregateAddress() - Return the Value* of the address of the aggregate.
Definition: CGValue.h:71
clang::DiagnosticsEngine::Report
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
Definition: Diagnostic.h:1523
clang::CodeGen::CGBuilderTy::CreateLoad
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:68
clang::CodeGen::AggValueSlot::DoesNotOverlap
@ DoesNotOverlap
Definition: CGValue.h:527