1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the code for emitting atomic operations.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "CGCall.h"
15 #include "CGRecordLayout.h"
16 #include "CodeGenFunction.h"
17 #include "CodeGenModule.h"
18 #include "TargetInfo.h"
19 #include "clang/AST/ASTContext.h"
22 #include "llvm/ADT/DenseMap.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/Intrinsics.h"
25 #include "llvm/IR/Operator.h"
26 
27 using namespace clang;
28 using namespace CodeGen;
29 
30 namespace {
31  class AtomicInfo {
32  CodeGenFunction &CGF;
33  QualType AtomicTy;
34  QualType ValueTy;
35  uint64_t AtomicSizeInBits;
36  uint64_t ValueSizeInBits;
37  CharUnits AtomicAlign;
38  CharUnits ValueAlign;
39  CharUnits LValueAlign;
40  TypeEvaluationKind EvaluationKind;
41  bool UseLibcall;
42  LValue LVal;
43  CGBitFieldInfo BFI;
44  public:
45  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
46  : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
47  EvaluationKind(TEK_Scalar), UseLibcall(true) {
48  assert(!lvalue.isGlobalReg());
49  ASTContext &C = CGF.getContext();
50  if (lvalue.isSimple()) {
51  AtomicTy = lvalue.getType();
52  if (auto *ATy = AtomicTy->getAs<AtomicType>())
53  ValueTy = ATy->getValueType();
54  else
55  ValueTy = AtomicTy;
56  EvaluationKind = CGF.getEvaluationKind(ValueTy);
57 
58  uint64_t ValueAlignInBits;
59  uint64_t AtomicAlignInBits;
60  TypeInfo ValueTI = C.getTypeInfo(ValueTy);
61  ValueSizeInBits = ValueTI.Width;
62  ValueAlignInBits = ValueTI.Align;
63 
64  TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
65  AtomicSizeInBits = AtomicTI.Width;
66  AtomicAlignInBits = AtomicTI.Align;
67 
68  assert(ValueSizeInBits <= AtomicSizeInBits);
69  assert(ValueAlignInBits <= AtomicAlignInBits);
70 
71  AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
72  ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
73  if (lvalue.getAlignment().isZero())
74  lvalue.setAlignment(AtomicAlign);
75 
76  LVal = lvalue;
77  } else if (lvalue.isBitField()) {
78  ValueTy = lvalue.getType();
79  ValueSizeInBits = C.getTypeSize(ValueTy);
80  auto &OrigBFI = lvalue.getBitFieldInfo();
81  auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
82  AtomicSizeInBits = C.toBits(
83  C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
84  .alignTo(lvalue.getAlignment()));
85  auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
86  auto OffsetInChars =
87  (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
88  lvalue.getAlignment();
89  VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
90  VoidPtrAddr, OffsetInChars.getQuantity());
91  auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
92  VoidPtrAddr,
93  CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
94  "atomic_bitfield_base");
95  BFI = OrigBFI;
96  BFI.Offset = Offset;
97  BFI.StorageSize = AtomicSizeInBits;
98  BFI.StorageOffset += OffsetInChars;
99  LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
100  BFI, lvalue.getType(), lvalue.getBaseInfo(),
101  lvalue.getTBAAInfo());
102  AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
103  if (AtomicTy.isNull()) {
104  llvm::APInt Size(
105  /*numBits=*/32,
106  C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
107  AtomicTy = C.getConstantArrayType(C.CharTy, Size, ArrayType::Normal,
108  /*IndexTypeQuals=*/0);
109  }
110  AtomicAlign = ValueAlign = lvalue.getAlignment();
111  } else if (lvalue.isVectorElt()) {
112  ValueTy = lvalue.getType()->getAs<VectorType>()->getElementType();
113  ValueSizeInBits = C.getTypeSize(ValueTy);
114  AtomicTy = lvalue.getType();
115  AtomicSizeInBits = C.getTypeSize(AtomicTy);
116  AtomicAlign = ValueAlign = lvalue.getAlignment();
117  LVal = lvalue;
118  } else {
119  assert(lvalue.isExtVectorElt());
120  ValueTy = lvalue.getType();
121  ValueSizeInBits = C.getTypeSize(ValueTy);
122  AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
123  lvalue.getType(), lvalue.getExtVectorAddress()
124  .getElementType()->getVectorNumElements());
125  AtomicSizeInBits = C.getTypeSize(AtomicTy);
126  AtomicAlign = ValueAlign = lvalue.getAlignment();
127  LVal = lvalue;
128  }
129  UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
130  AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
131  }
132 
133  QualType getAtomicType() const { return AtomicTy; }
134  QualType getValueType() const { return ValueTy; }
135  CharUnits getAtomicAlignment() const { return AtomicAlign; }
136  CharUnits getValueAlignment() const { return ValueAlign; }
137  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
138  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
139  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
140  bool shouldUseLibcall() const { return UseLibcall; }
141  const LValue &getAtomicLValue() const { return LVal; }
142  llvm::Value *getAtomicPointer() const {
143  if (LVal.isSimple())
144  return LVal.getPointer();
145  else if (LVal.isBitField())
146  return LVal.getBitFieldPointer();
147  else if (LVal.isVectorElt())
148  return LVal.getVectorPointer();
149  assert(LVal.isExtVectorElt());
150  return LVal.getExtVectorPointer();
151  }
152  Address getAtomicAddress() const {
153  return Address(getAtomicPointer(), getAtomicAlignment());
154  }
155 
156  Address getAtomicAddressAsAtomicIntPointer() const {
157  return emitCastToAtomicIntPointer(getAtomicAddress());
158  }
159 
160  /// Is the atomic size larger than the underlying value type?
161  ///
162  /// Note that the absence of padding does not mean that atomic
163  /// objects are completely interchangeable with non-atomic
164  /// objects: we might have promoted the alignment of a type
165  /// without making it bigger.
166  bool hasPadding() const {
167  return (ValueSizeInBits != AtomicSizeInBits);
168  }
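 // Illustration (assumed layout, not from the original source): a value type
 // that occupies 3 bytes is typically widened to a 4-byte, 4-aligned atomic
 // object, so ValueSizeInBits (24) != AtomicSizeInBits (32) and hasPadding()
 // is true; the padding byte must then be kept in a known state so that
 // bitwise compare-exchange on the whole object behaves predictably.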
169 
170  bool emitMemSetZeroIfNecessary() const;
171 
172  llvm::Value *getAtomicSizeValue() const {
173  CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
174  return CGF.CGM.getSize(size);
175  }
176 
177  /// Cast the given pointer to an integer pointer suitable for atomic
178  /// operations.
179  Address emitCastToAtomicIntPointer(Address Addr) const;
180 
181  /// If Addr is compatible with the iN that will be used for an atomic
182  /// operation, bitcast it. Otherwise, create a temporary that is suitable
183  /// and copy the value across.
184  Address convertToAtomicIntPointer(Address Addr) const;
185 
186  /// Turn an atomic-layout object into an r-value.
187  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
188  SourceLocation loc, bool AsValue) const;
189 
190  /// Converts an r-value to an integer value.
191  llvm::Value *convertRValueToInt(RValue RVal) const;
192 
193  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
194  AggValueSlot ResultSlot,
195  SourceLocation Loc, bool AsValue) const;
196 
197  /// Copy an atomic r-value into atomic-layout memory.
198  void emitCopyIntoMemory(RValue rvalue) const;
199 
200  /// Project an l-value down to the value field.
201  LValue projectValue() const {
202  assert(LVal.isSimple());
203  Address addr = getAtomicAddress();
204  if (hasPadding())
205  addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
206 
207  return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
208  LVal.getBaseInfo(), LVal.getTBAAInfo());
209  }
210 
211  /// Emits atomic load.
212  /// \returns Loaded value.
213  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
214  bool AsValue, llvm::AtomicOrdering AO,
215  bool IsVolatile);
216 
217  /// Emits atomic compare-and-exchange sequence.
218  /// \param Expected Expected value.
219  /// \param Desired Desired value.
220  /// \param Success Atomic ordering for success operation.
221  /// \param Failure Atomic ordering for failed operation.
222  /// \param IsWeak true if atomic operation is weak, false otherwise.
223  /// \returns Pair of values: previous value from storage (value type) and
224  /// boolean flag (i1 type) with true if success and false otherwise.
225  std::pair<RValue, llvm::Value *>
226  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
227  llvm::AtomicOrdering Success =
228  llvm::AtomicOrdering::SequentiallyConsistent,
229  llvm::AtomicOrdering Failure =
230  llvm::AtomicOrdering::SequentiallyConsistent,
231  bool IsWeak = false);
232 
233  /// Emits atomic update.
234  /// \param AO Atomic ordering.
235  /// \param UpdateOp Update operation for the current lvalue.
236  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
237  const llvm::function_ref<RValue(RValue)> &UpdateOp,
238  bool IsVolatile);
239  /// Emits atomic update.
240  /// \param AO Atomic ordering.
241  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
242  bool IsVolatile);
243 
244  /// Materialize an atomic r-value in atomic-layout memory.
245  Address materializeRValue(RValue rvalue) const;
246 
247  /// Creates temp alloca for intermediate operations on atomic value.
248  Address CreateTempAlloca() const;
249  private:
250  bool requiresMemSetZero(llvm::Type *type) const;
251 
252 
253  /// Emits atomic load as a libcall.
254  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
255  llvm::AtomicOrdering AO, bool IsVolatile);
256  /// Emits atomic load as LLVM instruction.
257  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
258  /// Emits atomic compare-and-exchange op as a libcall.
259  llvm::Value *EmitAtomicCompareExchangeLibcall(
260  llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
261  llvm::AtomicOrdering Success =
262  llvm::AtomicOrdering::SequentiallyConsistent,
263  llvm::AtomicOrdering Failure =
264  llvm::AtomicOrdering::SequentiallyConsistent);
265  /// Emits atomic compare-and-exchange op as LLVM instruction.
266  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
267  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
268  llvm::AtomicOrdering Success =
269  llvm::AtomicOrdering::SequentiallyConsistent,
270  llvm::AtomicOrdering Failure =
271  llvm::AtomicOrdering::SequentiallyConsistent,
272  bool IsWeak = false);
273  /// Emit atomic update as libcalls.
274  void
275  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
276  const llvm::function_ref<RValue(RValue)> &UpdateOp,
277  bool IsVolatile);
278  /// Emit atomic update as LLVM instructions.
279  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
280  const llvm::function_ref<RValue(RValue)> &UpdateOp,
281  bool IsVolatile);
282  /// Emit atomic update as libcalls.
283  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
284  bool IsVolatile);
285  /// Emit atomic update as LLVM instructions.
286  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
287  bool IsVolatile);
288  };
289 }
290 
291 Address AtomicInfo::CreateTempAlloca() const {
292  Address TempAlloca = CGF.CreateMemTemp(
293  (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
294  : AtomicTy,
295  getAtomicAlignment(),
296  "atomic-temp");
297  // Cast to pointer to value type for bitfields.
298  if (LVal.isBitField())
299  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
300  TempAlloca, getAtomicAddress().getType());
301  return TempAlloca;
302 }
303 
304 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
305  StringRef fnName,
306  QualType resultType,
307  CallArgList &args) {
308  const CGFunctionInfo &fnInfo =
309  CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
310  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
311  llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
312  auto callee = CGCallee::forDirect(fn);
313  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
314 }
315 
316 /// Does a store of the given IR type modify the full expected width?
317 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
318  uint64_t expectedSize) {
319  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
320 }
321 
322 /// Does the atomic type require memsetting to zero before initialization?
323 ///
324 /// The IR type is provided as a way of making certain queries faster.
325 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
326  // If the atomic type has size padding, we definitely need a memset.
327  if (hasPadding()) return true;
328 
329  // Otherwise, do some simple heuristics to try to avoid it:
330  switch (getEvaluationKind()) {
331  // For scalars and complexes, check whether the store size of the
332  // type uses the full size.
333  case TEK_Scalar:
334  return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
335  case TEK_Complex:
336  return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
337  AtomicSizeInBits / 2);
338 
339  // Padding in structs has an undefined bit pattern. User beware.
340  case TEK_Aggregate:
341  return false;
342  }
343  llvm_unreachable("bad evaluation kind");
344 }
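 // Illustration (assuming an x86-64 target where long double is the 80-bit
 // x87 format stored in 16 bytes): _Atomic(long double) has no size padding,
 // but a store of x86_fp80 writes only 10 of the 16 bytes, so isFullSizeType()
 // is false and requiresMemSetZero() returns true; a plain _Atomic(int) writes
 // all 4 of its 4 bytes and needs no memset.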
345 
346 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
347  assert(LVal.isSimple());
348  llvm::Value *addr = LVal.getPointer();
349  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
350  return false;
351 
352  CGF.Builder.CreateMemSet(
353  addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
354  CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
355  LVal.getAlignment().getQuantity());
356  return true;
357 }
358 
359 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
360  Address Dest, Address Ptr,
361  Address Val1, Address Val2,
362  uint64_t Size,
363  llvm::AtomicOrdering SuccessOrder,
364  llvm::AtomicOrdering FailureOrder,
365  llvm::SyncScope::ID Scope) {
366  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
367  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
368  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
369 
370  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
371  Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
372  Scope);
373  Pair->setVolatile(E->isVolatile());
374  Pair->setWeak(IsWeak);
375 
376  // Cmp holds the result of the compare-exchange operation: true on success,
377  // false on failure.
378  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
379  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
380 
381  // This basic block is used to hold the store instruction if the operation
382  // failed.
383  llvm::BasicBlock *StoreExpectedBB =
384  CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
385 
386  // This basic block is the exit point of the operation, we should end up
387  // here regardless of whether or not the operation succeeded.
388  llvm::BasicBlock *ContinueBB =
389  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
390 
391  // Update Expected if Expected isn't equal to Old, otherwise branch to the
392  // exit point.
393  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
394 
395  CGF.Builder.SetInsertPoint(StoreExpectedBB);
396  // Update the memory at Expected with Old's value.
397  CGF.Builder.CreateStore(Old, Val1);
398  // Finally, branch to the exit point.
399  CGF.Builder.CreateBr(ContinueBB);
400 
401  CGF.Builder.SetInsertPoint(ContinueBB);
402  // Update the memory at Dest with Cmp's value.
403  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
404 }
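 // Rough shape of the IR produced above (illustrative, i32 operand assumed):
 //   %pair = cmpxchg [weak] i32* %ptr, i32 %expected, i32 %desired <success> <failure>
 //   %old  = extractvalue { i32, i1 } %pair, 0
 //   %cmp  = extractvalue { i32, i1 } %pair, 1
 //   br i1 %cmp, label %cmpxchg.continue, label %cmpxchg.store_expected
 // cmpxchg.store_expected:   ; failed: write the observed value back to *Val1
 // cmpxchg.continue:         ; store the i1 success flag to Dest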
405 
406 /// Given an ordering required on success, emit all possible cmpxchg
407 /// instructions to cope with the provided (but possibly only dynamically known)
408 /// FailureOrder.
409 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
410  bool IsWeak, Address Dest, Address Ptr,
411  Address Val1, Address Val2,
412  llvm::Value *FailureOrderVal,
413  uint64_t Size,
414  llvm::AtomicOrdering SuccessOrder,
415  llvm::SyncScope::ID Scope) {
416  llvm::AtomicOrdering FailureOrder;
417  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
418  auto FOS = FO->getSExtValue();
419  if (!llvm::isValidAtomicOrderingCABI(FOS))
420  FailureOrder = llvm::AtomicOrdering::Monotonic;
421  else
422  switch ((llvm::AtomicOrderingCABI)FOS) {
423  case llvm::AtomicOrderingCABI::relaxed:
424  case llvm::AtomicOrderingCABI::release:
425  case llvm::AtomicOrderingCABI::acq_rel:
426  FailureOrder = llvm::AtomicOrdering::Monotonic;
427  break;
428  case llvm::AtomicOrderingCABI::consume:
429  case llvm::AtomicOrderingCABI::acquire:
430  FailureOrder = llvm::AtomicOrdering::Acquire;
431  break;
432  case llvm::AtomicOrderingCABI::seq_cst:
433  FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
434  break;
435  }
436  if (isStrongerThan(FailureOrder, SuccessOrder)) {
437  // Don't assert on undefined behavior "failure argument shall be no
438  // stronger than the success argument".
439  FailureOrder =
440  llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
441  }
442  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
443  FailureOrder, Scope);
444  return;
445  }
446 
447  // Create all the relevant BB's
448  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
449  *SeqCstBB = nullptr;
450  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
451  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
452  SuccessOrder != llvm::AtomicOrdering::Release)
453  AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
454  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
455  SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
456 
457  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
458 
459  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
460 
461  // Emit all the different atomics
462 
463  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
464  // doesn't matter unless someone is crazy enough to use something that
465  // doesn't fold to a constant for the ordering.
466  CGF.Builder.SetInsertPoint(MonotonicBB);
467  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
468  Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
469  CGF.Builder.CreateBr(ContBB);
470 
471  if (AcquireBB) {
472  CGF.Builder.SetInsertPoint(AcquireBB);
473  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
474  Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
475  CGF.Builder.CreateBr(ContBB);
476  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
477  AcquireBB);
478  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
479  AcquireBB);
480  }
481  if (SeqCstBB) {
482  CGF.Builder.SetInsertPoint(SeqCstBB);
483  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
484  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
485  CGF.Builder.CreateBr(ContBB);
486  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
487  SeqCstBB);
488  }
489 
490  CGF.Builder.SetInsertPoint(ContBB);
491 }
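 // Illustration: for a failure ordering that is not a compile-time constant,
 // e.g. (hypothetical caller)
 //   __c11_atomic_compare_exchange_strong(p, &exp, des,
 //                                        memory_order_acq_rel, fail_order);
 // the code above emits a switch on fail_order: monotonic is the default,
 // consume/acquire dispatch to an acquire cmpxchg, and a seq_cst block exists
 // only when the success ordering is itself seq_cst.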
492 
493 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
494  Address Ptr, Address Val1, Address Val2,
495  llvm::Value *IsWeak, llvm::Value *FailureOrder,
496  uint64_t Size, llvm::AtomicOrdering Order,
497  llvm::SyncScope::ID Scope) {
498  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
499  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
500 
501  switch (E->getOp()) {
502  case AtomicExpr::AO__c11_atomic_init:
503  case AtomicExpr::AO__opencl_atomic_init:
504  llvm_unreachable("Already handled!");
505 
506  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
507  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
508  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
509  FailureOrder, Size, Order, Scope);
510  return;
511  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
512  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
513  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
514  FailureOrder, Size, Order, Scope);
515  return;
516  case AtomicExpr::AO__atomic_compare_exchange:
517  case AtomicExpr::AO__atomic_compare_exchange_n: {
518  if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
519  emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
520  Val1, Val2, FailureOrder, Size, Order, Scope);
521  } else {
522  // Create all the relevant BB's
523  llvm::BasicBlock *StrongBB =
524  CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
525  llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
526  llvm::BasicBlock *ContBB =
527  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
528 
529  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
530  SI->addCase(CGF.Builder.getInt1(false), StrongBB);
531 
532  CGF.Builder.SetInsertPoint(StrongBB);
533  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
534  FailureOrder, Size, Order, Scope);
535  CGF.Builder.CreateBr(ContBB);
536 
537  CGF.Builder.SetInsertPoint(WeakBB);
538  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
539  FailureOrder, Size, Order, Scope);
540  CGF.Builder.CreateBr(ContBB);
541 
542  CGF.Builder.SetInsertPoint(ContBB);
543  }
544  return;
545  }
546  case AtomicExpr::AO__c11_atomic_load:
547  case AtomicExpr::AO__opencl_atomic_load:
548  case AtomicExpr::AO__atomic_load_n:
549  case AtomicExpr::AO__atomic_load: {
550  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
551  Load->setAtomic(Order, Scope);
552  Load->setVolatile(E->isVolatile());
553  CGF.Builder.CreateStore(Load, Dest);
554  return;
555  }
556 
557  case AtomicExpr::AO__c11_atomic_store:
558  case AtomicExpr::AO__opencl_atomic_store:
559  case AtomicExpr::AO__atomic_store:
560  case AtomicExpr::AO__atomic_store_n: {
561  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
562  llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
563  Store->setAtomic(Order, Scope);
564  Store->setVolatile(E->isVolatile());
565  return;
566  }
567 
568  case AtomicExpr::AO__c11_atomic_exchange:
569  case AtomicExpr::AO__opencl_atomic_exchange:
570  case AtomicExpr::AO__atomic_exchange_n:
571  case AtomicExpr::AO__atomic_exchange:
572  Op = llvm::AtomicRMWInst::Xchg;
573  break;
574 
575  case AtomicExpr::AO__atomic_add_fetch:
576  PostOp = llvm::Instruction::Add;
577  LLVM_FALLTHROUGH;
578  case AtomicExpr::AO__c11_atomic_fetch_add:
579  case AtomicExpr::AO__opencl_atomic_fetch_add:
580  case AtomicExpr::AO__atomic_fetch_add:
581  Op = llvm::AtomicRMWInst::Add;
582  break;
583 
584  case AtomicExpr::AO__atomic_sub_fetch:
585  PostOp = llvm::Instruction::Sub;
586  LLVM_FALLTHROUGH;
587  case AtomicExpr::AO__c11_atomic_fetch_sub:
588  case AtomicExpr::AO__opencl_atomic_fetch_sub:
589  case AtomicExpr::AO__atomic_fetch_sub:
590  Op = llvm::AtomicRMWInst::Sub;
591  break;
592 
593  case AtomicExpr::AO__opencl_atomic_fetch_min:
594  case AtomicExpr::AO__atomic_fetch_min:
595  Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
596  : llvm::AtomicRMWInst::UMin;
597  break;
598 
599  case AtomicExpr::AO__opencl_atomic_fetch_max:
600  case AtomicExpr::AO__atomic_fetch_max:
601  Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
602  : llvm::AtomicRMWInst::UMax;
603  break;
604 
605  case AtomicExpr::AO__atomic_and_fetch:
606  PostOp = llvm::Instruction::And;
607  LLVM_FALLTHROUGH;
608  case AtomicExpr::AO__c11_atomic_fetch_and:
609  case AtomicExpr::AO__opencl_atomic_fetch_and:
610  case AtomicExpr::AO__atomic_fetch_and:
611  Op = llvm::AtomicRMWInst::And;
612  break;
613 
614  case AtomicExpr::AO__atomic_or_fetch:
615  PostOp = llvm::Instruction::Or;
616  LLVM_FALLTHROUGH;
617  case AtomicExpr::AO__c11_atomic_fetch_or:
618  case AtomicExpr::AO__opencl_atomic_fetch_or:
619  case AtomicExpr::AO__atomic_fetch_or:
620  Op = llvm::AtomicRMWInst::Or;
621  break;
622 
623  case AtomicExpr::AO__atomic_xor_fetch:
624  PostOp = llvm::Instruction::Xor;
625  LLVM_FALLTHROUGH;
626  case AtomicExpr::AO__c11_atomic_fetch_xor:
627  case AtomicExpr::AO__opencl_atomic_fetch_xor:
628  case AtomicExpr::AO__atomic_fetch_xor:
629  Op = llvm::AtomicRMWInst::Xor;
630  break;
631 
632  case AtomicExpr::AO__atomic_nand_fetch:
633  PostOp = llvm::Instruction::And; // the NOT is special cased below
634  LLVM_FALLTHROUGH;
635  case AtomicExpr::AO__atomic_fetch_nand:
636  Op = llvm::AtomicRMWInst::Nand;
637  break;
638  }
639 
640  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
641  llvm::AtomicRMWInst *RMWI =
642  CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
643  RMWI->setVolatile(E->isVolatile());
644 
645  // For __atomic_*_fetch operations, perform the operation again to
646  // determine the value which was written.
647  llvm::Value *Result = RMWI;
648  if (PostOp)
649  Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
650  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
651  Result = CGF.Builder.CreateNot(Result);
652  CGF.Builder.CreateStore(Result, Dest);
653 }
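 // Illustration: for the GNU "*_fetch" builtins the atomicrmw result is the
 // old value, so the operation is re-applied to produce the new value, e.g.
 //   __atomic_add_fetch(p, n, __ATOMIC_SEQ_CST)
 // lowers roughly to
 //   %old = atomicrmw add i32* %p, i32 %n seq_cst
 //   %new = add i32 %old, %n            ; PostOp
 // and __atomic_nand_fetch additionally inverts: %new = xor (and %old, %n), -1.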
654 
655 // This function emits any expression (scalar, complex, or aggregate)
656 // into a temporary alloca.
657 static Address
658 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
659  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
660  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
661  /*Init*/ true);
662  return DeclPtr;
663 }
664 
665 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
666  Address Ptr, Address Val1, Address Val2,
667  llvm::Value *IsWeak, llvm::Value *FailureOrder,
668  uint64_t Size, llvm::AtomicOrdering Order,
669  llvm::Value *Scope) {
670  auto ScopeModel = Expr->getScopeModel();
671 
672  // LLVM atomic instructions always have a sync scope. If the clang atomic
673  // expression has no scope operand, use the default LLVM sync scope.
674  if (!ScopeModel) {
675  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
676  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
677  return;
678  }
679 
680  // Handle constant scope.
681  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
682  auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
683  ScopeModel->map(SC->getZExtValue()), CGF.CGM.getLLVMContext());
684  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
685  Order, SCID);
686  return;
687  }
688 
689  // Handle non-constant scope.
690  auto &Builder = CGF.Builder;
691  auto Scopes = ScopeModel->getRuntimeValues();
692  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
693  for (auto S : Scopes)
694  BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
695 
696  llvm::BasicBlock *ContBB =
697  CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
698 
699  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
701  // If an unsupported sync scope is encountered at run time, assume a
702  // fallback sync scope value.
702  auto FallBack = ScopeModel->getFallBackValue();
703  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
704  for (auto S : Scopes) {
705  auto *B = BB[S];
706  if (S != FallBack)
707  SI->addCase(Builder.getInt32(S), B);
708 
709  Builder.SetInsertPoint(B);
710  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
711  Order,
712  CGF.getTargetHooks().getLLVMSyncScopeID(ScopeModel->map(S),
713  CGF.getLLVMContext()));
714  Builder.CreateBr(ContBB);
715  }
716 
717  Builder.SetInsertPoint(ContBB);
718 }
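 // Illustration (OpenCL 2.0 source assumed): for a call such as
 //   atomic_fetch_add_explicit(p, 1, memory_order_relaxed, scope)
 // where 'scope' is only known at run time, the code above emits a switch over
 // the scope values the model knows about, one basic block per scope, each
 // repeating the operation with the matching LLVM sync scope; unknown values
 // take the model's fallback scope.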
719 
720 static void
721 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
722  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
723  SourceLocation Loc, CharUnits SizeInChars) {
724  if (UseOptimizedLibcall) {
725  // Load value and pass it to the function directly.
726  CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
727  int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
728  ValTy =
729  CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
730  llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
731  SizeInBits)->getPointerTo();
732  Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
733  Val = CGF.EmitLoadOfScalar(Ptr, false,
734  CGF.getContext().getPointerType(ValTy),
735  Loc);
736  // Coerce the value into an appropriately sized integer type.
737  Args.add(RValue::get(Val), ValTy);
738  } else {
739  // Non-optimized functions always take a reference.
740  Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
741  CGF.getContext().VoidPtrTy);
742  }
743 }
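 // Illustration: for a 4-byte operand the optimized libcall takes the value
 // directly, e.g. __atomic_exchange_4(mem, val, order), so the value is loaded
 // here as a suitably sized integer; the generic form takes it by reference,
 // e.g. __atomic_exchange(4, mem, &val, &ret, order), so only a void* is
 // passed.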
744 
745 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
746  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
747  QualType MemTy = AtomicTy;
748  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
749  MemTy = AT->getValueType();
750  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
751 
752  Address Val1 = Address::invalid();
753  Address Val2 = Address::invalid();
754  Address Dest = Address::invalid();
755  Address Ptr = EmitPointerWithAlignment(E->getPtr());
756 
757  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
758  E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
759  LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
760  EmitAtomicInit(E->getVal1(), lvalue);
761  return RValue::get(nullptr);
762  }
763 
764  CharUnits sizeChars, alignChars;
765  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
766  uint64_t Size = sizeChars.getQuantity();
767  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
768 
769  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
770  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
771  bool UseLibcall = Misaligned | Oversized;
772 
773  if (UseLibcall) {
774  CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
775  << !Oversized;
776  }
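 // Illustration (assumed target with a 16-byte max inline atomic width): an
 // aligned _Atomic(int) is handled inline, while an under-aligned operand
 // (say, a 1-aligned 8-byte field of a packed struct) or an oversized one
 // (say, a 32-byte _Atomic struct) takes the __atomic_* libcall path and
 // triggers the warning above.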
777 
778  llvm::Value *Order = EmitScalarExpr(E->getOrder());
779  llvm::Value *Scope =
780  E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
781 
782  switch (E->getOp()) {
783  case AtomicExpr::AO__c11_atomic_init:
784  case AtomicExpr::AO__opencl_atomic_init:
785  llvm_unreachable("Already handled above with EmitAtomicInit!");
786 
787  case AtomicExpr::AO__c11_atomic_load:
788  case AtomicExpr::AO__opencl_atomic_load:
789  case AtomicExpr::AO__atomic_load_n:
790  break;
791 
792  case AtomicExpr::AO__atomic_load:
793  Dest = EmitPointerWithAlignment(E->getVal1());
794  break;
795 
796  case AtomicExpr::AO__atomic_store:
797  Val1 = EmitPointerWithAlignment(E->getVal1());
798  break;
799 
800  case AtomicExpr::AO__atomic_exchange:
801  Val1 = EmitPointerWithAlignment(E->getVal1());
802  Dest = EmitPointerWithAlignment(E->getVal2());
803  break;
804 
805  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
806  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
807  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
808  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
809  case AtomicExpr::AO__atomic_compare_exchange_n:
810  case AtomicExpr::AO__atomic_compare_exchange:
811  Val1 = EmitPointerWithAlignment(E->getVal1());
812  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
813  Val2 = EmitPointerWithAlignment(E->getVal2());
814  else
815  Val2 = EmitValToTemp(*this, E->getVal2());
816  OrderFail = EmitScalarExpr(E->getOrderFail());
817  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
818  E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
819  IsWeak = EmitScalarExpr(E->getWeak());
820  break;
821 
822  case AtomicExpr::AO__c11_atomic_fetch_add:
823  case AtomicExpr::AO__c11_atomic_fetch_sub:
824  case AtomicExpr::AO__opencl_atomic_fetch_add:
825  case AtomicExpr::AO__opencl_atomic_fetch_sub:
826  if (MemTy->isPointerType()) {
827  // For pointer arithmetic, we're required to do a bit of math:
828  // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
829  // ... but only for the C11 builtins. The GNU builtins expect the
830  // user to multiply by sizeof(T).
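  // (Illustration, assuming 4-byte int and an _Atomic(int *) object p:
  // __c11_atomic_fetch_add(&p, 2, ...) advances the pointer by two elements,
  // i.e. 8 bytes, because Val1 is scaled by sizeof(int) below, whereas the
  // GNU __atomic_fetch_add on a pointer adds the raw value 2.)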
831  QualType Val1Ty = E->getVal1()->getType();
832  llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
833  CharUnits PointeeIncAmt =
834  getContext().getTypeSizeInChars(MemTy->getPointeeType());
835  Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
836  auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
837  Val1 = Temp;
838  EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
839  break;
840  }
841  LLVM_FALLTHROUGH;
842  case AtomicExpr::AO__atomic_fetch_add:
843  case AtomicExpr::AO__atomic_fetch_sub:
844  case AtomicExpr::AO__atomic_add_fetch:
845  case AtomicExpr::AO__atomic_sub_fetch:
846  case AtomicExpr::AO__c11_atomic_store:
847  case AtomicExpr::AO__c11_atomic_exchange:
848  case AtomicExpr::AO__opencl_atomic_store:
849  case AtomicExpr::AO__opencl_atomic_exchange:
850  case AtomicExpr::AO__atomic_store_n:
851  case AtomicExpr::AO__atomic_exchange_n:
852  case AtomicExpr::AO__c11_atomic_fetch_and:
853  case AtomicExpr::AO__c11_atomic_fetch_or:
854  case AtomicExpr::AO__c11_atomic_fetch_xor:
855  case AtomicExpr::AO__opencl_atomic_fetch_and:
856  case AtomicExpr::AO__opencl_atomic_fetch_or:
857  case AtomicExpr::AO__opencl_atomic_fetch_xor:
858  case AtomicExpr::AO__opencl_atomic_fetch_min:
859  case AtomicExpr::AO__opencl_atomic_fetch_max:
860  case AtomicExpr::AO__atomic_fetch_and:
861  case AtomicExpr::AO__atomic_fetch_or:
862  case AtomicExpr::AO__atomic_fetch_xor:
863  case AtomicExpr::AO__atomic_fetch_nand:
864  case AtomicExpr::AO__atomic_and_fetch:
865  case AtomicExpr::AO__atomic_or_fetch:
866  case AtomicExpr::AO__atomic_xor_fetch:
867  case AtomicExpr::AO__atomic_nand_fetch:
868  case AtomicExpr::AO__atomic_fetch_min:
869  case AtomicExpr::AO__atomic_fetch_max:
870  Val1 = EmitValToTemp(*this, E->getVal1());
871  break;
872  }
873 
874  QualType RValTy = E->getType().getUnqualifiedType();
875 
876  // The inlined atomics only function on iN types, where N is a power of 2. We
877  // need to make sure (via temporaries if necessary) that all incoming values
878  // are compatible.
879  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
880  AtomicInfo Atomics(*this, AtomicVal);
881 
882  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
883  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
884  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
885  if (Dest.isValid())
886  Dest = Atomics.emitCastToAtomicIntPointer(Dest);
887  else if (E->isCmpXChg())
888  Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
889  else if (!RValTy->isVoidType())
890  Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
891 
892  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
893  if (UseLibcall) {
894  bool UseOptimizedLibcall = false;
895  switch (E->getOp()) {
896  case AtomicExpr::AO__c11_atomic_init:
897  case AtomicExpr::AO__opencl_atomic_init:
898  llvm_unreachable("Already handled above with EmitAtomicInit!");
899 
900  case AtomicExpr::AO__c11_atomic_fetch_add:
901  case AtomicExpr::AO__opencl_atomic_fetch_add:
902  case AtomicExpr::AO__atomic_fetch_add:
903  case AtomicExpr::AO__c11_atomic_fetch_and:
904  case AtomicExpr::AO__opencl_atomic_fetch_and:
905  case AtomicExpr::AO__atomic_fetch_and:
906  case AtomicExpr::AO__c11_atomic_fetch_or:
907  case AtomicExpr::AO__opencl_atomic_fetch_or:
908  case AtomicExpr::AO__atomic_fetch_or:
909  case AtomicExpr::AO__atomic_fetch_nand:
910  case AtomicExpr::AO__c11_atomic_fetch_sub:
911  case AtomicExpr::AO__opencl_atomic_fetch_sub:
912  case AtomicExpr::AO__atomic_fetch_sub:
913  case AtomicExpr::AO__c11_atomic_fetch_xor:
914  case AtomicExpr::AO__opencl_atomic_fetch_xor:
915  case AtomicExpr::AO__opencl_atomic_fetch_min:
916  case AtomicExpr::AO__opencl_atomic_fetch_max:
917  case AtomicExpr::AO__atomic_fetch_xor:
918  case AtomicExpr::AO__atomic_add_fetch:
919  case AtomicExpr::AO__atomic_and_fetch:
920  case AtomicExpr::AO__atomic_nand_fetch:
921  case AtomicExpr::AO__atomic_or_fetch:
922  case AtomicExpr::AO__atomic_sub_fetch:
923  case AtomicExpr::AO__atomic_xor_fetch:
924  case AtomicExpr::AO__atomic_fetch_min:
925  case AtomicExpr::AO__atomic_fetch_max:
926  // For these, only library calls for certain sizes exist.
927  UseOptimizedLibcall = true;
928  break;
929 
930  case AtomicExpr::AO__atomic_load:
931  case AtomicExpr::AO__atomic_store:
932  case AtomicExpr::AO__atomic_exchange:
933  case AtomicExpr::AO__atomic_compare_exchange:
934  // Use the generic version if we don't know that the operand will be
935  // suitably aligned for the optimized version.
936  if (Misaligned)
937  break;
938  LLVM_FALLTHROUGH;
939  case AtomicExpr::AO__c11_atomic_load:
940  case AtomicExpr::AO__c11_atomic_store:
941  case AtomicExpr::AO__c11_atomic_exchange:
942  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
943  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
944  case AtomicExpr::AO__opencl_atomic_load:
945  case AtomicExpr::AO__opencl_atomic_store:
946  case AtomicExpr::AO__opencl_atomic_exchange:
947  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
948  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
949  case AtomicExpr::AO__atomic_load_n:
950  case AtomicExpr::AO__atomic_store_n:
951  case AtomicExpr::AO__atomic_exchange_n:
952  case AtomicExpr::AO__atomic_compare_exchange_n:
953  // Only use optimized library calls for sizes for which they exist.
954  // FIXME: Size == 16 optimized library functions exist too.
955  if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
956  UseOptimizedLibcall = true;
957  break;
958  }
959 
960  CallArgList Args;
961  if (!UseOptimizedLibcall) {
962  // For non-optimized library calls, the size is the first parameter
963  Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
964  getContext().getSizeType());
965  }
966  // Atomic address is the first or second parameter
967  // The OpenCL atomic library functions only accept pointer arguments to
968  // generic address space.
969  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
970  if (!E->isOpenCL())
971  return V;
972  auto AS = PT->getAs<PointerType>()->getPointeeType().getAddressSpace();
973  if (AS == LangAS::opencl_generic)
974  return V;
975  auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
976  auto T = V->getType();
977  auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
978 
979  return getTargetHooks().performAddrSpaceCast(
980  *this, V, AS, LangAS::opencl_generic, DestType, false);
981  };
982 
983  Args.add(RValue::get(CastToGenericAddrSpace(
984  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
985  getContext().VoidPtrTy);
986 
987  std::string LibCallName;
988  QualType LoweredMemTy =
989  MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
990  QualType RetTy;
991  bool HaveRetTy = false;
992  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
993  switch (E->getOp()) {
994  case AtomicExpr::AO__c11_atomic_init:
995  case AtomicExpr::AO__opencl_atomic_init:
996  llvm_unreachable("Already handled!");
997 
998  // There is only one libcall for compare and exchange, because there is no
999  // optimization benefit possible from a libcall version of a weak compare
1000  // and exchange.
1001  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1002  // void *desired, int success, int failure)
1003  // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1004  // int success, int failure)
1005  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1006  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1007  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1008  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1009  case AtomicExpr::AO__atomic_compare_exchange:
1010  case AtomicExpr::AO__atomic_compare_exchange_n:
1011  LibCallName = "__atomic_compare_exchange";
1012  RetTy = getContext().BoolTy;
1013  HaveRetTy = true;
1014  Args.add(
1015  RValue::get(CastToGenericAddrSpace(
1016  EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1017  getContext().VoidPtrTy);
1018  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1019  MemTy, E->getExprLoc(), sizeChars);
1020  Args.add(RValue::get(Order), getContext().IntTy);
1021  Order = OrderFail;
1022  break;
1023  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1024  // int order)
1025  // T __atomic_exchange_N(T *mem, T val, int order)
1026  case AtomicExpr::AO__c11_atomic_exchange:
1027  case AtomicExpr::AO__opencl_atomic_exchange:
1028  case AtomicExpr::AO__atomic_exchange_n:
1029  case AtomicExpr::AO__atomic_exchange:
1030  LibCallName = "__atomic_exchange";
1031  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1032  MemTy, E->getExprLoc(), sizeChars);
1033  break;
1034  // void __atomic_store(size_t size, void *mem, void *val, int order)
1035  // void __atomic_store_N(T *mem, T val, int order)
1036  case AtomicExpr::AO__c11_atomic_store:
1037  case AtomicExpr::AO__opencl_atomic_store:
1038  case AtomicExpr::AO__atomic_store:
1039  case AtomicExpr::AO__atomic_store_n:
1040  LibCallName = "__atomic_store";
1041  RetTy = getContext().VoidTy;
1042  HaveRetTy = true;
1043  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1044  MemTy, E->getExprLoc(), sizeChars);
1045  break;
1046  // void __atomic_load(size_t size, void *mem, void *return, int order)
1047  // T __atomic_load_N(T *mem, int order)
1048  case AtomicExpr::AO__c11_atomic_load:
1049  case AtomicExpr::AO__opencl_atomic_load:
1050  case AtomicExpr::AO__atomic_load:
1051  case AtomicExpr::AO__atomic_load_n:
1052  LibCallName = "__atomic_load";
1053  break;
1054  // T __atomic_add_fetch_N(T *mem, T val, int order)
1055  // T __atomic_fetch_add_N(T *mem, T val, int order)
1056  case AtomicExpr::AO__atomic_add_fetch:
1057  PostOp = llvm::Instruction::Add;
1058  LLVM_FALLTHROUGH;
1059  case AtomicExpr::AO__c11_atomic_fetch_add:
1060  case AtomicExpr::AO__opencl_atomic_fetch_add:
1061  case AtomicExpr::AO__atomic_fetch_add:
1062  LibCallName = "__atomic_fetch_add";
1063  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1064  LoweredMemTy, E->getExprLoc(), sizeChars);
1065  break;
1066  // T __atomic_and_fetch_N(T *mem, T val, int order)
1067  // T __atomic_fetch_and_N(T *mem, T val, int order)
1068  case AtomicExpr::AO__atomic_and_fetch:
1069  PostOp = llvm::Instruction::And;
1070  LLVM_FALLTHROUGH;
1071  case AtomicExpr::AO__c11_atomic_fetch_and:
1072  case AtomicExpr::AO__opencl_atomic_fetch_and:
1073  case AtomicExpr::AO__atomic_fetch_and:
1074  LibCallName = "__atomic_fetch_and";
1075  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1076  MemTy, E->getExprLoc(), sizeChars);
1077  break;
1078  // T __atomic_or_fetch_N(T *mem, T val, int order)
1079  // T __atomic_fetch_or_N(T *mem, T val, int order)
1080  case AtomicExpr::AO__atomic_or_fetch:
1081  PostOp = llvm::Instruction::Or;
1082  LLVM_FALLTHROUGH;
1083  case AtomicExpr::AO__c11_atomic_fetch_or:
1084  case AtomicExpr::AO__opencl_atomic_fetch_or:
1085  case AtomicExpr::AO__atomic_fetch_or:
1086  LibCallName = "__atomic_fetch_or";
1087  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1088  MemTy, E->getExprLoc(), sizeChars);
1089  break;
1090  // T __atomic_sub_fetch_N(T *mem, T val, int order)
1091  // T __atomic_fetch_sub_N(T *mem, T val, int order)
1092  case AtomicExpr::AO__atomic_sub_fetch:
1093  PostOp = llvm::Instruction::Sub;
1094  LLVM_FALLTHROUGH;
1095  case AtomicExpr::AO__c11_atomic_fetch_sub:
1096  case AtomicExpr::AO__opencl_atomic_fetch_sub:
1097  case AtomicExpr::AO__atomic_fetch_sub:
1098  LibCallName = "__atomic_fetch_sub";
1099  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1100  LoweredMemTy, E->getExprLoc(), sizeChars);
1101  break;
1102  // T __atomic_xor_fetch_N(T *mem, T val, int order)
1103  // T __atomic_fetch_xor_N(T *mem, T val, int order)
1104  case AtomicExpr::AO__atomic_xor_fetch:
1105  PostOp = llvm::Instruction::Xor;
1106  LLVM_FALLTHROUGH;
1107  case AtomicExpr::AO__c11_atomic_fetch_xor:
1108  case AtomicExpr::AO__opencl_atomic_fetch_xor:
1109  case AtomicExpr::AO__atomic_fetch_xor:
1110  LibCallName = "__atomic_fetch_xor";
1111  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1112  MemTy, E->getExprLoc(), sizeChars);
1113  break;
1114  case AtomicExpr::AO__atomic_fetch_min:
1115  case AtomicExpr::AO__opencl_atomic_fetch_min:
1116  LibCallName = E->getValueType()->isSignedIntegerType()
1117  ? "__atomic_fetch_min"
1118  : "__atomic_fetch_umin";
1119  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1120  LoweredMemTy, E->getExprLoc(), sizeChars);
1121  break;
1122  case AtomicExpr::AO__atomic_fetch_max:
1123  case AtomicExpr::AO__opencl_atomic_fetch_max:
1124  LibCallName = E->getValueType()->isSignedIntegerType()
1125  ? "__atomic_fetch_max"
1126  : "__atomic_fetch_umax";
1127  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1128  LoweredMemTy, E->getExprLoc(), sizeChars);
1129  break;
1130  // T __atomic_nand_fetch_N(T *mem, T val, int order)
1131  // T __atomic_fetch_nand_N(T *mem, T val, int order)
1132  case AtomicExpr::AO__atomic_nand_fetch:
1133  PostOp = llvm::Instruction::And; // the NOT is special cased below
1134  LLVM_FALLTHROUGH;
1135  case AtomicExpr::AO__atomic_fetch_nand:
1136  LibCallName = "__atomic_fetch_nand";
1137  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1138  MemTy, E->getExprLoc(), sizeChars);
1139  break;
1140  }
1141 
1142  if (E->isOpenCL()) {
1143  LibCallName = std::string("__opencl") +
1144  StringRef(LibCallName).drop_front(1).str();
1145 
1146  }
1147  // Optimized functions have the size in their name.
1148  if (UseOptimizedLibcall)
1149  LibCallName += "_" + llvm::utostr(Size);
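  // Illustration: a 4-byte fetch-add becomes "__atomic_fetch_add_4"; with the
  // OpenCL rename above, the same operation becomes "__opencl_atomic_fetch_add_4".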
1150  // By default, assume we return a value of the atomic type.
1151  if (!HaveRetTy) {
1152  if (UseOptimizedLibcall) {
1153  // Value is returned directly.
1154  // The function returns an appropriately sized integer type.
1155  RetTy = getContext().getIntTypeForBitwidth(
1156  getContext().toBits(sizeChars), /*Signed=*/false);
1157  } else {
1158  // Value is returned through parameter before the order.
1159  RetTy = getContext().VoidTy;
1160  Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1161  getContext().VoidPtrTy);
1162  }
1163  }
1164  // order is always the last parameter
1165  Args.add(RValue::get(Order),
1166  getContext().IntTy);
1167  if (E->isOpenCL())
1168  Args.add(RValue::get(Scope), getContext().IntTy);
1169 
1170  // PostOp is only needed for the atomic_*_fetch operations, and
1171  // thus is only needed for and implemented in the
1172  // UseOptimizedLibcall codepath.
1173  assert(UseOptimizedLibcall || !PostOp);
1174 
1175  RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1176  // The value is returned directly from the libcall.
1177  if (E->isCmpXChg())
1178  return Res;
1179 
1180  // The value is returned directly for optimized libcalls but the expr
1181  // provided an out-param.
1182  if (UseOptimizedLibcall && Res.getScalarVal()) {
1183  llvm::Value *ResVal = Res.getScalarVal();
1184  if (PostOp) {
1185  llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1186  ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1187  }
1188  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1189  ResVal = Builder.CreateNot(ResVal);
1190 
1191  Builder.CreateStore(
1192  ResVal,
1193  Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1194  }
1195 
1196  if (RValTy->isVoidType())
1197  return RValue::get(nullptr);
1198 
1199  return convertTempToRValue(
1200  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1201  RValTy, E->getExprLoc());
1202  }
1203 
1204  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1205  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1206  E->getOp() == AtomicExpr::AO__atomic_store ||
1207  E->getOp() == AtomicExpr::AO__atomic_store_n;
1208  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1209  E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1210  E->getOp() == AtomicExpr::AO__atomic_load ||
1211  E->getOp() == AtomicExpr::AO__atomic_load_n;
1212 
1213  if (isa<llvm::ConstantInt>(Order)) {
1214  auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1215  // We should not ever get to a case where the ordering isn't a valid C ABI
1216  // value, but it's hard to enforce that in general.
1217  if (llvm::isValidAtomicOrderingCABI(ord))
1218  switch ((llvm::AtomicOrderingCABI)ord) {
1219  case llvm::AtomicOrderingCABI::relaxed:
1220  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1221  llvm::AtomicOrdering::Monotonic, Scope);
1222  break;
1223  case llvm::AtomicOrderingCABI::consume:
1224  case llvm::AtomicOrderingCABI::acquire:
1225  if (IsStore)
1226  break; // Avoid crashing on code with undefined behavior
1227  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1228  llvm::AtomicOrdering::Acquire, Scope);
1229  break;
1230  case llvm::AtomicOrderingCABI::release:
1231  if (IsLoad)
1232  break; // Avoid crashing on code with undefined behavior
1233  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1234  llvm::AtomicOrdering::Release, Scope);
1235  break;
1236  case llvm::AtomicOrderingCABI::acq_rel:
1237  if (IsLoad || IsStore)
1238  break; // Avoid crashing on code with undefined behavior
1239  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1240  llvm::AtomicOrdering::AcquireRelease, Scope);
1241  break;
1242  case llvm::AtomicOrderingCABI::seq_cst:
1243  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1244  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1245  break;
1246  }
1247  if (RValTy->isVoidType())
1248  return RValue::get(nullptr);
1249 
1250  return convertTempToRValue(
1251  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1252  Dest.getAddressSpace())),
1253  RValTy, E->getExprLoc());
1254  }
1255 
1256  // Long case, when Order isn't obviously constant.
1257 
1258  // Create all the relevant BB's
1259  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1260  *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1261  *SeqCstBB = nullptr;
1262  MonotonicBB = createBasicBlock("monotonic", CurFn);
1263  if (!IsStore)
1264  AcquireBB = createBasicBlock("acquire", CurFn);
1265  if (!IsLoad)
1266  ReleaseBB = createBasicBlock("release", CurFn);
1267  if (!IsLoad && !IsStore)
1268  AcqRelBB = createBasicBlock("acqrel", CurFn);
1269  SeqCstBB = createBasicBlock("seqcst", CurFn);
1270  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1271 
1272  // Create the switch for the split
1273  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1274  // doesn't matter unless someone is crazy enough to use something that
1275  // doesn't fold to a constant for the ordering.
1276  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1277  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1278 
1279  // Emit all the different atomics
1280  Builder.SetInsertPoint(MonotonicBB);
1281  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1282  llvm::AtomicOrdering::Monotonic, Scope);
1283  Builder.CreateBr(ContBB);
1284  if (!IsStore) {
1285  Builder.SetInsertPoint(AcquireBB);
1286  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1287  llvm::AtomicOrdering::Acquire, Scope);
1288  Builder.CreateBr(ContBB);
1289  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1290  AcquireBB);
1291  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1292  AcquireBB);
1293  }
1294  if (!IsLoad) {
1295  Builder.SetInsertPoint(ReleaseBB);
1296  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1297  llvm::AtomicOrdering::Release, Scope);
1298  Builder.CreateBr(ContBB);
1299  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1300  ReleaseBB);
1301  }
1302  if (!IsLoad && !IsStore) {
1303  Builder.SetInsertPoint(AcqRelBB);
1304  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1305  llvm::AtomicOrdering::AcquireRelease, Scope);
1306  Builder.CreateBr(ContBB);
1307  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1308  AcqRelBB);
1309  }
1310  Builder.SetInsertPoint(SeqCstBB);
1311  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1312  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1313  Builder.CreateBr(ContBB);
1314  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1315  SeqCstBB);
1316 
1317  // Cleanup and return
1318  Builder.SetInsertPoint(ContBB);
1319  if (RValTy->isVoidType())
1320  return RValue::get(nullptr);
1321 
1322  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1323  return convertTempToRValue(
1324  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1325  Dest.getAddressSpace())),
1326  RValTy, E->getExprLoc());
1327 }
1328 
1329 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1330  unsigned addrspace =
1331  cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1332  llvm::IntegerType *ty =
1333  llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1334  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1335 }
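 // Illustration: an _Atomic(float) is stored as a float, but the inline
 // lowering operates on an integer of the same width, so its address is
 // bitcast from float* to i32* (keeping the address space) before being fed
 // to atomic load/store, cmpxchg, or atomicrmw.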
1336 
1337 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1338  llvm::Type *Ty = Addr.getElementType();
1339  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1340  if (SourceSizeInBits != AtomicSizeInBits) {
1341  Address Tmp = CreateTempAlloca();
1342  CGF.Builder.CreateMemCpy(Tmp, Addr,
1343  std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1344  Addr = Tmp;
1345  }
1346 
1347  return emitCastToAtomicIntPointer(Addr);
1348 }
1349 
1350 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1351  AggValueSlot resultSlot,
1352  SourceLocation loc,
1353  bool asValue) const {
1354  if (LVal.isSimple()) {
1355  if (EvaluationKind == TEK_Aggregate)
1356  return resultSlot.asRValue();
1357 
1358  // Drill into the padding structure if we have one.
1359  if (hasPadding())
1360  addr = CGF.Builder.CreateStructGEP(addr, 0, CharUnits());
1361 
1362  // Otherwise, just convert the temporary to an r-value using the
1363  // normal conversion routine.
1364  return CGF.convertTempToRValue(addr, getValueType(), loc);
1365  }
1366  if (!asValue)
1367  // Get RValue from temp memory as atomic for non-simple lvalues
1368  return RValue::get(CGF.Builder.CreateLoad(addr));
1369  if (LVal.isBitField())
1370  return CGF.EmitLoadOfBitfieldLValue(
1371  LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1372  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1373  if (LVal.isVectorElt())
1374  return CGF.EmitLoadOfLValue(
1375  LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1376  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1377  assert(LVal.isExtVectorElt());
1378  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
1379  addr, LVal.getExtVectorElts(), LVal.getType(),
1380  LVal.getBaseInfo(), TBAAAccessInfo()));
1381 }
1382 
1383 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1384  AggValueSlot ResultSlot,
1385  SourceLocation Loc,
1386  bool AsValue) const {
1387  // Try not to in some easy cases.
1388  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1389  if (getEvaluationKind() == TEK_Scalar &&
1390  (((!LVal.isBitField() ||
1391  LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1392  !hasPadding()) ||
1393  !AsValue)) {
1394  auto *ValTy = AsValue
1395  ? CGF.ConvertTypeForMem(ValueTy)
1396  : getAtomicAddress().getType()->getPointerElementType();
1397  if (ValTy->isIntegerTy()) {
1398  assert(IntVal->getType() == ValTy && "Different integer types.");
1399  return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1400  } else if (ValTy->isPointerTy())
1401  return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1402  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1403  return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1404  }
1405 
1406  // Create a temporary. This needs to be big enough to hold the
1407  // atomic integer.
1408  Address Temp = Address::invalid();
1409  bool TempIsVolatile = false;
1410  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1411  assert(!ResultSlot.isIgnored());
1412  Temp = ResultSlot.getAddress();
1413  TempIsVolatile = ResultSlot.isVolatile();
1414  } else {
1415  Temp = CreateTempAlloca();
1416  }
1417 
1418  // Slam the integer into the temporary.
1419  Address CastTemp = emitCastToAtomicIntPointer(Temp);
1420  CGF.Builder.CreateStore(IntVal, CastTemp)
1421  ->setVolatile(TempIsVolatile);
1422 
1423  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1424 }
1425 
1426 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1427  llvm::AtomicOrdering AO, bool) {
1428  // void __atomic_load(size_t size, void *mem, void *return, int order);
1429  CallArgList Args;
1430  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1431  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1432  CGF.getContext().VoidPtrTy);
1433  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1434  CGF.getContext().VoidPtrTy);
1435  Args.add(
1436  RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1437  CGF.getContext().IntTy);
1438  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1439 }
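For context, a minimal user-level sketch of when this generic __atomic_load libcall is reached (the struct and variable names are hypothetical; the call shown uses the GCC/Clang __atomic_load builtin, which lowers to the libcall documented above when the type is too wide to load natively):

// Hypothetical 24-byte type: too wide for a native atomic load on typical targets,
// so Clang emits a call to __atomic_load(24, &shared_obj, &result, __ATOMIC_SEQ_CST).
struct Big { long a, b, c; };
Big shared_obj;

Big loadBigSeqCst() {
  Big result;
  __atomic_load(&shared_obj, &result, __ATOMIC_SEQ_CST);
  return result;
}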
1440 
1441 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1442  bool IsVolatile) {
1443  // Okay, we're doing this natively.
1444  Address Addr = getAtomicAddressAsAtomicIntPointer();
1445  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1446  Load->setAtomic(AO);
1447 
1448  // Other decoration.
1449  if (IsVolatile)
1450  Load->setVolatile(true);
1451  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1452  return Load;
1453 }
1454 
1455 /// An LValue is a candidate for having its loads and stores be made atomic if
1456 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1457 /// such an operation can be performed without a libcall.
1458 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1459  if (!CGM.getCodeGenOpts().MSVolatile) return false;
1460  AtomicInfo AI(*this, LV);
1461  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1462  // An atomic is inline if we don't need to use a libcall.
1463  bool AtomicIsInline = !AI.shouldUseLibcall();
1464  // MSVC doesn't seem to do this for types wider than a pointer.
1465  if (getContext().getTypeSize(LV.getType()) >
1466  getContext().getTypeSize(getContext().getIntPtrType()))
1467  return false;
1468  return IsVolatile && AtomicIsInline;
1469 }
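As a rough illustration of the check above (a sketch with hypothetical names, compiled as clang-cl /volatile:ms): a pointer-sized volatile object qualifies for inline atomic accesses, while a wider one falls back to ordinary volatile loads and stores:

volatile long counter;                     // at most pointer-sized and lock-free: suitable for inline atomics
struct Wide { long a, b, c; };
volatile Wide big;                         // wider than a pointer: ordinary volatile accesses

long readCounter() { return counter; }     // under /volatile:ms this load is emitted with acquire ordering
void writeCounter(long v) { counter = v; } // and this store with release ordering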
1470 
1471 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1472  AggValueSlot Slot) {
1473  llvm::AtomicOrdering AO;
1474  bool IsVolatile = LV.isVolatileQualified();
1475  if (LV.getType()->isAtomicType()) {
1476  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1477  } else {
1478  AO = llvm::AtomicOrdering::Acquire;
1479  IsVolatile = true;
1480  }
1481  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1482 }
1483 
1484 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1485  bool AsValue, llvm::AtomicOrdering AO,
1486  bool IsVolatile) {
1487  // Check whether we should use a library call.
1488  if (shouldUseLibcall()) {
1489  Address TempAddr = Address::invalid();
1490  if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1491  assert(getEvaluationKind() == TEK_Aggregate);
1492  TempAddr = ResultSlot.getAddress();
1493  } else
1494  TempAddr = CreateTempAlloca();
1495 
1496  EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1497 
1498  // Okay, turn that back into the original value or whole atomic (for
1499  // non-simple lvalues) type.
1500  return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1501  }
1502 
1503  // Okay, we're doing this natively.
1504  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1505 
1506  // If we're ignoring an aggregate return, don't do anything.
1507  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1508  return RValue::getAggregate(Address::invalid(), false);
1509 
1510  // Okay, turn that back into the original value or atomic (for non-simple
1511  // lvalues) type.
1512  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1513 }
1514 
1515 /// Emit a load from an l-value of atomic type. Note that the r-value
1516 /// we produce is an r-value of the atomic *value* type.
1517 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1518  llvm::AtomicOrdering AO, bool IsVolatile,
1519  AggValueSlot resultSlot) {
1520  AtomicInfo Atomics(*this, src);
1521  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1522  IsVolatile);
1523 }
1524 
1525 /// Copy an r-value into memory as part of storing to an atomic type.
1526 /// This needs to create a bit-pattern suitable for atomic operations.
1527 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1528  assert(LVal.isSimple());
1529  // If we have an r-value, the rvalue should be of the atomic type,
1530  // which means that the caller is responsible for having zeroed
1531  // any padding. Just do an aggregate copy of that type.
1532  if (rvalue.isAggregate()) {
1533  LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1534  LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1535  getAtomicType());
1536  bool IsVolatile = rvalue.isVolatileQualified() ||
1537  LVal.isVolatileQualified();
1538  CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1539  AggValueSlot::DoesNotOverlap, IsVolatile);
1540  return;
1541  }
1542 
1543  // Okay, otherwise we're copying stuff.
1544 
1545  // Zero out the buffer if necessary.
1546  emitMemSetZeroIfNecessary();
1547 
1548  // Drill past the padding if present.
1549  LValue TempLVal = projectValue();
1550 
1551  // Okay, store the rvalue in.
1552  if (rvalue.isScalar()) {
1553  CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1554  } else {
1555  CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1556  }
1557 }
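The memset-to-zero step matters because a compare-exchange on the atomic object compares whole bit-patterns, padding included; a small sketch (hypothetical type and names) of the situation the canonical zero padding guards against:

#include <atomic>

struct S { char c; int i; };          // 3 bytes of padding after 'c'
std::atomic<S> cell{S{'a', 1}};

bool swapIn(S expected, S desired) {
  // The hardware cmpxchg compares every byte of the 8-byte cell, padding included, which is
  // why the copy-in path above zeroes padding so the object's in-memory pattern stays canonical.
  return cell.compare_exchange_strong(expected, desired);
}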
1558 
1559 
1560 /// Materialize an r-value into memory for the purposes of storing it
1561 /// to an atomic type.
1562 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1563  // Aggregate r-values are already in memory, and EmitAtomicStore
1564  // requires them to be values of the atomic type.
1565  if (rvalue.isAggregate())
1566  return rvalue.getAggregateAddress();
1567 
1568  // Otherwise, make a temporary and materialize into it.
1569  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1570  AtomicInfo Atomics(CGF, TempLV);
1571  Atomics.emitCopyIntoMemory(rvalue);
1572  return TempLV.getAddress();
1573 }
1574 
1575 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1576  // If we've got a scalar value of the right size, try to avoid going
1577  // through memory.
1578  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1579  llvm::Value *Value = RVal.getScalarVal();
1580  if (isa<llvm::IntegerType>(Value->getType()))
1581  return CGF.EmitToMemory(Value, ValueTy);
1582  else {
1583  llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1584  CGF.getLLVMContext(),
1585  LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1586  if (isa<llvm::PointerType>(Value->getType()))
1587  return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1588  else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1589  return CGF.Builder.CreateBitCast(Value, InputIntTy);
1590  }
1591  }
1592  // Otherwise, we need to go through memory.
1593  // Put the r-value in memory.
1594  Address Addr = materializeRValue(RVal);
1595 
1596  // Cast the temporary to the atomic int type and pull a value out.
1597  Addr = emitCastToAtomicIntPointer(Addr);
1598  return CGF.Builder.CreateLoad(Addr);
1599 }
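For instance (a sketch with hypothetical names), a 32-bit float stored to an atomic object takes the bitcast fast path above rather than a round trip through a temporary:

#include <atomic>

std::atomic<float> f{0.0f};

void setValue(float v) {
  // The float payload is exactly the width of the atomic integer type, so it is bitcast to an
  // i32 bit-pattern and written with a single atomic 32-bit store.
  f.store(v, std::memory_order_release);
}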
1600 
1601 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1602  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1603  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1604  // Do the atomic compare-and-exchange.
1605  Address Addr = getAtomicAddressAsAtomicIntPointer();
1606  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1607  ExpectedVal, DesiredVal,
1608  Success, Failure);
1609  // Other decoration.
1610  Inst->setVolatile(LVal.isVolatileQualified());
1611  Inst->setWeak(IsWeak);
1612 
1613  // Okay, turn that back into the original value type.
1614  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1615  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1616  return std::make_pair(PreviousVal, SuccessFailureVal);
1617 }
1618 
1619 llvm::Value *
1620 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1621  llvm::Value *DesiredAddr,
1622  llvm::AtomicOrdering Success,
1623  llvm::AtomicOrdering Failure) {
1624  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1625  // void *desired, int success, int failure);
1626  CallArgList Args;
1627  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1628  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1629  CGF.getContext().VoidPtrTy);
1630  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1631  CGF.getContext().VoidPtrTy);
1632  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1633  CGF.getContext().VoidPtrTy);
1634  Args.add(RValue::get(
1635  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1636  CGF.getContext().IntTy);
1637  Args.add(RValue::get(
1638  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1639  CGF.getContext().IntTy);
1640  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1641  CGF.getContext().BoolTy, Args);
1642 
1643  return SuccessFailureRVal.getScalarVal();
1644 }
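Correspondingly, a user-level sketch (hypothetical names) of a compare-exchange that is too wide to inline and therefore reaches this __atomic_compare_exchange libcall:

struct Big { long a, b, c; };          // hypothetical 24-byte type, no native cmpxchg of this width
Big obj, expected_val, desired_val;

bool casBig() {
  // Lowers to: __atomic_compare_exchange(24, &obj, &expected_val, &desired_val,
  //                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)
  return __atomic_compare_exchange(&obj, &expected_val, &desired_val,
                                   /*weak=*/false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}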
1645 
1646 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1647  RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1648  llvm::AtomicOrdering Failure, bool IsWeak) {
1649  if (isStrongerThan(Failure, Success))
1650  // Don't assert on undefined behavior "failure argument shall be no stronger
1651  // than the success argument".
1652  Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1653 
1654  // Check whether we should use a library call.
1655  if (shouldUseLibcall()) {
1656  // Produce a source address.
1657  Address ExpectedAddr = materializeRValue(Expected);
1658  Address DesiredAddr = materializeRValue(Desired);
1659  auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1660  DesiredAddr.getPointer(),
1661  Success, Failure);
1662  return std::make_pair(
1663  convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1664  SourceLocation(), /*AsValue=*/false),
1665  Res);
1666  }
1667 
1668  // If we've got a scalar value of the right size, try to avoid going
1669  // through memory.
1670  auto *ExpectedVal = convertRValueToInt(Expected);
1671  auto *DesiredVal = convertRValueToInt(Desired);
1672  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1673  Failure, IsWeak);
1674  return std::make_pair(
1675  ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1676  SourceLocation(), /*AsValue=*/false),
1677  Res.second);
1678 }
1679 
1680 static void
1681 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1682  const llvm::function_ref<RValue(RValue)> &UpdateOp,
1683  Address DesiredAddr) {
1684  RValue UpRVal;
1685  LValue AtomicLVal = Atomics.getAtomicLValue();
1686  LValue DesiredLVal;
1687  if (AtomicLVal.isSimple()) {
1688  UpRVal = OldRVal;
1689  DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1690  } else {
1691  // Build new lvalue for temp address
1692  Address Ptr = Atomics.materializeRValue(OldRVal);
1693  LValue UpdateLVal;
1694  if (AtomicLVal.isBitField()) {
1695  UpdateLVal =
1696  LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1697  AtomicLVal.getType(),
1698  AtomicLVal.getBaseInfo(),
1699  AtomicLVal.getTBAAInfo());
1700  DesiredLVal =
1701  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1702  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1703  AtomicLVal.getTBAAInfo());
1704  } else if (AtomicLVal.isVectorElt()) {
1705  UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1706  AtomicLVal.getType(),
1707  AtomicLVal.getBaseInfo(),
1708  AtomicLVal.getTBAAInfo());
1709  DesiredLVal = LValue::MakeVectorElt(
1710  DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1711  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1712  } else {
1713  assert(AtomicLVal.isExtVectorElt());
1714  UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1715  AtomicLVal.getType(),
1716  AtomicLVal.getBaseInfo(),
1717  AtomicLVal.getTBAAInfo());
1718  DesiredLVal = LValue::MakeExtVectorElt(
1719  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1720  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1721  }
1722  UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1723  }
1724  // Store new value in the corresponding memory area
1725  RValue NewRVal = UpdateOp(UpRVal);
1726  if (NewRVal.isScalar()) {
1727  CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1728  } else {
1729  assert(NewRVal.isComplex());
1730  CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1731  /*isInit=*/false);
1732  }
1733 }
1734 
1735 void AtomicInfo::EmitAtomicUpdateLibcall(
1736  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1737  bool IsVolatile) {
1738  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1739 
1740  Address ExpectedAddr = CreateTempAlloca();
1741 
1742  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1743  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1744  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1745  CGF.EmitBlock(ContBB);
1746  Address DesiredAddr = CreateTempAlloca();
1747  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1748  requiresMemSetZero(getAtomicAddress().getElementType())) {
1749  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1750  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1751  }
1752  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1753  AggValueSlot::ignored(),
1754  SourceLocation(), /*AsValue=*/false);
1755  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1756  auto *Res =
1757  EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1758  DesiredAddr.getPointer(),
1759  AO, Failure);
1760  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1761  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1762 }
1763 
1764 void AtomicInfo::EmitAtomicUpdateOp(
1765  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1766  bool IsVolatile) {
1767  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1768 
1769  // Do the atomic load.
1770  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1771  // For non-simple lvalues perform compare-and-swap procedure.
1772  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1773  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1774  auto *CurBB = CGF.Builder.GetInsertBlock();
1775  CGF.EmitBlock(ContBB);
1776  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1777  /*NumReservedValues=*/2);
1778  PHI->addIncoming(OldVal, CurBB);
1779  Address NewAtomicAddr = CreateTempAlloca();
1780  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1781  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1782  requiresMemSetZero(getAtomicAddress().getElementType())) {
1783  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1784  }
1785  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1786  SourceLocation(), /*AsValue=*/false);
1787  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1788  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1789  // Try to write new value using cmpxchg operation
1790  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1791  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1792  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1793  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1794 }
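The load / phi / cmpxchg structure built here is the standard compare-and-swap update loop; written by hand with the C++ atomics library it would look roughly like this (a sketch, with hypothetical names and update function):

#include <atomic>

std::atomic<unsigned> word;            // hypothetical cell, e.g. the storage unit of an atomic bit-field

void atomicUpdate(unsigned mask, unsigned newBits) {
  unsigned old = word.load(std::memory_order_acquire);               // EmitAtomicLoadOp
  unsigned desired;
  do {
    desired = (old & ~mask) | newBits;                               // the update applied to the old value
  } while (!word.compare_exchange_weak(old, desired,                 // EmitAtomicCompareExchangeOp; on failure
                                       std::memory_order_acquire,    // 'old' is refreshed and the loop retries,
                                       std::memory_order_acquire));  // mirroring the PHI back-edge above
}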
1795 
1796 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1797  RValue UpdateRVal, Address DesiredAddr) {
1798  LValue AtomicLVal = Atomics.getAtomicLValue();
1799  LValue DesiredLVal;
1800  // Build new lvalue for temp address
1801  if (AtomicLVal.isBitField()) {
1802  DesiredLVal =
1803  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1804  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1805  AtomicLVal.getTBAAInfo());
1806  } else if (AtomicLVal.isVectorElt()) {
1807  DesiredLVal =
1808  LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1809  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1810  AtomicLVal.getTBAAInfo());
1811  } else {
1812  assert(AtomicLVal.isExtVectorElt());
1813  DesiredLVal = LValue::MakeExtVectorElt(
1814  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1815  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1816  }
1817  // Store new value in the corresponding memory area
1818  assert(UpdateRVal.isScalar());
1819  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1820 }
1821 
1822 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1823  RValue UpdateRVal, bool IsVolatile) {
1824  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1825 
1826  Address ExpectedAddr = CreateTempAlloca();
1827 
1828  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1829  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1830  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1831  CGF.EmitBlock(ContBB);
1832  Address DesiredAddr = CreateTempAlloca();
1833  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1834  requiresMemSetZero(getAtomicAddress().getElementType())) {
1835  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1836  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1837  }
1838  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1839  auto *Res =
1840  EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1841  DesiredAddr.getPointer(),
1842  AO, Failure);
1843  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1844  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1845 }
1846 
1847 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1848  bool IsVolatile) {
1849  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1850 
1851  // Do the atomic load.
1852  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1853  // For non-simple lvalues perform compare-and-swap procedure.
1854  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1855  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1856  auto *CurBB = CGF.Builder.GetInsertBlock();
1857  CGF.EmitBlock(ContBB);
1858  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1859  /*NumReservedValues=*/2);
1860  PHI->addIncoming(OldVal, CurBB);
1861  Address NewAtomicAddr = CreateTempAlloca();
1862  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1863  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1864  requiresMemSetZero(getAtomicAddress().getElementType())) {
1865  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1866  }
1867  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1868  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1869  // Try to write new value using cmpxchg operation
1870  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1871  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1872  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1873  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1874 }
1875 
1876 void AtomicInfo::EmitAtomicUpdate(
1877  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1878  bool IsVolatile) {
1879  if (shouldUseLibcall()) {
1880  EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1881  } else {
1882  EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1883  }
1884 }
1885 
1886 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1887  bool IsVolatile) {
1888  if (shouldUseLibcall()) {
1889  EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1890  } else {
1891  EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1892  }
1893 }
1894 
1895 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1896  bool isInit) {
1897  bool IsVolatile = lvalue.isVolatileQualified();
1898  llvm::AtomicOrdering AO;
1899  if (lvalue.getType()->isAtomicType()) {
1900  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1901  } else {
1902  AO = llvm::AtomicOrdering::Release;
1903  IsVolatile = true;
1904  }
1905  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1906 }
1907 
1908 /// Emit a store to an l-value of atomic type.
1909 ///
1910 /// Note that the r-value is expected to be an r-value *of the atomic
1911 /// type*; this means that for aggregate r-values, it should include
1912 /// storage for any padding that was necessary.
1913 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1914  llvm::AtomicOrdering AO, bool IsVolatile,
1915  bool isInit) {
1916  // If this is an aggregate r-value, it should agree in type except
1917  // maybe for address-space qualification.
1918  assert(!rvalue.isAggregate() ||
1919  rvalue.getAggregateAddress().getElementType()
1920  == dest.getAddress().getElementType());
1921 
1922  AtomicInfo atomics(*this, dest);
1923  LValue LVal = atomics.getAtomicLValue();
1924 
1925  // If this is an initialization, just put the value there normally.
1926  if (LVal.isSimple()) {
1927  if (isInit) {
1928  atomics.emitCopyIntoMemory(rvalue);
1929  return;
1930  }
1931 
1932  // Check whether we should use a library call.
1933  if (atomics.shouldUseLibcall()) {
1934  // Produce a source address.
1935  Address srcAddr = atomics.materializeRValue(rvalue);
1936 
1937  // void __atomic_store(size_t size, void *mem, void *val, int order)
1938  CallArgList args;
1939  args.add(RValue::get(atomics.getAtomicSizeValue()),
1940  getContext().getSizeType());
1941  args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1942  getContext().VoidPtrTy);
1943  args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1944  getContext().VoidPtrTy);
1945  args.add(
1946  RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1947  getContext().IntTy);
1948  emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1949  return;
1950  }
1951 
1952  // Okay, we're doing this natively.
1953  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1954 
1955  // Do the atomic store.
1956  Address addr =
1957  atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1958  intValue = Builder.CreateIntCast(
1959  intValue, addr.getElementType(), /*isSigned=*/false);
1960  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1961 
1962  // Initializations don't need to be atomic.
1963  if (!isInit)
1964  store->setAtomic(AO);
1965 
1966  // Other decoration.
1967  if (IsVolatile)
1968  store->setVolatile(true);
1969  CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1970  return;
1971  }
1972 
1973  // Emit simple atomic update operation.
1974  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1975 }
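At the source level, both branches of the store path above can be seen from a small sketch (hypothetical types and names): a value that fits a native atomic store is written inline, while an oversized one goes through materializeRValue and the __atomic_store libcall:

#include <atomic>

struct Pair   { int a, b; };           // 8 bytes: native atomic store
struct Triple { long a, b, c; };       // 24 bytes: __atomic_store libcall

std::atomic<Pair>   p;
std::atomic<Triple> t;

void storeBoth(Pair pv, Triple tv) {
  p.store(pv, std::memory_order_release);   // convertRValueToInt + atomic StoreInst with release ordering
  t.store(tv, std::memory_order_release);   // lowered to __atomic_store(24, &t, &tmp, __ATOMIC_RELEASE)
}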
1976 
1977 /// Emit a compare-and-exchange op for atomic type.
1978 ///
1979 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1980  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1981  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1982  AggValueSlot Slot) {
1983  // If this is an aggregate r-value, it should agree in type except
1984  // maybe for address-space qualification.
1985  assert(!Expected.isAggregate() ||
1986  Expected.getAggregateAddress().getElementType() ==
1987  Obj.getAddress().getElementType());
1988  assert(!Desired.isAggregate() ||
1989  Desired.getAggregateAddress().getElementType() ==
1990  Obj.getAddress().getElementType());
1991  AtomicInfo Atomics(*this, Obj);
1992 
1993  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1994  IsWeak);
1995 }
1996 
1997 void CodeGenFunction::EmitAtomicUpdate(
1998  LValue LVal, llvm::AtomicOrdering AO,
1999  const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2000  AtomicInfo Atomics(*this, LVal);
2001  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2002 }
2003 
2004 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2005  AtomicInfo atomics(*this, dest);
2006 
2007  switch (atomics.getEvaluationKind()) {
2008  case TEK_Scalar: {
2009  llvm::Value *value = EmitScalarExpr(init);
2010  atomics.emitCopyIntoMemory(RValue::get(value));
2011  return;
2012  }
2013 
2014  case TEK_Complex: {
2015  ComplexPairTy value = EmitComplexExpr(init);
2016  atomics.emitCopyIntoMemory(RValue::getComplex(value));
2017  return;
2018  }
2019 
2020  case TEK_Aggregate: {
2021  // Fix up the destination if the initializer isn't an expression
2022  // of atomic type.
2023  bool Zeroed = false;
2024  if (!init->getType()->isAtomicType()) {
2025  Zeroed = atomics.emitMemSetZeroIfNecessary();
2026  dest = atomics.projectValue();
2027  }
2028 
2029  // Evaluate the expression directly into the destination.
2030  AggValueSlot slot = AggValueSlot::forLValue(dest,
2031  AggValueSlot::IsNotDestructed,
2032  AggValueSlot::DoesNotNeedGCBarriers,
2033  AggValueSlot::IsNotAliased,
2034  AggValueSlot::DoesNotOverlap,
2035  Zeroed ? AggValueSlot::IsZeroed :
2036  AggValueSlot::IsNotZeroed);
2037 
2038  EmitAggExpr(init, slot);
2039  return;
2040  }
2041  }
2042  llvm_unreachable("bad evaluation kind");
2043 }
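Finally, a brief sketch (hypothetical names) of the distinction EmitAtomicInit relies on: initialization of an atomic object is an ordinary, non-atomic copy into storage, whereas a later assignment goes through EmitAtomicStore:

#include <atomic>

struct Padded { char c; int i; };          // padding is zeroed by emitMemSetZeroIfNecessary on init

std::atomic<Padded> cell{Padded{'x', 1}};  // initialization: plain copy into memory, no atomic ordering

void update() {
  cell.store(Padded{'x', 2});              // assignment: a real atomic (seq_cst) store
}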