CGAtomic.cpp (clang 10.0.0svn)
1 //===--- CGAtomic.cpp - Emit LLVM IR for atomic operations ----------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the code for emitting atomic operations.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGCall.h"
14 #include "CGRecordLayout.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/ASTContext.h"
 19 #include "clang/CodeGen/CGFunctionInfo.h"
 20 #include "clang/Frontend/FrontendDiagnostic.h"
 21 #include "llvm/ADT/DenseMap.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/Intrinsics.h"
24 #include "llvm/IR/Operator.h"
25 
26 using namespace clang;
27 using namespace CodeGen;
28 
29 namespace {
30  class AtomicInfo {
31  CodeGenFunction &CGF;
32  QualType AtomicTy;
33  QualType ValueTy;
34  uint64_t AtomicSizeInBits;
35  uint64_t ValueSizeInBits;
36  CharUnits AtomicAlign;
37  CharUnits ValueAlign;
38  TypeEvaluationKind EvaluationKind;
39  bool UseLibcall;
40  LValue LVal;
41  CGBitFieldInfo BFI;
42  public:
43  AtomicInfo(CodeGenFunction &CGF, LValue &lvalue)
44  : CGF(CGF), AtomicSizeInBits(0), ValueSizeInBits(0),
45  EvaluationKind(TEK_Scalar), UseLibcall(true) {
46  assert(!lvalue.isGlobalReg());
47  ASTContext &C = CGF.getContext();
48  if (lvalue.isSimple()) {
49  AtomicTy = lvalue.getType();
50  if (auto *ATy = AtomicTy->getAs<AtomicType>())
51  ValueTy = ATy->getValueType();
52  else
53  ValueTy = AtomicTy;
54  EvaluationKind = CGF.getEvaluationKind(ValueTy);
55 
56  uint64_t ValueAlignInBits;
57  uint64_t AtomicAlignInBits;
58  TypeInfo ValueTI = C.getTypeInfo(ValueTy);
59  ValueSizeInBits = ValueTI.Width;
60  ValueAlignInBits = ValueTI.Align;
61 
62  TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
63  AtomicSizeInBits = AtomicTI.Width;
64  AtomicAlignInBits = AtomicTI.Align;
65 
66  assert(ValueSizeInBits <= AtomicSizeInBits);
67  assert(ValueAlignInBits <= AtomicAlignInBits);
68 
69  AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
70  ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
71  if (lvalue.getAlignment().isZero())
72  lvalue.setAlignment(AtomicAlign);
73 
74  LVal = lvalue;
75  } else if (lvalue.isBitField()) {
76  ValueTy = lvalue.getType();
77  ValueSizeInBits = C.getTypeSize(ValueTy);
78  auto &OrigBFI = lvalue.getBitFieldInfo();
79  auto Offset = OrigBFI.Offset % C.toBits(lvalue.getAlignment());
80  AtomicSizeInBits = C.toBits(
81  C.toCharUnitsFromBits(Offset + OrigBFI.Size + C.getCharWidth() - 1)
82  .alignTo(lvalue.getAlignment()));
83  auto VoidPtrAddr = CGF.EmitCastToVoidPtr(lvalue.getBitFieldPointer());
84  auto OffsetInChars =
85  (C.toCharUnitsFromBits(OrigBFI.Offset) / lvalue.getAlignment()) *
86  lvalue.getAlignment();
87  VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
 88  VoidPtrAddr, OffsetInChars.getQuantity());
 89  auto Addr = CGF.Builder.CreateBitCast(
90  VoidPtrAddr,
91  CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
92  "atomic_bitfield_base");
93  BFI = OrigBFI;
94  BFI.Offset = Offset;
95  BFI.StorageSize = AtomicSizeInBits;
96  BFI.StorageOffset += OffsetInChars;
97  LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
98  BFI, lvalue.getType(), lvalue.getBaseInfo(),
99  lvalue.getTBAAInfo());
100  AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
101  if (AtomicTy.isNull()) {
102  llvm::APInt Size(
103  /*numBits=*/32,
104  C.toCharUnitsFromBits(AtomicSizeInBits).getQuantity());
105  AtomicTy =
106  C.getConstantArrayType(C.CharTy, Size, nullptr, ArrayType::Normal,
107  /*IndexTypeQuals=*/0);
108  }
109  AtomicAlign = ValueAlign = lvalue.getAlignment();
110  } else if (lvalue.isVectorElt()) {
111  ValueTy = lvalue.getType()->castAs<VectorType>()->getElementType();
112  ValueSizeInBits = C.getTypeSize(ValueTy);
113  AtomicTy = lvalue.getType();
114  AtomicSizeInBits = C.getTypeSize(AtomicTy);
115  AtomicAlign = ValueAlign = lvalue.getAlignment();
116  LVal = lvalue;
117  } else {
118  assert(lvalue.isExtVectorElt());
119  ValueTy = lvalue.getType();
120  ValueSizeInBits = C.getTypeSize(ValueTy);
121  AtomicTy = ValueTy = CGF.getContext().getExtVectorType(
122  lvalue.getType(), lvalue.getExtVectorAddress()
123  .getElementType()->getVectorNumElements());
124  AtomicSizeInBits = C.getTypeSize(AtomicTy);
125  AtomicAlign = ValueAlign = lvalue.getAlignment();
126  LVal = lvalue;
127  }
128  UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
129  AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
130  }
131 
132  QualType getAtomicType() const { return AtomicTy; }
133  QualType getValueType() const { return ValueTy; }
134  CharUnits getAtomicAlignment() const { return AtomicAlign; }
135  uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
136  uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
137  TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
138  bool shouldUseLibcall() const { return UseLibcall; }
139  const LValue &getAtomicLValue() const { return LVal; }
140  llvm::Value *getAtomicPointer() const {
141  if (LVal.isSimple())
142  return LVal.getPointer();
143  else if (LVal.isBitField())
144  return LVal.getBitFieldPointer();
145  else if (LVal.isVectorElt())
146  return LVal.getVectorPointer();
147  assert(LVal.isExtVectorElt());
148  return LVal.getExtVectorPointer();
149  }
150  Address getAtomicAddress() const {
151  return Address(getAtomicPointer(), getAtomicAlignment());
152  }
153 
154  Address getAtomicAddressAsAtomicIntPointer() const {
155  return emitCastToAtomicIntPointer(getAtomicAddress());
156  }
157 
158  /// Is the atomic size larger than the underlying value type?
159  ///
160  /// Note that the absence of padding does not mean that atomic
161  /// objects are completely interchangeable with non-atomic
162  /// objects: we might have promoted the alignment of a type
163  /// without making it bigger.
164  bool hasPadding() const {
165  return (ValueSizeInBits != AtomicSizeInBits);
166  }
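      // Editorial note (illustrative example, not from the original source):
      // on typical targets an _Atomic three-byte struct is widened to a
      // four-byte, four-aligned representation, so ValueSizeInBits is 24 while
      // AtomicSizeInBits is 32 and hasPadding() returns true.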
167 
168  bool emitMemSetZeroIfNecessary() const;
169 
170  llvm::Value *getAtomicSizeValue() const {
171  CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
172  return CGF.CGM.getSize(size);
173  }
174 
175  /// Cast the given pointer to an integer pointer suitable for atomic
 176  /// operations.
177  Address emitCastToAtomicIntPointer(Address Addr) const;
178 
179  /// If Addr is compatible with the iN that will be used for an atomic
180  /// operation, bitcast it. Otherwise, create a temporary that is suitable
181  /// and copy the value across.
182  Address convertToAtomicIntPointer(Address Addr) const;
183 
184  /// Turn an atomic-layout object into an r-value.
185  RValue convertAtomicTempToRValue(Address addr, AggValueSlot resultSlot,
186  SourceLocation loc, bool AsValue) const;
187 
 188  /// Converts an r-value to an integer value.
189  llvm::Value *convertRValueToInt(RValue RVal) const;
190 
191  RValue ConvertIntToValueOrAtomic(llvm::Value *IntVal,
192  AggValueSlot ResultSlot,
193  SourceLocation Loc, bool AsValue) const;
194 
195  /// Copy an atomic r-value into atomic-layout memory.
196  void emitCopyIntoMemory(RValue rvalue) const;
197 
198  /// Project an l-value down to the value field.
199  LValue projectValue() const {
200  assert(LVal.isSimple());
201  Address addr = getAtomicAddress();
202  if (hasPadding())
203  addr = CGF.Builder.CreateStructGEP(addr, 0);
204 
205  return LValue::MakeAddr(addr, getValueType(), CGF.getContext(),
206  LVal.getBaseInfo(), LVal.getTBAAInfo());
207  }
208 
209  /// Emits atomic load.
210  /// \returns Loaded value.
211  RValue EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
212  bool AsValue, llvm::AtomicOrdering AO,
213  bool IsVolatile);
214 
215  /// Emits atomic compare-and-exchange sequence.
216  /// \param Expected Expected value.
217  /// \param Desired Desired value.
218  /// \param Success Atomic ordering for success operation.
219  /// \param Failure Atomic ordering for failed operation.
220  /// \param IsWeak true if atomic operation is weak, false otherwise.
221  /// \returns Pair of values: previous value from storage (value type) and
222  /// boolean flag (i1 type) with true if success and false otherwise.
223  std::pair<RValue, llvm::Value *>
224  EmitAtomicCompareExchange(RValue Expected, RValue Desired,
225  llvm::AtomicOrdering Success =
226  llvm::AtomicOrdering::SequentiallyConsistent,
227  llvm::AtomicOrdering Failure =
228  llvm::AtomicOrdering::SequentiallyConsistent,
229  bool IsWeak = false);
230 
231  /// Emits atomic update.
232  /// \param AO Atomic ordering.
233  /// \param UpdateOp Update operation for the current lvalue.
234  void EmitAtomicUpdate(llvm::AtomicOrdering AO,
235  const llvm::function_ref<RValue(RValue)> &UpdateOp,
236  bool IsVolatile);
237  /// Emits atomic update.
238  /// \param AO Atomic ordering.
239  void EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
240  bool IsVolatile);
241 
242  /// Materialize an atomic r-value in atomic-layout memory.
243  Address materializeRValue(RValue rvalue) const;
244 
245  /// Creates temp alloca for intermediate operations on atomic value.
246  Address CreateTempAlloca() const;
247  private:
248  bool requiresMemSetZero(llvm::Type *type) const;
249 
250 
251  /// Emits atomic load as a libcall.
252  void EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
253  llvm::AtomicOrdering AO, bool IsVolatile);
254  /// Emits atomic load as LLVM instruction.
255  llvm::Value *EmitAtomicLoadOp(llvm::AtomicOrdering AO, bool IsVolatile);
256  /// Emits atomic compare-and-exchange op as a libcall.
257  llvm::Value *EmitAtomicCompareExchangeLibcall(
258  llvm::Value *ExpectedAddr, llvm::Value *DesiredAddr,
259  llvm::AtomicOrdering Success =
260  llvm::AtomicOrdering::SequentiallyConsistent,
261  llvm::AtomicOrdering Failure =
262  llvm::AtomicOrdering::SequentiallyConsistent);
263  /// Emits atomic compare-and-exchange op as LLVM instruction.
264  std::pair<llvm::Value *, llvm::Value *> EmitAtomicCompareExchangeOp(
265  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
266  llvm::AtomicOrdering Success =
267  llvm::AtomicOrdering::SequentiallyConsistent,
268  llvm::AtomicOrdering Failure =
269  llvm::AtomicOrdering::SequentiallyConsistent,
270  bool IsWeak = false);
271  /// Emit atomic update as libcalls.
272  void
273  EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
274  const llvm::function_ref<RValue(RValue)> &UpdateOp,
275  bool IsVolatile);
276  /// Emit atomic update as LLVM instructions.
277  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO,
278  const llvm::function_ref<RValue(RValue)> &UpdateOp,
279  bool IsVolatile);
280  /// Emit atomic update as libcalls.
281  void EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO, RValue UpdateRVal,
282  bool IsVolatile);
283  /// Emit atomic update as LLVM instructions.
 284  void EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
285  bool IsVolatile);
286  };
287 }
288 
289 Address AtomicInfo::CreateTempAlloca() const {
290  Address TempAlloca = CGF.CreateMemTemp(
291  (LVal.isBitField() && ValueSizeInBits > AtomicSizeInBits) ? ValueTy
292  : AtomicTy,
293  getAtomicAlignment(),
294  "atomic-temp");
295  // Cast to pointer to value type for bitfields.
296  if (LVal.isBitField())
 297  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
 298  TempAlloca, getAtomicAddress().getType());
299  return TempAlloca;
300 }
301 
 302 static RValue emitAtomicLibcall(CodeGenFunction &CGF,
 303  StringRef fnName,
304  QualType resultType,
305  CallArgList &args) {
306  const CGFunctionInfo &fnInfo =
307  CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
308  llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
309  llvm::FunctionCallee fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
310  auto callee = CGCallee::forDirect(fn);
311  return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
312 }
313 
314 /// Does a store of the given IR type modify the full expected width?
 315 static bool isFullSizeType(CodeGenModule &CGM, llvm::Type *type,
 316  uint64_t expectedSize) {
317  return (CGM.getDataLayout().getTypeStoreSize(type) * 8 == expectedSize);
318 }
319 
320 /// Does the atomic type require memsetting to zero before initialization?
321 ///
322 /// The IR type is provided as a way of making certain queries faster.
323 bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
324  // If the atomic type has size padding, we definitely need a memset.
325  if (hasPadding()) return true;
326 
327  // Otherwise, do some simple heuristics to try to avoid it:
328  switch (getEvaluationKind()) {
329  // For scalars and complexes, check whether the store size of the
330  // type uses the full size.
331  case TEK_Scalar:
332  return !isFullSizeType(CGF.CGM, type, AtomicSizeInBits);
333  case TEK_Complex:
334  return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
335  AtomicSizeInBits / 2);
336 
337  // Padding in structs has an undefined bit pattern. User beware.
338  case TEK_Aggregate:
339  return false;
340  }
341  llvm_unreachable("bad evaluation kind");
342 }
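// Editorial note (illustrative example, assuming a typical x86-64 target): a
// scalar long double occupies a 128-bit atomic slot, but its x86_fp80 IR type
// has a store size of only 80 bits, so isFullSizeType() is false and
// requiresMemSetZero() asks for the slot to be zero-filled first.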
343 
344 bool AtomicInfo::emitMemSetZeroIfNecessary() const {
345  assert(LVal.isSimple());
346  llvm::Value *addr = LVal.getPointer();
347  if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
348  return false;
349 
350  CGF.Builder.CreateMemSet(
351  addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
352  CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits).getQuantity(),
353  LVal.getAlignment().getQuantity());
354  return true;
355 }
356 
357 static void emitAtomicCmpXchg(CodeGenFunction &CGF, AtomicExpr *E, bool IsWeak,
358  Address Dest, Address Ptr,
359  Address Val1, Address Val2,
360  uint64_t Size,
361  llvm::AtomicOrdering SuccessOrder,
362  llvm::AtomicOrdering FailureOrder,
 363  llvm::SyncScope::ID Scope) {
 364  // Note that cmpxchg doesn't support weak cmpxchg, at least at the moment.
365  llvm::Value *Expected = CGF.Builder.CreateLoad(Val1);
366  llvm::Value *Desired = CGF.Builder.CreateLoad(Val2);
367 
368  llvm::AtomicCmpXchgInst *Pair = CGF.Builder.CreateAtomicCmpXchg(
369  Ptr.getPointer(), Expected, Desired, SuccessOrder, FailureOrder,
370  Scope);
371  Pair->setVolatile(E->isVolatile());
372  Pair->setWeak(IsWeak);
373 
374  // Cmp holds the result of the compare-exchange operation: true on success,
375  // false on failure.
376  llvm::Value *Old = CGF.Builder.CreateExtractValue(Pair, 0);
377  llvm::Value *Cmp = CGF.Builder.CreateExtractValue(Pair, 1);
378 
379  // This basic block is used to hold the store instruction if the operation
380  // failed.
381  llvm::BasicBlock *StoreExpectedBB =
382  CGF.createBasicBlock("cmpxchg.store_expected", CGF.CurFn);
383 
384  // This basic block is the exit point of the operation, we should end up
385  // here regardless of whether or not the operation succeeded.
386  llvm::BasicBlock *ContinueBB =
387  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
388 
389  // Update Expected if Expected isn't equal to Old, otherwise branch to the
390  // exit point.
391  CGF.Builder.CreateCondBr(Cmp, ContinueBB, StoreExpectedBB);
392 
393  CGF.Builder.SetInsertPoint(StoreExpectedBB);
394  // Update the memory at Expected with Old's value.
395  CGF.Builder.CreateStore(Old, Val1);
396  // Finally, branch to the exit point.
397  CGF.Builder.CreateBr(ContinueBB);
398 
399  CGF.Builder.SetInsertPoint(ContinueBB);
400  // Update the memory at Dest with Cmp's value.
401  CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
402 }
403 
404 /// Given an ordering required on success, emit all possible cmpxchg
405 /// instructions to cope with the provided (but possibly only dynamically known)
406 /// FailureOrder.
 407 static void emitAtomicCmpXchgFailureSet(CodeGenFunction &CGF, AtomicExpr *E,
 408  bool IsWeak, Address Dest, Address Ptr,
409  Address Val1, Address Val2,
410  llvm::Value *FailureOrderVal,
411  uint64_t Size,
412  llvm::AtomicOrdering SuccessOrder,
 413  llvm::SyncScope::ID Scope) {
 414  llvm::AtomicOrdering FailureOrder;
415  if (llvm::ConstantInt *FO = dyn_cast<llvm::ConstantInt>(FailureOrderVal)) {
416  auto FOS = FO->getSExtValue();
417  if (!llvm::isValidAtomicOrderingCABI(FOS))
418  FailureOrder = llvm::AtomicOrdering::Monotonic;
419  else
420  switch ((llvm::AtomicOrderingCABI)FOS) {
421  case llvm::AtomicOrderingCABI::relaxed:
422  case llvm::AtomicOrderingCABI::release:
423  case llvm::AtomicOrderingCABI::acq_rel:
424  FailureOrder = llvm::AtomicOrdering::Monotonic;
425  break;
426  case llvm::AtomicOrderingCABI::consume:
427  case llvm::AtomicOrderingCABI::acquire:
428  FailureOrder = llvm::AtomicOrdering::Acquire;
429  break;
430  case llvm::AtomicOrderingCABI::seq_cst:
431  FailureOrder = llvm::AtomicOrdering::SequentiallyConsistent;
432  break;
433  }
434  if (isStrongerThan(FailureOrder, SuccessOrder)) {
435  // Don't assert on undefined behavior "failure argument shall be no
436  // stronger than the success argument".
437  FailureOrder =
438  llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrder);
439  }
440  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
441  FailureOrder, Scope);
442  return;
443  }
444 
445  // Create all the relevant BB's
446  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
447  *SeqCstBB = nullptr;
448  MonotonicBB = CGF.createBasicBlock("monotonic_fail", CGF.CurFn);
449  if (SuccessOrder != llvm::AtomicOrdering::Monotonic &&
450  SuccessOrder != llvm::AtomicOrdering::Release)
451  AcquireBB = CGF.createBasicBlock("acquire_fail", CGF.CurFn);
452  if (SuccessOrder == llvm::AtomicOrdering::SequentiallyConsistent)
453  SeqCstBB = CGF.createBasicBlock("seqcst_fail", CGF.CurFn);
454 
455  llvm::BasicBlock *ContBB = CGF.createBasicBlock("atomic.continue", CGF.CurFn);
456 
457  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(FailureOrderVal, MonotonicBB);
458 
459  // Emit all the different atomics
460 
461  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
462  // doesn't matter unless someone is crazy enough to use something that
463  // doesn't fold to a constant for the ordering.
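  // Editorial note (illustrative example): this dynamic switch is reached for
  // calls such as atomic_compare_exchange_strong_explicit(p, &e, d, succ, fail)
  // where 'fail' is not a compile-time constant; each destination block
  // re-emits the cmpxchg with the corresponding constant failure ordering.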
464  CGF.Builder.SetInsertPoint(MonotonicBB);
465  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
466  Size, SuccessOrder, llvm::AtomicOrdering::Monotonic, Scope);
467  CGF.Builder.CreateBr(ContBB);
468 
469  if (AcquireBB) {
470  CGF.Builder.SetInsertPoint(AcquireBB);
471  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2,
472  Size, SuccessOrder, llvm::AtomicOrdering::Acquire, Scope);
473  CGF.Builder.CreateBr(ContBB);
474  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
475  AcquireBB);
476  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
477  AcquireBB);
478  }
479  if (SeqCstBB) {
480  CGF.Builder.SetInsertPoint(SeqCstBB);
481  emitAtomicCmpXchg(CGF, E, IsWeak, Dest, Ptr, Val1, Val2, Size, SuccessOrder,
482  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
483  CGF.Builder.CreateBr(ContBB);
484  SI->addCase(CGF.Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
485  SeqCstBB);
486  }
487 
488  CGF.Builder.SetInsertPoint(ContBB);
489 }
490 
491 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
492  Address Ptr, Address Val1, Address Val2,
493  llvm::Value *IsWeak, llvm::Value *FailureOrder,
494  uint64_t Size, llvm::AtomicOrdering Order,
 495  llvm::SyncScope::ID Scope) {
 496  llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
497  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
498 
499  switch (E->getOp()) {
500  case AtomicExpr::AO__c11_atomic_init:
501  case AtomicExpr::AO__opencl_atomic_init:
502  llvm_unreachable("Already handled!");
503 
504  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
505  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
506  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
507  FailureOrder, Size, Order, Scope);
508  return;
509  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
510  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
511  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
512  FailureOrder, Size, Order, Scope);
513  return;
514  case AtomicExpr::AO__atomic_compare_exchange:
515  case AtomicExpr::AO__atomic_compare_exchange_n: {
516  if (llvm::ConstantInt *IsWeakC = dyn_cast<llvm::ConstantInt>(IsWeak)) {
517  emitAtomicCmpXchgFailureSet(CGF, E, IsWeakC->getZExtValue(), Dest, Ptr,
518  Val1, Val2, FailureOrder, Size, Order, Scope);
519  } else {
520  // Create all the relevant BB's
521  llvm::BasicBlock *StrongBB =
522  CGF.createBasicBlock("cmpxchg.strong", CGF.CurFn);
523  llvm::BasicBlock *WeakBB = CGF.createBasicBlock("cmxchg.weak", CGF.CurFn);
524  llvm::BasicBlock *ContBB =
525  CGF.createBasicBlock("cmpxchg.continue", CGF.CurFn);
526 
527  llvm::SwitchInst *SI = CGF.Builder.CreateSwitch(IsWeak, WeakBB);
528  SI->addCase(CGF.Builder.getInt1(false), StrongBB);
529 
530  CGF.Builder.SetInsertPoint(StrongBB);
531  emitAtomicCmpXchgFailureSet(CGF, E, false, Dest, Ptr, Val1, Val2,
532  FailureOrder, Size, Order, Scope);
533  CGF.Builder.CreateBr(ContBB);
534 
535  CGF.Builder.SetInsertPoint(WeakBB);
536  emitAtomicCmpXchgFailureSet(CGF, E, true, Dest, Ptr, Val1, Val2,
537  FailureOrder, Size, Order, Scope);
538  CGF.Builder.CreateBr(ContBB);
539 
540  CGF.Builder.SetInsertPoint(ContBB);
541  }
542  return;
543  }
544  case AtomicExpr::AO__c11_atomic_load:
545  case AtomicExpr::AO__opencl_atomic_load:
546  case AtomicExpr::AO__atomic_load_n:
547  case AtomicExpr::AO__atomic_load: {
548  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
549  Load->setAtomic(Order, Scope);
550  Load->setVolatile(E->isVolatile());
551  CGF.Builder.CreateStore(Load, Dest);
552  return;
553  }
554 
555  case AtomicExpr::AO__c11_atomic_store:
556  case AtomicExpr::AO__opencl_atomic_store:
557  case AtomicExpr::AO__atomic_store:
558  case AtomicExpr::AO__atomic_store_n: {
559  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
560  llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
561  Store->setAtomic(Order, Scope);
562  Store->setVolatile(E->isVolatile());
563  return;
564  }
565 
566  case AtomicExpr::AO__c11_atomic_exchange:
567  case AtomicExpr::AO__opencl_atomic_exchange:
568  case AtomicExpr::AO__atomic_exchange_n:
569  case AtomicExpr::AO__atomic_exchange:
570  Op = llvm::AtomicRMWInst::Xchg;
571  break;
572 
573  case AtomicExpr::AO__atomic_add_fetch:
574  PostOp = llvm::Instruction::Add;
575  LLVM_FALLTHROUGH;
576  case AtomicExpr::AO__c11_atomic_fetch_add:
577  case AtomicExpr::AO__opencl_atomic_fetch_add:
578  case AtomicExpr::AO__atomic_fetch_add:
 579  Op = llvm::AtomicRMWInst::Add;
 580  break;
581 
582  case AtomicExpr::AO__atomic_sub_fetch:
583  PostOp = llvm::Instruction::Sub;
584  LLVM_FALLTHROUGH;
585  case AtomicExpr::AO__c11_atomic_fetch_sub:
586  case AtomicExpr::AO__opencl_atomic_fetch_sub:
587  case AtomicExpr::AO__atomic_fetch_sub:
 588  Op = llvm::AtomicRMWInst::Sub;
 589  break;
590 
591  case AtomicExpr::AO__opencl_atomic_fetch_min:
592  case AtomicExpr::AO__atomic_fetch_min:
593  Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Min
594  : llvm::AtomicRMWInst::UMin;
595  break;
596 
597  case AtomicExpr::AO__opencl_atomic_fetch_max:
598  case AtomicExpr::AO__atomic_fetch_max:
599  Op = E->getValueType()->isSignedIntegerType() ? llvm::AtomicRMWInst::Max
600  : llvm::AtomicRMWInst::UMax;
601  break;
602 
603  case AtomicExpr::AO__atomic_and_fetch:
604  PostOp = llvm::Instruction::And;
605  LLVM_FALLTHROUGH;
606  case AtomicExpr::AO__c11_atomic_fetch_and:
607  case AtomicExpr::AO__opencl_atomic_fetch_and:
608  case AtomicExpr::AO__atomic_fetch_and:
 609  Op = llvm::AtomicRMWInst::And;
 610  break;
611 
612  case AtomicExpr::AO__atomic_or_fetch:
613  PostOp = llvm::Instruction::Or;
614  LLVM_FALLTHROUGH;
615  case AtomicExpr::AO__c11_atomic_fetch_or:
616  case AtomicExpr::AO__opencl_atomic_fetch_or:
617  case AtomicExpr::AO__atomic_fetch_or:
618  Op = llvm::AtomicRMWInst::Or;
619  break;
620 
621  case AtomicExpr::AO__atomic_xor_fetch:
622  PostOp = llvm::Instruction::Xor;
623  LLVM_FALLTHROUGH;
624  case AtomicExpr::AO__c11_atomic_fetch_xor:
625  case AtomicExpr::AO__opencl_atomic_fetch_xor:
626  case AtomicExpr::AO__atomic_fetch_xor:
627  Op = llvm::AtomicRMWInst::Xor;
628  break;
629 
630  case AtomicExpr::AO__atomic_nand_fetch:
631  PostOp = llvm::Instruction::And; // the NOT is special cased below
632  LLVM_FALLTHROUGH;
633  case AtomicExpr::AO__atomic_fetch_nand:
634  Op = llvm::AtomicRMWInst::Nand;
635  break;
636  }
637 
638  llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
639  llvm::AtomicRMWInst *RMWI =
640  CGF.Builder.CreateAtomicRMW(Op, Ptr.getPointer(), LoadVal1, Order, Scope);
641  RMWI->setVolatile(E->isVolatile());
642 
643  // For __atomic_*_fetch operations, perform the operation again to
644  // determine the value which was written.
645  llvm::Value *Result = RMWI;
646  if (PostOp)
647  Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
648  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
649  Result = CGF.Builder.CreateNot(Result);
650  CGF.Builder.CreateStore(Result, Dest);
651 }
652 
653 // This function emits any expression (scalar, complex, or aggregate)
654 // into a temporary alloca.
655 static Address
 656 EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
 657  Address DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
658  CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
659  /*Init*/ true);
660  return DeclPtr;
661 }
662 
 663 static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *Expr, Address Dest,
 664  Address Ptr, Address Val1, Address Val2,
665  llvm::Value *IsWeak, llvm::Value *FailureOrder,
666  uint64_t Size, llvm::AtomicOrdering Order,
667  llvm::Value *Scope) {
668  auto ScopeModel = Expr->getScopeModel();
669 
670  // LLVM atomic instructions always have synch scope. If clang atomic
671  // expression has no scope operand, use default LLVM synch scope.
672  if (!ScopeModel) {
673  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
674  Order, CGF.CGM.getLLVMContext().getOrInsertSyncScopeID(""));
675  return;
676  }
677 
678  // Handle constant scope.
679  if (auto SC = dyn_cast<llvm::ConstantInt>(Scope)) {
680  auto SCID = CGF.getTargetHooks().getLLVMSyncScopeID(
681  CGF.CGM.getLangOpts(), ScopeModel->map(SC->getZExtValue()),
682  Order, CGF.CGM.getLLVMContext());
683  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
684  Order, SCID);
685  return;
686  }
687 
688  // Handle non-constant scope.
689  auto &Builder = CGF.Builder;
690  auto Scopes = ScopeModel->getRuntimeValues();
691  llvm::DenseMap<unsigned, llvm::BasicBlock *> BB;
692  for (auto S : Scopes)
693  BB[S] = CGF.createBasicBlock(getAsString(ScopeModel->map(S)), CGF.CurFn);
694 
695  llvm::BasicBlock *ContBB =
696  CGF.createBasicBlock("atomic.scope.continue", CGF.CurFn);
697 
698  auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
699  // If unsupported synch scope is encountered at run time, assume a fallback
700  // synch scope value.
701  auto FallBack = ScopeModel->getFallBackValue();
702  llvm::SwitchInst *SI = Builder.CreateSwitch(SC, BB[FallBack]);
703  for (auto S : Scopes) {
704  auto *B = BB[S];
705  if (S != FallBack)
706  SI->addCase(Builder.getInt32(S), B);
707 
708  Builder.SetInsertPoint(B);
709  EmitAtomicOp(CGF, Expr, Dest, Ptr, Val1, Val2, IsWeak, FailureOrder, Size,
710  Order,
 711  CGF.getTargetHooks().getLLVMSyncScopeID(CGF.CGM.getLangOpts(),
 712  ScopeModel->map(S),
713  Order,
714  CGF.getLLVMContext()));
715  Builder.CreateBr(ContBB);
716  }
717 
718  Builder.SetInsertPoint(ContBB);
719 }
720 
721 static void
 722 AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
 723  bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
724  SourceLocation Loc, CharUnits SizeInChars) {
725  if (UseOptimizedLibcall) {
726  // Load value and pass it to the function directly.
727  CharUnits Align = CGF.getContext().getTypeAlignInChars(ValTy);
728  int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
729  ValTy =
730  CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
731  llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
732  SizeInBits)->getPointerTo();
733  Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
734  Val = CGF.EmitLoadOfScalar(Ptr, false,
735  CGF.getContext().getPointerType(ValTy),
736  Loc);
737  // Coerce the value into an appropriately sized integer type.
738  Args.add(RValue::get(Val), ValTy);
739  } else {
740  // Non-optimized functions always take a reference.
741  Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
742  CGF.getContext().VoidPtrTy);
743  }
744 }
745 
 746 RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
 747  QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
748  QualType MemTy = AtomicTy;
749  if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
750  MemTy = AT->getValueType();
751  llvm::Value *IsWeak = nullptr, *OrderFail = nullptr;
752 
753  Address Val1 = Address::invalid();
754  Address Val2 = Address::invalid();
755  Address Dest = Address::invalid();
756  Address Ptr = EmitPointerWithAlignment(E->getPtr());
757 
758  if (E->getOp() == AtomicExpr::AO__c11_atomic_init ||
759  E->getOp() == AtomicExpr::AO__opencl_atomic_init) {
760  LValue lvalue = MakeAddrLValue(Ptr, AtomicTy);
761  EmitAtomicInit(E->getVal1(), lvalue);
762  return RValue::get(nullptr);
763  }
764 
765  CharUnits sizeChars, alignChars;
766  std::tie(sizeChars, alignChars) = getContext().getTypeInfoInChars(AtomicTy);
767  uint64_t Size = sizeChars.getQuantity();
768  unsigned MaxInlineWidthInBits = getTarget().getMaxAtomicInlineWidth();
769 
770  bool Oversized = getContext().toBits(sizeChars) > MaxInlineWidthInBits;
771  bool Misaligned = (Ptr.getAlignment() % sizeChars) != 0;
772  bool UseLibcall = Misaligned | Oversized;
773 
774  if (UseLibcall) {
775  CGM.getDiags().Report(E->getBeginLoc(), diag::warn_atomic_op_misaligned)
776  << !Oversized;
777  }
778 
779  llvm::Value *Order = EmitScalarExpr(E->getOrder());
780  llvm::Value *Scope =
781  E->getScopeModel() ? EmitScalarExpr(E->getScope()) : nullptr;
782 
783  switch (E->getOp()) {
784  case AtomicExpr::AO__c11_atomic_init:
785  case AtomicExpr::AO__opencl_atomic_init:
786  llvm_unreachable("Already handled above with EmitAtomicInit!");
787 
788  case AtomicExpr::AO__c11_atomic_load:
789  case AtomicExpr::AO__opencl_atomic_load:
790  case AtomicExpr::AO__atomic_load_n:
791  break;
792 
793  case AtomicExpr::AO__atomic_load:
794  Dest = EmitPointerWithAlignment(E->getVal1());
795  break;
796 
797  case AtomicExpr::AO__atomic_store:
798  Val1 = EmitPointerWithAlignment(E->getVal1());
799  break;
800 
801  case AtomicExpr::AO__atomic_exchange:
802  Val1 = EmitPointerWithAlignment(E->getVal1());
803  Dest = EmitPointerWithAlignment(E->getVal2());
804  break;
805 
806  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
807  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
808  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
809  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
810  case AtomicExpr::AO__atomic_compare_exchange_n:
811  case AtomicExpr::AO__atomic_compare_exchange:
812  Val1 = EmitPointerWithAlignment(E->getVal1());
813  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
814  Val2 = EmitPointerWithAlignment(E->getVal2());
815  else
816  Val2 = EmitValToTemp(*this, E->getVal2());
817  OrderFail = EmitScalarExpr(E->getOrderFail());
818  if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
819  E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
820  IsWeak = EmitScalarExpr(E->getWeak());
821  break;
822 
823  case AtomicExpr::AO__c11_atomic_fetch_add:
824  case AtomicExpr::AO__c11_atomic_fetch_sub:
825  case AtomicExpr::AO__opencl_atomic_fetch_add:
826  case AtomicExpr::AO__opencl_atomic_fetch_sub:
827  if (MemTy->isPointerType()) {
828  // For pointer arithmetic, we're required to do a bit of math:
829  // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
830  // ... but only for the C11 builtins. The GNU builtins expect the
831  // user to multiply by sizeof(T).
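  // Editorial note (illustrative example): for an _Atomic(int *) object p,
  // __c11_atomic_fetch_add(&p, 1, order) advances the pointer by sizeof(int)
  // bytes, whereas the GNU __atomic_fetch_add(&p, 1, order) adds exactly 1.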
832  QualType Val1Ty = E->getVal1()->getType();
833  llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
834  CharUnits PointeeIncAmt =
835  getContext().getTypeSizeInChars(MemTy->getPointeeType());
836  Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
837  auto Temp = CreateMemTemp(Val1Ty, ".atomictmp");
838  Val1 = Temp;
839  EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Temp, Val1Ty));
840  break;
841  }
842  LLVM_FALLTHROUGH;
843  case AtomicExpr::AO__atomic_fetch_add:
844  case AtomicExpr::AO__atomic_fetch_sub:
845  case AtomicExpr::AO__atomic_add_fetch:
846  case AtomicExpr::AO__atomic_sub_fetch:
847  case AtomicExpr::AO__c11_atomic_store:
848  case AtomicExpr::AO__c11_atomic_exchange:
849  case AtomicExpr::AO__opencl_atomic_store:
850  case AtomicExpr::AO__opencl_atomic_exchange:
851  case AtomicExpr::AO__atomic_store_n:
852  case AtomicExpr::AO__atomic_exchange_n:
853  case AtomicExpr::AO__c11_atomic_fetch_and:
854  case AtomicExpr::AO__c11_atomic_fetch_or:
855  case AtomicExpr::AO__c11_atomic_fetch_xor:
856  case AtomicExpr::AO__opencl_atomic_fetch_and:
857  case AtomicExpr::AO__opencl_atomic_fetch_or:
858  case AtomicExpr::AO__opencl_atomic_fetch_xor:
859  case AtomicExpr::AO__opencl_atomic_fetch_min:
860  case AtomicExpr::AO__opencl_atomic_fetch_max:
861  case AtomicExpr::AO__atomic_fetch_and:
862  case AtomicExpr::AO__atomic_fetch_or:
863  case AtomicExpr::AO__atomic_fetch_xor:
864  case AtomicExpr::AO__atomic_fetch_nand:
865  case AtomicExpr::AO__atomic_and_fetch:
866  case AtomicExpr::AO__atomic_or_fetch:
867  case AtomicExpr::AO__atomic_xor_fetch:
868  case AtomicExpr::AO__atomic_nand_fetch:
869  case AtomicExpr::AO__atomic_fetch_min:
870  case AtomicExpr::AO__atomic_fetch_max:
871  Val1 = EmitValToTemp(*this, E->getVal1());
872  break;
873  }
874 
875  QualType RValTy = E->getType().getUnqualifiedType();
876 
877  // The inlined atomics only function on iN types, where N is a power of 2. We
878  // need to make sure (via temporaries if necessary) that all incoming values
879  // are compatible.
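  // Editorial note (illustrative example): an _Atomic(float) is accessed
  // through an i32-typed pointer, and an operand whose size does not match the
  // promoted atomic width is first copied into a suitably sized temporary (see
  // convertToAtomicIntPointer) before the iN operation is emitted.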
880  LValue AtomicVal = MakeAddrLValue(Ptr, AtomicTy);
881  AtomicInfo Atomics(*this, AtomicVal);
882 
883  Ptr = Atomics.emitCastToAtomicIntPointer(Ptr);
884  if (Val1.isValid()) Val1 = Atomics.convertToAtomicIntPointer(Val1);
885  if (Val2.isValid()) Val2 = Atomics.convertToAtomicIntPointer(Val2);
886  if (Dest.isValid())
887  Dest = Atomics.emitCastToAtomicIntPointer(Dest);
888  else if (E->isCmpXChg())
889  Dest = CreateMemTemp(RValTy, "cmpxchg.bool");
890  else if (!RValTy->isVoidType())
891  Dest = Atomics.emitCastToAtomicIntPointer(Atomics.CreateTempAlloca());
892 
893  // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
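  // Editorial note (illustrative example): the generic entry points take the
  // object size and work through pointers, e.g.
  //   __atomic_store(8, ptr, valptr, order);
  // while the size-suffixed optimized variants pass values directly, e.g.
  //   __atomic_store_8(ptr, val, order);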
894  if (UseLibcall) {
895  bool UseOptimizedLibcall = false;
896  switch (E->getOp()) {
897  case AtomicExpr::AO__c11_atomic_init:
898  case AtomicExpr::AO__opencl_atomic_init:
899  llvm_unreachable("Already handled above with EmitAtomicInit!");
900 
901  case AtomicExpr::AO__c11_atomic_fetch_add:
902  case AtomicExpr::AO__opencl_atomic_fetch_add:
903  case AtomicExpr::AO__atomic_fetch_add:
904  case AtomicExpr::AO__c11_atomic_fetch_and:
905  case AtomicExpr::AO__opencl_atomic_fetch_and:
906  case AtomicExpr::AO__atomic_fetch_and:
907  case AtomicExpr::AO__c11_atomic_fetch_or:
908  case AtomicExpr::AO__opencl_atomic_fetch_or:
909  case AtomicExpr::AO__atomic_fetch_or:
910  case AtomicExpr::AO__atomic_fetch_nand:
911  case AtomicExpr::AO__c11_atomic_fetch_sub:
912  case AtomicExpr::AO__opencl_atomic_fetch_sub:
913  case AtomicExpr::AO__atomic_fetch_sub:
914  case AtomicExpr::AO__c11_atomic_fetch_xor:
915  case AtomicExpr::AO__opencl_atomic_fetch_xor:
916  case AtomicExpr::AO__opencl_atomic_fetch_min:
917  case AtomicExpr::AO__opencl_atomic_fetch_max:
918  case AtomicExpr::AO__atomic_fetch_xor:
919  case AtomicExpr::AO__atomic_add_fetch:
920  case AtomicExpr::AO__atomic_and_fetch:
921  case AtomicExpr::AO__atomic_nand_fetch:
922  case AtomicExpr::AO__atomic_or_fetch:
923  case AtomicExpr::AO__atomic_sub_fetch:
924  case AtomicExpr::AO__atomic_xor_fetch:
925  case AtomicExpr::AO__atomic_fetch_min:
926  case AtomicExpr::AO__atomic_fetch_max:
927  // For these, only library calls for certain sizes exist.
928  UseOptimizedLibcall = true;
929  break;
930 
931  case AtomicExpr::AO__atomic_load:
932  case AtomicExpr::AO__atomic_store:
933  case AtomicExpr::AO__atomic_exchange:
934  case AtomicExpr::AO__atomic_compare_exchange:
935  // Use the generic version if we don't know that the operand will be
936  // suitably aligned for the optimized version.
937  if (Misaligned)
938  break;
939  LLVM_FALLTHROUGH;
940  case AtomicExpr::AO__c11_atomic_load:
941  case AtomicExpr::AO__c11_atomic_store:
942  case AtomicExpr::AO__c11_atomic_exchange:
943  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
944  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
945  case AtomicExpr::AO__opencl_atomic_load:
946  case AtomicExpr::AO__opencl_atomic_store:
947  case AtomicExpr::AO__opencl_atomic_exchange:
948  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
949  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
950  case AtomicExpr::AO__atomic_load_n:
951  case AtomicExpr::AO__atomic_store_n:
952  case AtomicExpr::AO__atomic_exchange_n:
953  case AtomicExpr::AO__atomic_compare_exchange_n:
954  // Only use optimized library calls for sizes for which they exist.
955  // FIXME: Size == 16 optimized library functions exist too.
956  if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
957  UseOptimizedLibcall = true;
958  break;
959  }
960 
961  CallArgList Args;
962  if (!UseOptimizedLibcall) {
963  // For non-optimized library calls, the size is the first parameter
964  Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
965  getContext().getSizeType());
966  }
967  // Atomic address is the first or second parameter
968  // The OpenCL atomic library functions only accept pointer arguments to
969  // generic address space.
970  auto CastToGenericAddrSpace = [&](llvm::Value *V, QualType PT) {
971  if (!E->isOpenCL())
972  return V;
973  auto AS = PT->castAs<PointerType>()->getPointeeType().getAddressSpace();
974  if (AS == LangAS::opencl_generic)
975  return V;
976  auto DestAS = getContext().getTargetAddressSpace(LangAS::opencl_generic);
977  auto T = V->getType();
978  auto *DestType = T->getPointerElementType()->getPointerTo(DestAS);
979 
980  return getTargetHooks().performAddrSpaceCast(
981  *this, V, AS, LangAS::opencl_generic, DestType, false);
982  };
983 
984  Args.add(RValue::get(CastToGenericAddrSpace(
985  EmitCastToVoidPtr(Ptr.getPointer()), E->getPtr()->getType())),
986  getContext().VoidPtrTy);
987 
988  std::string LibCallName;
989  QualType LoweredMemTy =
990  MemTy->isPointerType() ? getContext().getIntPtrType() : MemTy;
991  QualType RetTy;
992  bool HaveRetTy = false;
993  llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
994  switch (E->getOp()) {
995  case AtomicExpr::AO__c11_atomic_init:
996  case AtomicExpr::AO__opencl_atomic_init:
997  llvm_unreachable("Already handled!");
998 
 999  // There is only one libcall for compare and exchange, because there is no
1000  // optimisation benefit possible from a libcall version of a weak compare
1001  // and exchange.
1002  // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
1003  // void *desired, int success, int failure)
1004  // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
1005  // int success, int failure)
1006  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
1007  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
1008  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
1009  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
1010  case AtomicExpr::AO__atomic_compare_exchange:
1011  case AtomicExpr::AO__atomic_compare_exchange_n:
1012  LibCallName = "__atomic_compare_exchange";
1013  RetTy = getContext().BoolTy;
1014  HaveRetTy = true;
1015  Args.add(
1016  RValue::get(CastToGenericAddrSpace(
1017  EmitCastToVoidPtr(Val1.getPointer()), E->getVal1()->getType())),
1018  getContext().VoidPtrTy);
1019  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2.getPointer(),
1020  MemTy, E->getExprLoc(), sizeChars);
1021  Args.add(RValue::get(Order), getContext().IntTy);
1022  Order = OrderFail;
1023  break;
1024  // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
1025  // int order)
1026  // T __atomic_exchange_N(T *mem, T val, int order)
1027  case AtomicExpr::AO__c11_atomic_exchange:
1028  case AtomicExpr::AO__opencl_atomic_exchange:
1029  case AtomicExpr::AO__atomic_exchange_n:
1030  case AtomicExpr::AO__atomic_exchange:
1031  LibCallName = "__atomic_exchange";
1032  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1033  MemTy, E->getExprLoc(), sizeChars);
1034  break;
1035  // void __atomic_store(size_t size, void *mem, void *val, int order)
1036  // void __atomic_store_N(T *mem, T val, int order)
1037  case AtomicExpr::AO__c11_atomic_store:
1038  case AtomicExpr::AO__opencl_atomic_store:
1039  case AtomicExpr::AO__atomic_store:
1040  case AtomicExpr::AO__atomic_store_n:
1041  LibCallName = "__atomic_store";
1042  RetTy = getContext().VoidTy;
1043  HaveRetTy = true;
1044  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1045  MemTy, E->getExprLoc(), sizeChars);
1046  break;
1047  // void __atomic_load(size_t size, void *mem, void *return, int order)
1048  // T __atomic_load_N(T *mem, int order)
1049  case AtomicExpr::AO__c11_atomic_load:
1050  case AtomicExpr::AO__opencl_atomic_load:
1051  case AtomicExpr::AO__atomic_load:
1052  case AtomicExpr::AO__atomic_load_n:
1053  LibCallName = "__atomic_load";
1054  break;
1055  // T __atomic_add_fetch_N(T *mem, T val, int order)
1056  // T __atomic_fetch_add_N(T *mem, T val, int order)
1057  case AtomicExpr::AO__atomic_add_fetch:
1058  PostOp = llvm::Instruction::Add;
1059  LLVM_FALLTHROUGH;
1060  case AtomicExpr::AO__c11_atomic_fetch_add:
1061  case AtomicExpr::AO__opencl_atomic_fetch_add:
1062  case AtomicExpr::AO__atomic_fetch_add:
1063  LibCallName = "__atomic_fetch_add";
1064  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1065  LoweredMemTy, E->getExprLoc(), sizeChars);
1066  break;
1067  // T __atomic_and_fetch_N(T *mem, T val, int order)
1068  // T __atomic_fetch_and_N(T *mem, T val, int order)
1069  case AtomicExpr::AO__atomic_and_fetch:
1070  PostOp = llvm::Instruction::And;
1071  LLVM_FALLTHROUGH;
1072  case AtomicExpr::AO__c11_atomic_fetch_and:
1073  case AtomicExpr::AO__opencl_atomic_fetch_and:
1074  case AtomicExpr::AO__atomic_fetch_and:
1075  LibCallName = "__atomic_fetch_and";
1076  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1077  MemTy, E->getExprLoc(), sizeChars);
1078  break;
1079  // T __atomic_or_fetch_N(T *mem, T val, int order)
1080  // T __atomic_fetch_or_N(T *mem, T val, int order)
1081  case AtomicExpr::AO__atomic_or_fetch:
1082  PostOp = llvm::Instruction::Or;
1083  LLVM_FALLTHROUGH;
1084  case AtomicExpr::AO__c11_atomic_fetch_or:
1085  case AtomicExpr::AO__opencl_atomic_fetch_or:
1086  case AtomicExpr::AO__atomic_fetch_or:
1087  LibCallName = "__atomic_fetch_or";
1088  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1089  MemTy, E->getExprLoc(), sizeChars);
1090  break;
1091  // T __atomic_sub_fetch_N(T *mem, T val, int order)
1092  // T __atomic_fetch_sub_N(T *mem, T val, int order)
1093  case AtomicExpr::AO__atomic_sub_fetch:
1094  PostOp = llvm::Instruction::Sub;
1095  LLVM_FALLTHROUGH;
1096  case AtomicExpr::AO__c11_atomic_fetch_sub:
1097  case AtomicExpr::AO__opencl_atomic_fetch_sub:
1098  case AtomicExpr::AO__atomic_fetch_sub:
1099  LibCallName = "__atomic_fetch_sub";
1100  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1101  LoweredMemTy, E->getExprLoc(), sizeChars);
1102  break;
1103  // T __atomic_xor_fetch_N(T *mem, T val, int order)
1104  // T __atomic_fetch_xor_N(T *mem, T val, int order)
1105  case AtomicExpr::AO__atomic_xor_fetch:
1106  PostOp = llvm::Instruction::Xor;
1107  LLVM_FALLTHROUGH;
1108  case AtomicExpr::AO__c11_atomic_fetch_xor:
1109  case AtomicExpr::AO__opencl_atomic_fetch_xor:
1110  case AtomicExpr::AO__atomic_fetch_xor:
1111  LibCallName = "__atomic_fetch_xor";
1112  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1113  MemTy, E->getExprLoc(), sizeChars);
1114  break;
1115  case AtomicExpr::AO__atomic_fetch_min:
1116  case AtomicExpr::AO__opencl_atomic_fetch_min:
1117  LibCallName = E->getValueType()->isSignedIntegerType()
1118  ? "__atomic_fetch_min"
1119  : "__atomic_fetch_umin";
1120  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1121  LoweredMemTy, E->getExprLoc(), sizeChars);
1122  break;
1123  case AtomicExpr::AO__atomic_fetch_max:
1124  case AtomicExpr::AO__opencl_atomic_fetch_max:
1125  LibCallName = E->getValueType()->isSignedIntegerType()
1126  ? "__atomic_fetch_max"
1127  : "__atomic_fetch_umax";
1128  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1129  LoweredMemTy, E->getExprLoc(), sizeChars);
1130  break;
1131  // T __atomic_nand_fetch_N(T *mem, T val, int order)
1132  // T __atomic_fetch_nand_N(T *mem, T val, int order)
1133  case AtomicExpr::AO__atomic_nand_fetch:
1134  PostOp = llvm::Instruction::And; // the NOT is special cased below
1135  LLVM_FALLTHROUGH;
1136  case AtomicExpr::AO__atomic_fetch_nand:
1137  LibCallName = "__atomic_fetch_nand";
1138  AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1.getPointer(),
1139  MemTy, E->getExprLoc(), sizeChars);
1140  break;
1141  }
1142 
1143  if (E->isOpenCL()) {
1144  LibCallName = std::string("__opencl") +
1145  StringRef(LibCallName).drop_front(1).str();
1146 
1147  }
1148  // Optimized functions have the size in their name.
1149  if (UseOptimizedLibcall)
1150  LibCallName += "_" + llvm::utostr(Size);
1151  // By default, assume we return a value of the atomic type.
1152  if (!HaveRetTy) {
1153  if (UseOptimizedLibcall) {
1154  // Value is returned directly.
1155  // The function returns an appropriately sized integer type.
1156  RetTy = getContext().getIntTypeForBitwidth(
1157  getContext().toBits(sizeChars), /*Signed=*/false);
1158  } else {
1159  // Value is returned through parameter before the order.
1160  RetTy = getContext().VoidTy;
1161  Args.add(RValue::get(EmitCastToVoidPtr(Dest.getPointer())),
1162  getContext().VoidPtrTy);
1163  }
1164  }
1165  // order is always the last parameter
1166  Args.add(RValue::get(Order),
1167  getContext().IntTy);
1168  if (E->isOpenCL())
1169  Args.add(RValue::get(Scope), getContext().IntTy);
1170 
1171  // PostOp is only needed for the atomic_*_fetch operations, and
1172  // thus is only needed for and implemented in the
1173  // UseOptimizedLibcall codepath.
1174  assert(UseOptimizedLibcall || !PostOp);
1175 
1176  RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
1177  // The value is returned directly from the libcall.
1178  if (E->isCmpXChg())
1179  return Res;
1180 
1181  // The value is returned directly for optimized libcalls but the expr
1182  // provided an out-param.
1183  if (UseOptimizedLibcall && Res.getScalarVal()) {
1184  llvm::Value *ResVal = Res.getScalarVal();
1185  if (PostOp) {
1186  llvm::Value *LoadVal1 = Args[1].getRValue(*this).getScalarVal();
1187  ResVal = Builder.CreateBinOp(PostOp, ResVal, LoadVal1);
1188  }
1189  if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
1190  ResVal = Builder.CreateNot(ResVal);
1191 
1192  Builder.CreateStore(
1193  ResVal,
1194  Builder.CreateBitCast(Dest, ResVal->getType()->getPointerTo()));
1195  }
1196 
1197  if (RValTy->isVoidType())
1198  return RValue::get(nullptr);
1199 
1200  return convertTempToRValue(
1201  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo()),
1202  RValTy, E->getExprLoc());
1203  }
1204 
1205  bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
1206  E->getOp() == AtomicExpr::AO__opencl_atomic_store ||
1207  E->getOp() == AtomicExpr::AO__atomic_store ||
1208  E->getOp() == AtomicExpr::AO__atomic_store_n;
1209  bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
1210  E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1211  E->getOp() == AtomicExpr::AO__atomic_load ||
1212  E->getOp() == AtomicExpr::AO__atomic_load_n;
1213 
1214  if (isa<llvm::ConstantInt>(Order)) {
1215  auto ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
1216  // We should not ever get to a case where the ordering isn't a valid C ABI
1217  // value, but it's hard to enforce that in general.
1218  if (llvm::isValidAtomicOrderingCABI(ord))
1219  switch ((llvm::AtomicOrderingCABI)ord) {
1220  case llvm::AtomicOrderingCABI::relaxed:
1221  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1222  llvm::AtomicOrdering::Monotonic, Scope);
1223  break;
1224  case llvm::AtomicOrderingCABI::consume:
1225  case llvm::AtomicOrderingCABI::acquire:
1226  if (IsStore)
1227  break; // Avoid crashing on code with undefined behavior
1228  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1229  llvm::AtomicOrdering::Acquire, Scope);
1230  break;
1231  case llvm::AtomicOrderingCABI::release:
1232  if (IsLoad)
1233  break; // Avoid crashing on code with undefined behavior
1234  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1235  llvm::AtomicOrdering::Release, Scope);
1236  break;
1237  case llvm::AtomicOrderingCABI::acq_rel:
1238  if (IsLoad || IsStore)
1239  break; // Avoid crashing on code with undefined behavior
1240  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1241  llvm::AtomicOrdering::AcquireRelease, Scope);
1242  break;
1243  case llvm::AtomicOrderingCABI::seq_cst:
1244  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1245  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1246  break;
1247  }
1248  if (RValTy->isVoidType())
1249  return RValue::get(nullptr);
1250 
1251  return convertTempToRValue(
1252  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1253  Dest.getAddressSpace())),
1254  RValTy, E->getExprLoc());
1255  }
1256 
1257  // Long case, when Order isn't obviously constant.
1258 
1259  // Create all the relevant BB's
1260  llvm::BasicBlock *MonotonicBB = nullptr, *AcquireBB = nullptr,
1261  *ReleaseBB = nullptr, *AcqRelBB = nullptr,
1262  *SeqCstBB = nullptr;
1263  MonotonicBB = createBasicBlock("monotonic", CurFn);
1264  if (!IsStore)
1265  AcquireBB = createBasicBlock("acquire", CurFn);
1266  if (!IsLoad)
1267  ReleaseBB = createBasicBlock("release", CurFn);
1268  if (!IsLoad && !IsStore)
1269  AcqRelBB = createBasicBlock("acqrel", CurFn);
1270  SeqCstBB = createBasicBlock("seqcst", CurFn);
1271  llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
1272 
1273  // Create the switch for the split
1274  // MonotonicBB is arbitrarily chosen as the default case; in practice, this
1275  // doesn't matter unless someone is crazy enough to use something that
1276  // doesn't fold to a constant for the ordering.
1277  Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
1278  llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
1279 
1280  // Emit all the different atomics
1281  Builder.SetInsertPoint(MonotonicBB);
1282  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1283  llvm::AtomicOrdering::Monotonic, Scope);
1284  Builder.CreateBr(ContBB);
1285  if (!IsStore) {
1286  Builder.SetInsertPoint(AcquireBB);
1287  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1288  llvm::AtomicOrdering::Acquire, Scope);
1289  Builder.CreateBr(ContBB);
1290  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::consume),
1291  AcquireBB);
1292  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acquire),
1293  AcquireBB);
1294  }
1295  if (!IsLoad) {
1296  Builder.SetInsertPoint(ReleaseBB);
1297  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1298  llvm::AtomicOrdering::Release, Scope);
1299  Builder.CreateBr(ContBB);
1300  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::release),
1301  ReleaseBB);
1302  }
1303  if (!IsLoad && !IsStore) {
1304  Builder.SetInsertPoint(AcqRelBB);
1305  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1306  llvm::AtomicOrdering::AcquireRelease, Scope);
1307  Builder.CreateBr(ContBB);
1308  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::acq_rel),
1309  AcqRelBB);
1310  }
1311  Builder.SetInsertPoint(SeqCstBB);
1312  EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, IsWeak, OrderFail, Size,
1313  llvm::AtomicOrdering::SequentiallyConsistent, Scope);
1314  Builder.CreateBr(ContBB);
1315  SI->addCase(Builder.getInt32((int)llvm::AtomicOrderingCABI::seq_cst),
1316  SeqCstBB);
1317 
1318  // Cleanup and return
1319  Builder.SetInsertPoint(ContBB);
1320  if (RValTy->isVoidType())
1321  return RValue::get(nullptr);
1322 
1323  assert(Atomics.getValueSizeInBits() <= Atomics.getAtomicSizeInBits());
1324  return convertTempToRValue(
1325  Builder.CreateBitCast(Dest, ConvertTypeForMem(RValTy)->getPointerTo(
1326  Dest.getAddressSpace())),
1327  RValTy, E->getExprLoc());
1328 }
1329 
1330 Address AtomicInfo::emitCastToAtomicIntPointer(Address addr) const {
1331  unsigned addrspace =
1332  cast<llvm::PointerType>(addr.getPointer()->getType())->getAddressSpace();
1333  llvm::IntegerType *ty =
1334  llvm::IntegerType::get(CGF.getLLVMContext(), AtomicSizeInBits);
1335  return CGF.Builder.CreateBitCast(addr, ty->getPointerTo(addrspace));
1336 }
1337 
1338 Address AtomicInfo::convertToAtomicIntPointer(Address Addr) const {
1339  llvm::Type *Ty = Addr.getElementType();
1340  uint64_t SourceSizeInBits = CGF.CGM.getDataLayout().getTypeSizeInBits(Ty);
1341  if (SourceSizeInBits != AtomicSizeInBits) {
1342  Address Tmp = CreateTempAlloca();
1343  CGF.Builder.CreateMemCpy(Tmp, Addr,
1344  std::min(AtomicSizeInBits, SourceSizeInBits) / 8);
1345  Addr = Tmp;
1346  }
1347 
1348  return emitCastToAtomicIntPointer(Addr);
1349 }
1350 
1351 RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
1352  AggValueSlot resultSlot,
1353  SourceLocation loc,
1354  bool asValue) const {
1355  if (LVal.isSimple()) {
1356  if (EvaluationKind == TEK_Aggregate)
1357  return resultSlot.asRValue();
1358 
1359  // Drill into the padding structure if we have one.
1360  if (hasPadding())
1361  addr = CGF.Builder.CreateStructGEP(addr, 0);
1362 
1363  // Otherwise, just convert the temporary to an r-value using the
1364  // normal conversion routine.
1365  return CGF.convertTempToRValue(addr, getValueType(), loc);
1366  }
1367  if (!asValue)
1368  // Get RValue from temp memory as atomic for non-simple lvalues
1369  return RValue::get(CGF.Builder.CreateLoad(addr));
1370  if (LVal.isBitField())
1371  return CGF.EmitLoadOfBitfieldLValue(
1372  LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
1373  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1374  if (LVal.isVectorElt())
1375  return CGF.EmitLoadOfLValue(
1376  LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
1377  LVal.getBaseInfo(), TBAAAccessInfo()), loc);
1378  assert(LVal.isExtVectorElt());
 1379  return CGF.EmitLoadOfExtVectorElementLValue(LValue::MakeExtVectorElt(
 1380  addr, LVal.getExtVectorElts(), LVal.getType(),
1381  LVal.getBaseInfo(), TBAAAccessInfo()));
1382 }
1383 
1384 RValue AtomicInfo::ConvertIntToValueOrAtomic(llvm::Value *IntVal,
1385  AggValueSlot ResultSlot,
1386  SourceLocation Loc,
1387  bool AsValue) const {
 1388  // Try to avoid creating a temporary in some easy cases.
1389  assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
1390  if (getEvaluationKind() == TEK_Scalar &&
1391  (((!LVal.isBitField() ||
1392  LVal.getBitFieldInfo().Size == ValueSizeInBits) &&
1393  !hasPadding()) ||
1394  !AsValue)) {
1395  auto *ValTy = AsValue
1396  ? CGF.ConvertTypeForMem(ValueTy)
1397  : getAtomicAddress().getType()->getPointerElementType();
1398  if (ValTy->isIntegerTy()) {
1399  assert(IntVal->getType() == ValTy && "Different integer types.");
1400  return RValue::get(CGF.EmitFromMemory(IntVal, ValueTy));
1401  } else if (ValTy->isPointerTy())
1402  return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
1403  else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
1404  return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
1405  }
1406 
1407  // Create a temporary. This needs to be big enough to hold the
1408  // atomic integer.
1409  Address Temp = Address::invalid();
1410  bool TempIsVolatile = false;
1411  if (AsValue && getEvaluationKind() == TEK_Aggregate) {
1412  assert(!ResultSlot.isIgnored());
1413  Temp = ResultSlot.getAddress();
1414  TempIsVolatile = ResultSlot.isVolatile();
1415  } else {
1416  Temp = CreateTempAlloca();
1417  }
1418 
1419  // Slam the integer into the temporary.
1420  Address CastTemp = emitCastToAtomicIntPointer(Temp);
1421  CGF.Builder.CreateStore(IntVal, CastTemp)
1422  ->setVolatile(TempIsVolatile);
1423 
1424  return convertAtomicTempToRValue(Temp, ResultSlot, Loc, AsValue);
1425 }
1426 
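// Editor's illustrative note, not part of the upstream source: the fast path
// above covers, for instance, an _Atomic(float) whose payload was loaded as an
// i32. The value is recovered with a single bitcast instead of a round trip
// through a temporary:
//
//   %f = bitcast i32 %atomic-load to float
//
// Padded and aggregate results instead go through the temporary created above.
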
1427 void AtomicInfo::EmitAtomicLoadLibcall(llvm::Value *AddForLoaded,
1428  llvm::AtomicOrdering AO, bool) {
1429  // void __atomic_load(size_t size, void *mem, void *return, int order);
1430  CallArgList Args;
1431  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1432  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1433  CGF.getContext().VoidPtrTy);
1434  Args.add(RValue::get(CGF.EmitCastToVoidPtr(AddForLoaded)),
1435  CGF.getContext().VoidPtrTy);
1436  Args.add(
1437  RValue::get(llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(AO))),
1438  CGF.getContext().IntTy);
1439  emitAtomicLibcall(CGF, "__atomic_load", CGF.getContext().VoidTy, Args);
1440 }
1441 
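// Editor's illustrative note, not part of the upstream source: for a 4-byte
// object loaded seq_cst, the libcall emitted above looks roughly like
//
//   call void @__atomic_load(i64 4, i8* %obj, i8* %tmp, i32 5)
//
// where 5 is the C ABI value of memory_order_seq_cst produced by llvm::toCABI,
// and %tmp is the buffer later converted back to an r-value.
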
1442 llvm::Value *AtomicInfo::EmitAtomicLoadOp(llvm::AtomicOrdering AO,
1443  bool IsVolatile) {
1444  // Okay, we're doing this natively.
1445  Address Addr = getAtomicAddressAsAtomicIntPointer();
1446  llvm::LoadInst *Load = CGF.Builder.CreateLoad(Addr, "atomic-load");
1447  Load->setAtomic(AO);
1448 
1449  // Other decoration.
1450  if (IsVolatile)
1451  Load->setVolatile(true);
1452  CGF.CGM.DecorateInstructionWithTBAA(Load, LVal.getTBAAInfo());
1453  return Load;
1454 }
1455 
1456 /// An LValue is a candidate for having its loads and stores be made atomic if
1457 /// we are operating under /volatile:ms *and* the LValue itself is volatile and
1458 /// such an operation can be performed without a libcall.
1459 bool CodeGenFunction::LValueIsSuitableForInlineAtomic(LValue LV) {
1460  if (!CGM.getCodeGenOpts().MSVolatile) return false;
1461  AtomicInfo AI(*this, LV);
1462  bool IsVolatile = LV.isVolatile() || hasVolatileMember(LV.getType());
1463  // An atomic is inline if we don't need to use a libcall.
1464  bool AtomicIsInline = !AI.shouldUseLibcall();
1465  // MSVC doesn't seem to do this for types wider than a pointer.
1466  if (getContext().getTypeSize(LV.getType()) >
1467  getContext().getTypeSize(getContext().getIntPtrType()))
1468  return false;
1469  return IsVolatile && AtomicIsInline;
1470 }
1471 
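// Editor's illustrative note, not part of the upstream source: the MSVolatile
// code-generation mode models MSVC's /volatile:ms, where an ordinary volatile
// access of at most pointer width gets acquire/release semantics. For example:
//
//   volatile int Flag;
//   int observe() { return Flag; }   // may be emitted as an atomic acquire load
//   void publish() { Flag = 1; }     // may be emitted as an atomic release store
//
// Accesses wider than a pointer, or ones that would need a libcall, keep plain
// volatile semantics, which is exactly what the predicate above checks.
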
1472 RValue CodeGenFunction::EmitAtomicLoad(LValue LV, SourceLocation SL,
1473  AggValueSlot Slot) {
1474  llvm::AtomicOrdering AO;
1475  bool IsVolatile = LV.isVolatileQualified();
1476  if (LV.getType()->isAtomicType()) {
1477  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1478  } else {
1479  AO = llvm::AtomicOrdering::Acquire;
1480  IsVolatile = true;
1481  }
1482  return EmitAtomicLoad(LV, SL, AO, IsVolatile, Slot);
1483 }
1484 
1485 RValue AtomicInfo::EmitAtomicLoad(AggValueSlot ResultSlot, SourceLocation Loc,
1486  bool AsValue, llvm::AtomicOrdering AO,
1487  bool IsVolatile) {
1488  // Check whether we should use a library call.
1489  if (shouldUseLibcall()) {
1490  Address TempAddr = Address::invalid();
1491  if (LVal.isSimple() && !ResultSlot.isIgnored()) {
1492  assert(getEvaluationKind() == TEK_Aggregate);
1493  TempAddr = ResultSlot.getAddress();
1494  } else
1495  TempAddr = CreateTempAlloca();
1496 
1497  EmitAtomicLoadLibcall(TempAddr.getPointer(), AO, IsVolatile);
1498 
1499  // Okay, turn that back into the original value or whole atomic (for
1500  // non-simple lvalues) type.
1501  return convertAtomicTempToRValue(TempAddr, ResultSlot, Loc, AsValue);
1502  }
1503 
1504  // Okay, we're doing this natively.
1505  auto *Load = EmitAtomicLoadOp(AO, IsVolatile);
1506 
1507  // If we're ignoring an aggregate return, don't do anything.
1508  if (getEvaluationKind() == TEK_Aggregate && ResultSlot.isIgnored())
1509  return RValue::getAggregate(Address::invalid(), false);
1510 
1511  // Okay, turn that back into the original value or atomic (for non-simple
1512  // lvalues) type.
1513  return ConvertIntToValueOrAtomic(Load, ResultSlot, Loc, AsValue);
1514 }
1515 
1516 /// Emit a load from an l-value of atomic type. Note that the r-value
1517 /// we produce is an r-value of the atomic *value* type.
1518 RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
1519  llvm::AtomicOrdering AO, bool IsVolatile,
1520  AggValueSlot resultSlot) {
1521  AtomicInfo Atomics(*this, src);
1522  return Atomics.EmitAtomicLoad(resultSlot, loc, /*AsValue=*/true, AO,
1523  IsVolatile);
1524 }
1525 
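// Editor's illustrative note, not part of the upstream source: given
//
//   _Atomic(int) Counter;
//   int snapshot() { return Counter; }
//
// the implicit load of Counter is emitted through the routine above, and the
// resulting r-value has the value type int rather than _Atomic(int); any
// padding in the atomic representation has already been stripped.
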
1526 /// Copy an r-value into memory as part of storing to an atomic type.
1527 /// This needs to create a bit-pattern suitable for atomic operations.
1528 void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
1529  assert(LVal.isSimple());
1530  // If we have an r-value, the rvalue should be of the atomic type,
1531  // which means that the caller is responsible for having zeroed
1532  // any padding. Just do an aggregate copy of that type.
1533  if (rvalue.isAggregate()) {
1534  LValue Dest = CGF.MakeAddrLValue(getAtomicAddress(), getAtomicType());
1535  LValue Src = CGF.MakeAddrLValue(rvalue.getAggregateAddress(),
1536  getAtomicType());
1537  bool IsVolatile = rvalue.isVolatileQualified() ||
1538  LVal.isVolatileQualified();
1539  CGF.EmitAggregateCopy(Dest, Src, getAtomicType(),
1540  AggValueSlot::DoesNotOverlap, IsVolatile);
1541  return;
1542  }
1543 
1544  // Okay, otherwise we're copying stuff.
1545 
1546  // Zero out the buffer if necessary.
1547  emitMemSetZeroIfNecessary();
1548 
1549  // Drill past the padding if present.
1550  LValue TempLVal = projectValue();
1551 
1552  // Okay, store the rvalue in.
1553  if (rvalue.isScalar()) {
1554  CGF.EmitStoreOfScalar(rvalue.getScalarVal(), TempLVal, /*init*/ true);
1555  } else {
1556  CGF.EmitStoreOfComplex(rvalue.getComplexVal(), TempLVal, /*init*/ true);
1557  }
1558 }
1559 
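// Editor's illustrative note, not part of the upstream source: the zeroing step
// above matters when the value does not cover the whole atomic width. Reusing
// the hypothetical example from earlier,
//
//   struct Padded { char c[3]; };
//   _Atomic(struct Padded) Obj;       // 4-byte representation, 1 padding byte
//
// the buffer is zero-filled before the value is stored so the padding byte has
// a deterministic value, keeping later whole-width cmpxchg comparisons
// predictable.
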
1560 
1561 /// Materialize an r-value into memory for the purposes of storing it
1562 /// to an atomic type.
1563 Address AtomicInfo::materializeRValue(RValue rvalue) const {
1564  // Aggregate r-values are already in memory, and EmitAtomicStore
1565  // requires them to be values of the atomic type.
1566  if (rvalue.isAggregate())
1567  return rvalue.getAggregateAddress();
1568 
1569  // Otherwise, make a temporary and materialize into it.
1570  LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
1571  AtomicInfo Atomics(CGF, TempLV);
1572  Atomics.emitCopyIntoMemory(rvalue);
1573  return TempLV.getAddress();
1574 }
1575 
1576 llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
1577  // If we've got a scalar value of the right size, try to avoid going
1578  // through memory.
1579  if (RVal.isScalar() && (!hasPadding() || !LVal.isSimple())) {
1580  llvm::Value *Value = RVal.getScalarVal();
1581  if (isa<llvm::IntegerType>(Value->getType()))
1582  return CGF.EmitToMemory(Value, ValueTy);
1583  else {
1584  llvm::IntegerType *InputIntTy = llvm::IntegerType::get(
1585  CGF.getLLVMContext(),
1586  LVal.isSimple() ? getValueSizeInBits() : getAtomicSizeInBits());
1587  if (isa<llvm::PointerType>(Value->getType()))
1588  return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
1589  else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
1590  return CGF.Builder.CreateBitCast(Value, InputIntTy);
1591  }
1592  }
1593  // Otherwise, we need to go through memory.
1594  // Put the r-value in memory.
1595  Address Addr = materializeRValue(RVal);
1596 
1597  // Cast the temporary to the atomic int type and pull a value out.
1598  Addr = emitCastToAtomicIntPointer(Addr);
1599  return CGF.Builder.CreateLoad(Addr);
1600 }
1601 
1602 std::pair<llvm::Value *, llvm::Value *> AtomicInfo::EmitAtomicCompareExchangeOp(
1603  llvm::Value *ExpectedVal, llvm::Value *DesiredVal,
1604  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak) {
1605  // Do the atomic compare-exchange.
1606  Address Addr = getAtomicAddressAsAtomicIntPointer();
1607  auto *Inst = CGF.Builder.CreateAtomicCmpXchg(Addr.getPointer(),
1608  ExpectedVal, DesiredVal,
1609  Success, Failure);
1610  // Other decoration.
1611  Inst->setVolatile(LVal.isVolatileQualified());
1612  Inst->setWeak(IsWeak);
1613 
1614  // Okay, turn that back into the original value type.
1615  auto *PreviousVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/0);
1616  auto *SuccessFailureVal = CGF.Builder.CreateExtractValue(Inst, /*Idxs=*/1);
1617  return std::make_pair(PreviousVal, SuccessFailureVal);
1618 }
1619 
1620 llvm::Value *
1621 AtomicInfo::EmitAtomicCompareExchangeLibcall(llvm::Value *ExpectedAddr,
1622  llvm::Value *DesiredAddr,
1623  llvm::AtomicOrdering Success,
1624  llvm::AtomicOrdering Failure) {
1625  // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
1626  // void *desired, int success, int failure);
1627  CallArgList Args;
1628  Args.add(RValue::get(getAtomicSizeValue()), CGF.getContext().getSizeType());
1629  Args.add(RValue::get(CGF.EmitCastToVoidPtr(getAtomicPointer())),
1630  CGF.getContext().VoidPtrTy);
1631  Args.add(RValue::get(CGF.EmitCastToVoidPtr(ExpectedAddr)),
1632  CGF.getContext().VoidPtrTy);
1633  Args.add(RValue::get(CGF.EmitCastToVoidPtr(DesiredAddr)),
1634  CGF.getContext().VoidPtrTy);
1635  Args.add(RValue::get(
1636  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Success))),
1637  CGF.getContext().IntTy);
1638  Args.add(RValue::get(
1639  llvm::ConstantInt::get(CGF.IntTy, (int)llvm::toCABI(Failure))),
1640  CGF.getContext().IntTy);
1641  auto SuccessFailureRVal = emitAtomicLibcall(CGF, "__atomic_compare_exchange",
1642  CGF.getContext().BoolTy, Args);
1643 
1644  return SuccessFailureRVal.getScalarVal();
1645 }
1646 
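// Editor's illustrative note, not part of the upstream source: for an 8-byte
// object with seq_cst success and acquire failure orderings (C ABI values 5
// and 2), the call above is emitted roughly as
//
//   %ok = call zeroext i1 @__atomic_compare_exchange(i64 8, i8* %obj,
//                                                    i8* %expected, i8* %desired,
//                                                    i32 5, i32 2)
//
// On failure the runtime writes the value it observed back through %expected,
// which is why callers re-read the expected buffer afterwards.
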
1647 std::pair<RValue, llvm::Value *> AtomicInfo::EmitAtomicCompareExchange(
1648  RValue Expected, RValue Desired, llvm::AtomicOrdering Success,
1649  llvm::AtomicOrdering Failure, bool IsWeak) {
1650  if (isStrongerThan(Failure, Success))
1651  // Don't assert on undefined behavior "failure argument shall be no stronger
1652  // than the success argument".
1653  Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
1654 
1655  // Check whether we should use a library call.
1656  if (shouldUseLibcall()) {
1657  // Produce a source address.
1658  Address ExpectedAddr = materializeRValue(Expected);
1659  Address DesiredAddr = materializeRValue(Desired);
1660  auto *Res = EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1661  DesiredAddr.getPointer(),
1662  Success, Failure);
1663  return std::make_pair(
1664  convertAtomicTempToRValue(ExpectedAddr, AggValueSlot::ignored(),
1665  SourceLocation(), /*AsValue=*/false),
1666  Res);
1667  }
1668 
1669  // If we've got a scalar value of the right size, try to avoid going
1670  // through memory.
1671  auto *ExpectedVal = convertRValueToInt(Expected);
1672  auto *DesiredVal = convertRValueToInt(Desired);
1673  auto Res = EmitAtomicCompareExchangeOp(ExpectedVal, DesiredVal, Success,
1674  Failure, IsWeak);
1675  return std::make_pair(
1676  ConvertIntToValueOrAtomic(Res.first, AggValueSlot::ignored(),
1677  SourceLocation(), /*AsValue=*/false),
1678  Res.second);
1679 }
1680 
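// Editor's illustrative note, not part of the upstream source: the clamping at
// the top of the function keeps otherwise-undefined ordering pairs well formed
// instead of asserting. Two hypothetical examples:
//
//   Success = acq_rel, Failure = seq_cst
//     -> Failure becomes getStrongestFailureOrdering(acq_rel) == acquire
//   Success = release, Failure = seq_cst
//     -> Failure becomes getStrongestFailureOrdering(release) == monotonic
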
1681 static void
1682 EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics, RValue OldRVal,
1683  const llvm::function_ref<RValue(RValue)> &UpdateOp,
1684  Address DesiredAddr) {
1685  RValue UpRVal;
1686  LValue AtomicLVal = Atomics.getAtomicLValue();
1687  LValue DesiredLVal;
1688  if (AtomicLVal.isSimple()) {
1689  UpRVal = OldRVal;
1690  DesiredLVal = CGF.MakeAddrLValue(DesiredAddr, AtomicLVal.getType());
1691  } else {
1692  // Build new lvalue for temp address.
1693  Address Ptr = Atomics.materializeRValue(OldRVal);
1694  LValue UpdateLVal;
1695  if (AtomicLVal.isBitField()) {
1696  UpdateLVal =
1697  LValue::MakeBitfield(Ptr, AtomicLVal.getBitFieldInfo(),
1698  AtomicLVal.getType(),
1699  AtomicLVal.getBaseInfo(),
1700  AtomicLVal.getTBAAInfo());
1701  DesiredLVal =
1702  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1703  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1704  AtomicLVal.getTBAAInfo());
1705  } else if (AtomicLVal.isVectorElt()) {
1706  UpdateLVal = LValue::MakeVectorElt(Ptr, AtomicLVal.getVectorIdx(),
1707  AtomicLVal.getType(),
1708  AtomicLVal.getBaseInfo(),
1709  AtomicLVal.getTBAAInfo());
1710  DesiredLVal = LValue::MakeVectorElt(
1711  DesiredAddr, AtomicLVal.getVectorIdx(), AtomicLVal.getType(),
1712  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1713  } else {
1714  assert(AtomicLVal.isExtVectorElt());
1715  UpdateLVal = LValue::MakeExtVectorElt(Ptr, AtomicLVal.getExtVectorElts(),
1716  AtomicLVal.getType(),
1717  AtomicLVal.getBaseInfo(),
1718  AtomicLVal.getTBAAInfo());
1719  DesiredLVal = LValue::MakeExtVectorElt(
1720  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1721  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1722  }
1723  UpRVal = CGF.EmitLoadOfLValue(UpdateLVal, SourceLocation());
1724  }
1725  // Store new value in the corresponding memory area.
1726  RValue NewRVal = UpdateOp(UpRVal);
1727  if (NewRVal.isScalar()) {
1728  CGF.EmitStoreThroughLValue(NewRVal, DesiredLVal);
1729  } else {
1730  assert(NewRVal.isComplex());
1731  CGF.EmitStoreOfComplex(NewRVal.getComplexVal(), DesiredLVal,
1732  /*isInit=*/false);
1733  }
1734 }
1735 
1736 void AtomicInfo::EmitAtomicUpdateLibcall(
1737  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1738  bool IsVolatile) {
1739  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1740 
1741  Address ExpectedAddr = CreateTempAlloca();
1742 
1743  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1744  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1745  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1746  CGF.EmitBlock(ContBB);
1747  Address DesiredAddr = CreateTempAlloca();
1748  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1749  requiresMemSetZero(getAtomicAddress().getElementType())) {
1750  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1751  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1752  }
1753  auto OldRVal = convertAtomicTempToRValue(ExpectedAddr,
1754  AggValueSlot::ignored(),
1755  SourceLocation(), /*AsValue=*/false);
1756  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, DesiredAddr);
1757  auto *Res =
1758  EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1759  DesiredAddr.getPointer(),
1760  AO, Failure);
1761  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1762  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1763 }
1764 
1765 void AtomicInfo::EmitAtomicUpdateOp(
1766  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1767  bool IsVolatile) {
1768  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1769 
1770  // Do the atomic load.
1771  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1772  // For non-simple lvalues perform a compare-and-swap procedure.
1773  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1774  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1775  auto *CurBB = CGF.Builder.GetInsertBlock();
1776  CGF.EmitBlock(ContBB);
1777  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1778  /*NumReservedValues=*/2);
1779  PHI->addIncoming(OldVal, CurBB);
1780  Address NewAtomicAddr = CreateTempAlloca();
1781  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1782  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1783  requiresMemSetZero(getAtomicAddress().getElementType())) {
1784  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1785  }
1786  auto OldRVal = ConvertIntToValueOrAtomic(PHI, AggValueSlot::ignored(),
1787  SourceLocation(), /*AsValue=*/false);
1788  EmitAtomicUpdateValue(CGF, *this, OldRVal, UpdateOp, NewAtomicAddr);
1789  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1790  // Try to write new value using cmpxchg operation.
1791  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1792  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1793  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1794  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1795 }
1796 
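// Editor's illustrative note, not part of the upstream source: for the inline
// path the update is structured as an initial atomic load feeding a
// compare-and-swap loop. For a 32-bit atomic the emitted IR is shaped roughly
// like:
//
//     %old = load atomic i32, i32* %obj <order>
//     br label %atomic_cont
//   atomic_cont:
//     %cur  = phi i32 [ %old, %entry ], [ %prev, %atomic_cont ]
//     ; UpdateOp computes %desired from %cur through a small temporary
//     %pair = cmpxchg i32* %obj, i32 %cur, i32 %desired <order> <failure-order>
//     %prev = extractvalue { i32, i1 } %pair, 0
//     %ok   = extractvalue { i32, i1 } %pair, 1
//     br i1 %ok, label %atomic_exit, label %atomic_cont
//   atomic_exit:
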
1797 static void EmitAtomicUpdateValue(CodeGenFunction &CGF, AtomicInfo &Atomics,
1798  RValue UpdateRVal, Address DesiredAddr) {
1799  LValue AtomicLVal = Atomics.getAtomicLValue();
1800  LValue DesiredLVal;
1801  // Build new lvalue for temp address.
1802  if (AtomicLVal.isBitField()) {
1803  DesiredLVal =
1804  LValue::MakeBitfield(DesiredAddr, AtomicLVal.getBitFieldInfo(),
1805  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1806  AtomicLVal.getTBAAInfo());
1807  } else if (AtomicLVal.isVectorElt()) {
1808  DesiredLVal =
1809  LValue::MakeVectorElt(DesiredAddr, AtomicLVal.getVectorIdx(),
1810  AtomicLVal.getType(), AtomicLVal.getBaseInfo(),
1811  AtomicLVal.getTBAAInfo());
1812  } else {
1813  assert(AtomicLVal.isExtVectorElt());
1814  DesiredLVal = LValue::MakeExtVectorElt(
1815  DesiredAddr, AtomicLVal.getExtVectorElts(), AtomicLVal.getType(),
1816  AtomicLVal.getBaseInfo(), AtomicLVal.getTBAAInfo());
1817  }
1818  // Store new value in the corresponding memory area.
1819  assert(UpdateRVal.isScalar());
1820  CGF.EmitStoreThroughLValue(UpdateRVal, DesiredLVal);
1821 }
1822 
1823 void AtomicInfo::EmitAtomicUpdateLibcall(llvm::AtomicOrdering AO,
1824  RValue UpdateRVal, bool IsVolatile) {
1825  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1826 
1827  Address ExpectedAddr = CreateTempAlloca();
1828 
1829  EmitAtomicLoadLibcall(ExpectedAddr.getPointer(), AO, IsVolatile);
1830  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1831  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1832  CGF.EmitBlock(ContBB);
1833  Address DesiredAddr = CreateTempAlloca();
1834  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1835  requiresMemSetZero(getAtomicAddress().getElementType())) {
1836  auto *OldVal = CGF.Builder.CreateLoad(ExpectedAddr);
1837  CGF.Builder.CreateStore(OldVal, DesiredAddr);
1838  }
1839  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, DesiredAddr);
1840  auto *Res =
1841  EmitAtomicCompareExchangeLibcall(ExpectedAddr.getPointer(),
1842  DesiredAddr.getPointer(),
1843  AO, Failure);
1844  CGF.Builder.CreateCondBr(Res, ExitBB, ContBB);
1845  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1846 }
1847 
1848 void AtomicInfo::EmitAtomicUpdateOp(llvm::AtomicOrdering AO, RValue UpdateRVal,
1849  bool IsVolatile) {
1850  auto Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO);
1851 
1852  // Do the atomic load.
1853  auto *OldVal = EmitAtomicLoadOp(AO, IsVolatile);
1854  // For non-simple lvalues perform a compare-and-swap procedure.
1855  auto *ContBB = CGF.createBasicBlock("atomic_cont");
1856  auto *ExitBB = CGF.createBasicBlock("atomic_exit");
1857  auto *CurBB = CGF.Builder.GetInsertBlock();
1858  CGF.EmitBlock(ContBB);
1859  llvm::PHINode *PHI = CGF.Builder.CreatePHI(OldVal->getType(),
1860  /*NumReservedValues=*/2);
1861  PHI->addIncoming(OldVal, CurBB);
1862  Address NewAtomicAddr = CreateTempAlloca();
1863  Address NewAtomicIntAddr = emitCastToAtomicIntPointer(NewAtomicAddr);
1864  if ((LVal.isBitField() && BFI.Size != ValueSizeInBits) ||
1865  requiresMemSetZero(getAtomicAddress().getElementType())) {
1866  CGF.Builder.CreateStore(PHI, NewAtomicIntAddr);
1867  }
1868  EmitAtomicUpdateValue(CGF, *this, UpdateRVal, NewAtomicAddr);
1869  auto *DesiredVal = CGF.Builder.CreateLoad(NewAtomicIntAddr);
1870  // Try to write new value using cmpxchg operation.
1871  auto Res = EmitAtomicCompareExchangeOp(PHI, DesiredVal, AO, Failure);
1872  PHI->addIncoming(Res.first, CGF.Builder.GetInsertBlock());
1873  CGF.Builder.CreateCondBr(Res.second, ExitBB, ContBB);
1874  CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
1875 }
1876 
1877 void AtomicInfo::EmitAtomicUpdate(
1878  llvm::AtomicOrdering AO, const llvm::function_ref<RValue(RValue)> &UpdateOp,
1879  bool IsVolatile) {
1880  if (shouldUseLibcall()) {
1881  EmitAtomicUpdateLibcall(AO, UpdateOp, IsVolatile);
1882  } else {
1883  EmitAtomicUpdateOp(AO, UpdateOp, IsVolatile);
1884  }
1885 }
1886 
1887 void AtomicInfo::EmitAtomicUpdate(llvm::AtomicOrdering AO, RValue UpdateRVal,
1888  bool IsVolatile) {
1889  if (shouldUseLibcall()) {
1890  EmitAtomicUpdateLibcall(AO, UpdateRVal, IsVolatile);
1891  } else {
1892  EmitAtomicUpdateOp(AO, UpdateRVal, IsVolatile);
1893  }
1894 }
1895 
1896 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue lvalue,
1897  bool isInit) {
1898  bool IsVolatile = lvalue.isVolatileQualified();
1899  llvm::AtomicOrdering AO;
1900  if (lvalue.getType()->isAtomicType()) {
1901  AO = llvm::AtomicOrdering::SequentiallyConsistent;
1902  } else {
1903  AO = llvm::AtomicOrdering::Release;
1904  IsVolatile = true;
1905  }
1906  return EmitAtomicStore(rvalue, lvalue, AO, IsVolatile, isInit);
1907 }
1908 
1909 /// Emit a store to an l-value of atomic type.
1910 ///
1911 /// Note that the r-value is expected to be an r-value *of the atomic
1912 /// type*; this means that for aggregate r-values, it should include
1913 /// storage for any padding that was necessary.
1914 void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
1915  llvm::AtomicOrdering AO, bool IsVolatile,
1916  bool isInit) {
1917  // If this is an aggregate r-value, it should agree in type except
1918  // maybe for address-space qualification.
1919  assert(!rvalue.isAggregate() ||
1920  rvalue.getAggregateAddress().getElementType()
1921  == dest.getAddress().getElementType());
1922 
1923  AtomicInfo atomics(*this, dest);
1924  LValue LVal = atomics.getAtomicLValue();
1925 
1926  // If this is an initialization, just put the value there normally.
1927  if (LVal.isSimple()) {
1928  if (isInit) {
1929  atomics.emitCopyIntoMemory(rvalue);
1930  return;
1931  }
1932 
1933  // Check whether we should use a library call.
1934  if (atomics.shouldUseLibcall()) {
1935  // Produce a source address.
1936  Address srcAddr = atomics.materializeRValue(rvalue);
1937 
1938  // void __atomic_store(size_t size, void *mem, void *val, int order)
1939  CallArgList args;
1940  args.add(RValue::get(atomics.getAtomicSizeValue()),
1941  getContext().getSizeType());
1942  args.add(RValue::get(EmitCastToVoidPtr(atomics.getAtomicPointer())),
1943  getContext().VoidPtrTy);
1944  args.add(RValue::get(EmitCastToVoidPtr(srcAddr.getPointer())),
1945  getContext().VoidPtrTy);
1946  args.add(
1947  RValue::get(llvm::ConstantInt::get(IntTy, (int)llvm::toCABI(AO))),
1948  getContext().IntTy);
1949  emitAtomicLibcall(*this, "__atomic_store", getContext().VoidTy, args);
1950  return;
1951  }
1952 
1953  // Okay, we're doing this natively.
1954  llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
1955 
1956  // Do the atomic store.
1957  Address addr =
1958  atomics.emitCastToAtomicIntPointer(atomics.getAtomicAddress());
1959  intValue = Builder.CreateIntCast(
1960  intValue, addr.getElementType(), /*isSigned=*/false);
1961  llvm::StoreInst *store = Builder.CreateStore(intValue, addr);
1962 
1963  // Initializations don't need to be atomic.
1964  if (!isInit)
1965  store->setAtomic(AO);
1966 
1967  // Other decoration.
1968  if (IsVolatile)
1969  store->setVolatile(true);
1970  CGM.DecorateInstructionWithTBAA(store, dest.getTBAAInfo());
1971  return;
1972  }
1973 
1974  // Emit simple atomic update operation.
1975  atomics.EmitAtomicUpdate(AO, rvalue, IsVolatile);
1976 }
1977 
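// Editor's illustrative note, not part of the upstream source: for a simple
// lvalue the store above becomes either a libcall or a native atomic store.
// For a 4-byte object stored seq_cst the two shapes are roughly
//
//   call void @__atomic_store(i64 4, i8* %obj, i8* %val, i32 5)    ; libcall
//   store atomic i32 %val, i32* %obj seq_cst, align 4              ; inline
//
// Bit-field, vector-element, and ext-vector-element destinations instead go
// through AtomicInfo::EmitAtomicUpdate, which rewrites only the addressed bits
// inside a compare-and-swap loop.
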
1978 /// Emit a compare-and-exchange op for an atomic type.
1979 ///
1980 std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
1981  LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
1982  llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
1983  AggValueSlot Slot) {
1984  // If this is an aggregate r-value, it should agree in type except
1985  // maybe for address-space qualification.
1986  assert(!Expected.isAggregate() ||
1987  Expected.getAggregateAddress().getElementType() ==
1988  Obj.getAddress().getElementType());
1989  assert(!Desired.isAggregate() ||
1990  Desired.getAggregateAddress().getElementType() ==
1991  Obj.getAddress().getElementType());
1992  AtomicInfo Atomics(*this, Obj);
1993 
1994  return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
1995  IsWeak);
1996 }
1997 
1998 void CodeGenFunction::EmitAtomicUpdate(
1999  LValue LVal, llvm::AtomicOrdering AO,
2000  const llvm::function_ref<RValue(RValue)> &UpdateOp, bool IsVolatile) {
2001  AtomicInfo Atomics(*this, LVal);
2002  Atomics.EmitAtomicUpdate(AO, UpdateOp, IsVolatile);
2003 }
2004 
2005 void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
2006  AtomicInfo atomics(*this, dest);
2007 
2008  switch (atomics.getEvaluationKind()) {
2009  case TEK_Scalar: {
2010  llvm::Value *value = EmitScalarExpr(init);
2011  atomics.emitCopyIntoMemory(RValue::get(value));
2012  return;
2013  }
2014 
2015  case TEK_Complex: {
2016  ComplexPairTy value = EmitComplexExpr(init);
2017  atomics.emitCopyIntoMemory(RValue::getComplex(value));
2018  return;
2019  }
2020 
2021  case TEK_Aggregate: {
2022  // Fix up the destination if the initializer isn't an expression
2023  // of atomic type.
2024  bool Zeroed = false;
2025  if (!init->getType()->isAtomicType()) {
2026  Zeroed = atomics.emitMemSetZeroIfNecessary();
2027  dest = atomics.projectValue();
2028  }
2029 
2030  // Evaluate the expression directly into the destination.
2031  AggValueSlot slot = AggValueSlot::forLValue(dest,
2032  AggValueSlot::IsNotDestructed,
2033  AggValueSlot::DoesNotNeedGCBarriers,
2034  AggValueSlot::IsNotAliased,
2035  AggValueSlot::DoesNotOverlap,
2036  Zeroed ? AggValueSlot::IsZeroed :
2037  AggValueSlot::IsNotZeroed);
2038 
2039  EmitAggExpr(init, slot);
2040  return;
2041  }
2042  }
2043  llvm_unreachable("bad evaluation kind");
2044 }
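
// Editor's illustrative note, not part of the upstream source: EmitAtomicInit
// is reached for initializations such as
//
//   _Atomic(int) I = 1;                    // TEK_Scalar
//   _Atomic(_Complex double) C = 1.0;      // TEK_Complex
//   struct P { int x, y; };
//   _Atomic(struct P) A = { 1, 2 };        // TEK_Aggregate
//
// Initialization never needs an atomic instruction: the value (with any padding
// zeroed above when required) is simply written into place.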