CIRGenAtomic.cpp
1//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "clang/CIR/MissingFeatures.h"
15
16using namespace clang;
17using namespace clang::CIRGen;
18using namespace cir;
19
20namespace {
21class AtomicInfo {
22 CIRGenFunction &cgf;
23 QualType atomicTy;
24 QualType valueTy;
25 uint64_t atomicSizeInBits = 0;
26 uint64_t valueSizeInBits = 0;
27 CharUnits atomicAlign;
28 CharUnits valueAlign;
29 TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
30 bool useLibCall = true;
31 LValue lvalue;
32 mlir::Location loc;
33
34public:
35 AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
36 : cgf(cgf), loc(loc) {
37 assert(!lvalue.isGlobalReg());
38 ASTContext &ctx = cgf.getContext();
39 if (lvalue.isSimple()) {
40 atomicTy = lvalue.getType();
41 if (auto *ty = atomicTy->getAs<AtomicType>())
42 valueTy = ty->getValueType();
43 else
44 valueTy = atomicTy;
45 evaluationKind = cgf.getEvaluationKind(valueTy);
46
47 TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
48 TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
49 uint64_t valueAlignInBits = valueTypeInfo.Align;
50 uint64_t atomicAlignInBits = atomicTypeInfo.Align;
51 valueSizeInBits = valueTypeInfo.Width;
52 atomicSizeInBits = atomicTypeInfo.Width;
53 assert(valueSizeInBits <= atomicSizeInBits);
54 assert(valueAlignInBits <= atomicAlignInBits);
55
56 atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
57 valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
58 if (lvalue.getAlignment().isZero())
59 lvalue.setAlignment(atomicAlign);
60
61 this->lvalue = lvalue;
62 } else {
64 cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
65 }
66 useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
67 atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
68 }
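// For a simple declaration like `_Atomic(int) x;` on a typical 64-bit target,
// this constructor would record (editorial illustration, not from the file):
// valueTy = int, valueSizeInBits = atomicSizeInBits = 32, valueAlign =
// atomicAlign = 4 bytes, evaluationKind = TEK_Scalar, and useLibCall = false
// because such targets report lock-free 32-bit atomics via hasBuiltinAtomic().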
69
70 QualType getValueType() const { return valueTy; }
71 CharUnits getAtomicAlignment() const { return atomicAlign; }
72 TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
73 mlir::Value getAtomicPointer() const {
74 if (lvalue.isSimple())
75 return lvalue.getPointer();
76 assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
77 return nullptr;
78 }
79 bool shouldUseLibCall() const { return useLibCall; }
80 const LValue &getAtomicLValue() const { return lvalue; }
81 Address getAtomicAddress() const {
82 mlir::Type elemTy;
83 if (lvalue.isSimple()) {
84 elemTy = lvalue.getAddress().getElementType();
85 } else {
86 assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
87 cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
88 }
89 return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
90 }
91
92 /// Is the atomic size larger than the underlying value type?
93 ///
94 /// Note that the absence of padding does not mean that atomic
95 /// objects are completely interchangeable with non-atomic
96 /// objects: we might have promoted the alignment of a type
97 /// without making it bigger.
98 bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
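// Two concrete cases of the distinction drawn above (editorial sketch,
// assuming a typical 32-bit x86 ABI):
//
//   struct P { char c[3]; };
//   _Atomic(struct P) a;   // value width 24 bits, atomic width rounded up to
//                          // 32 bits -> hasPadding() is true
//   _Atomic(long long) b;  // width stays 64 bits, but alignment is promoted
//                          // from 4 to 8 -> no padding, yet the layout still
//                          // differs from a plain long long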
99
100 bool emitMemSetZeroIfNecessary() const;
101
102 mlir::Value getScalarRValValueOrNull(RValue rvalue) const;
103
104 /// Cast the given pointer to an integer pointer suitable for atomic
105 /// operations on the source.
106 Address castToAtomicIntPointer(Address addr) const;
107
108 /// If addr is compatible with the iN that will be used for an atomic
109 /// operation, bitcast it. Otherwise, create a temporary that is suitable and
110 /// copy the value across.
111 Address convertToAtomicIntPointer(Address addr) const;
112
113 /// Converts an rvalue to an integer value.
114 mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;
115
116 /// Copy an atomic r-value into atomic-layout memory.
117 void emitCopyIntoMemory(RValue rvalue) const;
118
119 /// Project an l-value down to the value field.
120 LValue projectValue() const {
121 assert(lvalue.isSimple());
122 Address addr = getAtomicAddress();
123 if (hasPadding()) {
124 cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
125 }
126
128 return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
129 }
130
131 /// Creates temp alloca for intermediate operations on atomic value.
132 Address createTempAlloca() const;
133
134private:
135 bool requiresMemSetZero(mlir::Type ty) const;
136};
137} // namespace
138
139// This function emits any expression (scalar, complex, or aggregate)
140// into a temporary alloca.
141static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
142 Address declPtr = cgf.createMemTemp(
143 e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
144 cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
145 /*Init*/ true);
146 return declPtr;
147}
148
149/// Does a store of the given IR type modify the full expected width?
150static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
151 uint64_t expectedSize) {
152 return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
153}
154
155/// Does the atomic type require memsetting to zero before initialization?
156///
157/// The IR type is provided as a way of making certain queries faster.
158bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
159 // If the atomic type has size padding, we definitely need a memset.
160 if (hasPadding())
161 return true;
162
163 // Otherwise, do some simple heuristics to try to avoid it:
164 switch (getEvaluationKind()) {
165 // For scalars and complexes, check whether the store size of the
166 // type uses the full size.
167 case cir::TEK_Scalar:
168 return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
169 case cir::TEK_Complex:
170 return !isFullSizeType(cgf.cgm,
171 mlir::cast<cir::ComplexType>(ty).getElementType(),
172 atomicSizeInBits / 2);
173 // Padding in structs has an undefined bit pattern. User beware.
174 case cir::TEK_Aggregate:
175 return false;
176 }
177 llvm_unreachable("bad evaluation kind");
178}
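// A worked example of the scalar heuristic above (editorial, not from the
// file): on x86-64, _Atomic(long double) keeps an 80-bit fp80 value in a
// 16-byte slot; the store size of the underlying 80-bit format (10 bytes)
// does not cover the full 128-bit atomic width, so isFullSizeType() is false
// and a zeroing memset is required before the object can be initialized.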
179
180Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
181 mlir::Type ty = addr.getElementType();
182 uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
183 if (sourceSizeInBits != atomicSizeInBits) {
184 cgf.cgm.errorNYI(
185 loc,
186 "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
187 }
188
189 return castToAtomicIntPointer(addr);
190}
191
192Address AtomicInfo::createTempAlloca() const {
193 Address tempAlloca = cgf.createMemTemp(
194 (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
195 : atomicTy,
196 getAtomicAlignment(), loc, "atomic-temp");
197
198 // Cast to pointer to value type for bitfields.
199 if (lvalue.isBitField()) {
200 cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
201 }
202
203 return tempAlloca;
204}
205
206mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
207 if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
208 return rvalue.getValue();
209 return nullptr;
210}
211
212Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
213 auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
214 // Don't bother with int casts if the integer size is the same.
215 if (intTy && intTy.getWidth() == atomicSizeInBits)
216 return addr;
217 auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
218 return addr.withElementType(cgf.getBuilder(), ty);
219}
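// Minimal sketch of the effect of this cast (editorial; exact CIR syntax may
// differ): for an _Atomic(float) the incoming address has type
// !cir.ptr<!cir.float>; since the atomic width is 32 bits it is rewritten to
// !cir.ptr<!u32i>, so the atomic operation can be expressed on an integer of
// exactly the atomic width.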
220
221bool AtomicInfo::emitMemSetZeroIfNecessary() const {
222 assert(lvalue.isSimple());
223 Address addr = lvalue.getAddress();
224 if (!requiresMemSetZero(addr.getElementType()))
225 return false;
226
227 cgf.cgm.errorNYI(loc,
228 "AtomicInfo::emitMemSetZeroIfNecaessary: emit memset zero");
229 return false;
230}
231
232 /// Return true if \param valueTy is a type that should be cast to an integer
233 /// around the atomic memory operation. If \param cmpxchg is true, then the
234 /// cast of a floating point type is made as that instruction cannot have
235/// floating point operands. TODO: Allow compare-and-exchange and FP - see
236/// comment in CIRGenAtomicExpandPass.cpp.
237static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
238 if (cir::isAnyFloatingPointType(valueTy))
239 return isa<cir::FP80Type>(valueTy) || cmpxchg;
240 return !isa<cir::IntType>(valueTy) && !isa<cir::PointerType>(valueTy);
241}
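// Consequences of this predicate, spelled out as examples (editorial note):
//   int, long, T*      -> never cast (already integer- or pointer-typed)
//   float, double      -> cast only when cmpxchg is true, because the
//                         compare-and-exchange op cannot take FP operands
//   long double (FP80) -> always cast (padded storage)
//   other types        -> always cast to an iN of the atomic width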
242
243mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
244 // If we've got a scalar value of the right size, try to avoid going
245 // through memory. Floats get cast if needed by AtomicExpandPass.
246 if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
247 if (!shouldCastToInt(value.getType(), cmpxchg))
248 return cgf.emitToMemory(value, valueTy);
249
250 cgf.cgm.errorNYI(
251 loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
252 return nullptr;
253 }
254
255 cgf.cgm.errorNYI(
256 loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
257 return nullptr;
258}
259
260/// Copy an r-value into memory as part of storing to an atomic type.
261/// This needs to create a bit-pattern suitable for atomic operations.
262void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
263 assert(lvalue.isSimple());
264
265 // If we have an r-value, the rvalue should be of the atomic type,
266 // which means that the caller is responsible for having zeroed
267 // any padding. Just do an aggregate copy of that type.
268 if (rvalue.isAggregate()) {
269 cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
270 return;
271 }
272
273 // Okay, otherwise we're copying stuff.
274
275 // Zero out the buffer if necessary.
276 emitMemSetZeroIfNecessary();
277
278 // Drill past the padding if present.
279 LValue tempLValue = projectValue();
280
281 // Okay, store the rvalue in.
282 if (rvalue.isScalar()) {
283 cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
284 } else {
285 cgf.cgm.errorNYI("copying complex into atomic lvalue");
286 }
287}
288
289static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
290 Address dest, Address ptr, Address val1,
291 Address val2, uint64_t size,
292 cir::MemOrder successOrder,
293 cir::MemOrder failureOrder) {
294 mlir::Location loc = cgf.getLoc(e->getSourceRange());
295
296 CIRGenBuilderTy &builder = cgf.getBuilder();
297 mlir::Value expected = builder.createLoad(loc, val1);
298 mlir::Value desired = builder.createLoad(loc, val2);
299
300 auto cmpxchg = cir::AtomicCmpXchgOp::create(
301 builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
302 expected, desired,
303 cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
304 cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
305 builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
306
307 cmpxchg.setIsVolatile(e->isVolatile());
308 cmpxchg.setWeak(isWeak);
309
310 mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
311 cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
312 [&](mlir::OpBuilder &, mlir::Location) {
313 auto ptrTy = mlir::cast<cir::PointerType>(
314 val1.getPointer().getType());
315 if (val1.getElementType() != ptrTy.getPointee()) {
316 val1 = val1.withPointer(builder.createPtrBitcast(
317 val1.getPointer(), val1.getElementType()));
318 }
319 builder.createStore(loc, cmpxchg.getOld(), val1);
320 builder.createYield(loc);
321 });
322
323 // Update the memory at Dest with Success's value.
324 cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
325 cgf.makeAddrLValue(dest, e->getType()),
326 /*isInit=*/false);
327}
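// Rough shape of the CIR emitted above (editorial sketch; names and syntax
// are approximate):
//
//   %old, %ok = cir.atomic.cmp_xchg(%ptr, %expected, %desired,
//                                   success = <successOrder>,
//                                   failure = <failureOrder>)
//   %failed = cir.unary(not, %ok)
//   cir.if %failed {                 // on failure, publish the observed value
//     cir.store %old, %val1
//   }
//   cir.store %ok, %dest             // boolean result of the builtin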
328
329static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
330 bool isWeak, Address dest, Address ptr,
331 Address val1, Address val2,
332 Expr *failureOrderExpr, uint64_t size,
333 cir::MemOrder successOrder) {
334 Expr::EvalResult failureOrderEval;
335 if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
336 uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
337
338 cir::MemOrder failureOrder;
339 if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
340 failureOrder = cir::MemOrder::Relaxed;
341 } else {
342 switch ((cir::MemOrder)failureOrderInt) {
343 case cir::MemOrder::Relaxed:
344 // 31.7.2.18: "The failure argument shall not be memory_order_release
345 // nor memory_order_acq_rel". Fall back to monotonic.
346 case cir::MemOrder::Release:
347 case cir::MemOrder::AcquireRelease:
348 failureOrder = cir::MemOrder::Relaxed;
349 break;
350 case cir::MemOrder::Consume:
351 case cir::MemOrder::Acquire:
352 failureOrder = cir::MemOrder::Acquire;
353 break;
354 case cir::MemOrder::SequentiallyConsistent:
355 failureOrder = cir::MemOrder::SequentiallyConsistent;
356 break;
357 }
358 }
359
360 // Prior to C++17, "the failure argument shall be no stronger than the
361 // success argument". This condition has been lifted and the only
362 // precondition is 31.7.2.18. Effectively treat this as a DR and skip
363 // language version checks.
364 emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
365 failureOrder);
366 return;
367 }
368
370 cgf.cgm.errorNYI(e->getSourceRange(),
371 "emitAtomicCmpXchgFailureSet: non-constant failure order");
372}
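// Example of the demotion performed above (editorial, not from the file):
//
//   __c11_atomic_compare_exchange_strong(&a, &e, 1,
//                                        memory_order_acq_rel,   // success
//                                        memory_order_release);  // failure
//
// memory_order_release is not a permitted failure order
// ([atomics.types.operations]), so the constant-folded failure order becomes
// memory_order_relaxed, while consume/acquire map to acquire and seq_cst is
// kept as seq_cst.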
373
374static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
375 Address ptr, Address val1, Address val2,
376 Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
377 cir::MemOrder order) {
378 std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
379 if (scopeModel) {
380 assert(!cir::MissingFeatures::atomicScope());
381 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
382 return;
383 }
384
386 llvm::StringRef opName;
387
388 CIRGenBuilderTy &builder = cgf.getBuilder();
389 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
390 auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
391 cir::AtomicFetchKindAttr fetchAttr;
392 bool fetchFirst = true;
393
394 switch (expr->getOp()) {
395 case AtomicExpr::AO__c11_atomic_init:
396 llvm_unreachable("already handled!");
397
398 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
399 emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
400 val2, failureOrderExpr, size, order);
401 return;
402
403 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
404 emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
405 val2, failureOrderExpr, size, order);
406 return;
407
408 case AtomicExpr::AO__atomic_compare_exchange:
409 case AtomicExpr::AO__atomic_compare_exchange_n: {
410 bool isWeak = false;
411 if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
412 emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
413 failureOrderExpr, size, order);
414 } else {
416 cgf.cgm.errorNYI(expr->getSourceRange(),
417 "emitAtomicOp: non-constant isWeak");
418 }
419 return;
420 }
421
422 case AtomicExpr::AO__c11_atomic_load:
423 case AtomicExpr::AO__atomic_load_n:
424 case AtomicExpr::AO__atomic_load: {
425 cir::LoadOp load =
426 builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
427
429
430 load->setAttr("mem_order", orderAttr);
431
432 builder.createStore(loc, load->getResult(0), dest);
433 return;
434 }
435
436 case AtomicExpr::AO__c11_atomic_store:
437 case AtomicExpr::AO__atomic_store_n:
438 case AtomicExpr::AO__atomic_store: {
439 cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
440
442
443 builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
444 /*align=*/mlir::IntegerAttr{}, orderAttr);
445 return;
446 }
447
448 case AtomicExpr::AO__c11_atomic_exchange:
449 case AtomicExpr::AO__atomic_exchange_n:
450 case AtomicExpr::AO__atomic_exchange:
451 opName = cir::AtomicXchgOp::getOperationName();
452 break;
453
454 case AtomicExpr::AO__atomic_add_fetch:
455 fetchFirst = false;
456 [[fallthrough]];
457 case AtomicExpr::AO__c11_atomic_fetch_add:
458 case AtomicExpr::AO__atomic_fetch_add:
459 opName = cir::AtomicFetchOp::getOperationName();
460 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
461 cir::AtomicFetchKind::Add);
462 break;
463
464 case AtomicExpr::AO__atomic_sub_fetch:
465 fetchFirst = false;
466 [[fallthrough]];
467 case AtomicExpr::AO__c11_atomic_fetch_sub:
468 case AtomicExpr::AO__atomic_fetch_sub:
469 opName = cir::AtomicFetchOp::getOperationName();
470 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
471 cir::AtomicFetchKind::Sub);
472 break;
473
474 case AtomicExpr::AO__atomic_min_fetch:
475 fetchFirst = false;
476 [[fallthrough]];
477 case AtomicExpr::AO__c11_atomic_fetch_min:
478 case AtomicExpr::AO__atomic_fetch_min:
479 opName = cir::AtomicFetchOp::getOperationName();
480 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
481 cir::AtomicFetchKind::Min);
482 break;
483
484 case AtomicExpr::AO__atomic_max_fetch:
485 fetchFirst = false;
486 [[fallthrough]];
487 case AtomicExpr::AO__c11_atomic_fetch_max:
488 case AtomicExpr::AO__atomic_fetch_max:
489 opName = cir::AtomicFetchOp::getOperationName();
490 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
491 cir::AtomicFetchKind::Max);
492 break;
493
494 case AtomicExpr::AO__atomic_and_fetch:
495 fetchFirst = false;
496 [[fallthrough]];
497 case AtomicExpr::AO__c11_atomic_fetch_and:
498 case AtomicExpr::AO__atomic_fetch_and:
499 opName = cir::AtomicFetchOp::getOperationName();
500 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
501 cir::AtomicFetchKind::And);
502 break;
503
504 case AtomicExpr::AO__atomic_or_fetch:
505 fetchFirst = false;
506 [[fallthrough]];
507 case AtomicExpr::AO__c11_atomic_fetch_or:
508 case AtomicExpr::AO__atomic_fetch_or:
509 opName = cir::AtomicFetchOp::getOperationName();
510 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
511 cir::AtomicFetchKind::Or);
512 break;
513
514 case AtomicExpr::AO__atomic_xor_fetch:
515 fetchFirst = false;
516 [[fallthrough]];
517 case AtomicExpr::AO__c11_atomic_fetch_xor:
518 case AtomicExpr::AO__atomic_fetch_xor:
519 opName = cir::AtomicFetchOp::getOperationName();
520 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
521 cir::AtomicFetchKind::Xor);
522 break;
523
524 case AtomicExpr::AO__atomic_nand_fetch:
525 fetchFirst = false;
526 [[fallthrough]];
527 case AtomicExpr::AO__c11_atomic_fetch_nand:
528 case AtomicExpr::AO__atomic_fetch_nand:
529 opName = cir::AtomicFetchOp::getOperationName();
530 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
531 cir::AtomicFetchKind::Nand);
532 break;
533
534 case AtomicExpr::AO__atomic_test_and_set: {
535 auto op = cir::AtomicTestAndSetOp::create(
536 builder, loc, ptr.getPointer(), order,
537 builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
538 expr->isVolatile());
539 builder.createStore(loc, op, dest);
540 return;
541 }
542
543 case AtomicExpr::AO__atomic_clear: {
544 cir::AtomicClearOp::create(
545 builder, loc, ptr.getPointer(), order,
546 builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
547 expr->isVolatile());
548 return;
549 }
550
551 case AtomicExpr::AO__opencl_atomic_init:
552
553 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
554 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
555
556 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
557 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
558
559 case AtomicExpr::AO__scoped_atomic_compare_exchange:
560 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
561
562 case AtomicExpr::AO__opencl_atomic_load:
563 case AtomicExpr::AO__hip_atomic_load:
564 case AtomicExpr::AO__scoped_atomic_load_n:
565 case AtomicExpr::AO__scoped_atomic_load:
566
567 case AtomicExpr::AO__opencl_atomic_store:
568 case AtomicExpr::AO__hip_atomic_store:
569 case AtomicExpr::AO__scoped_atomic_store:
570 case AtomicExpr::AO__scoped_atomic_store_n:
571
572 case AtomicExpr::AO__hip_atomic_exchange:
573 case AtomicExpr::AO__opencl_atomic_exchange:
574 case AtomicExpr::AO__scoped_atomic_exchange_n:
575 case AtomicExpr::AO__scoped_atomic_exchange:
576
577 case AtomicExpr::AO__scoped_atomic_add_fetch:
578
579 case AtomicExpr::AO__hip_atomic_fetch_add:
580 case AtomicExpr::AO__opencl_atomic_fetch_add:
581 case AtomicExpr::AO__scoped_atomic_fetch_add:
582
583 case AtomicExpr::AO__scoped_atomic_sub_fetch:
584
585 case AtomicExpr::AO__hip_atomic_fetch_sub:
586 case AtomicExpr::AO__opencl_atomic_fetch_sub:
587 case AtomicExpr::AO__scoped_atomic_fetch_sub:
588
589 case AtomicExpr::AO__scoped_atomic_min_fetch:
590
591 case AtomicExpr::AO__hip_atomic_fetch_min:
592 case AtomicExpr::AO__opencl_atomic_fetch_min:
593 case AtomicExpr::AO__scoped_atomic_fetch_min:
594
595 case AtomicExpr::AO__scoped_atomic_max_fetch:
596
597 case AtomicExpr::AO__hip_atomic_fetch_max:
598 case AtomicExpr::AO__opencl_atomic_fetch_max:
599 case AtomicExpr::AO__scoped_atomic_fetch_max:
600
601 case AtomicExpr::AO__scoped_atomic_and_fetch:
602
603 case AtomicExpr::AO__hip_atomic_fetch_and:
604 case AtomicExpr::AO__opencl_atomic_fetch_and:
605 case AtomicExpr::AO__scoped_atomic_fetch_and:
606
607 case AtomicExpr::AO__scoped_atomic_or_fetch:
608
609 case AtomicExpr::AO__hip_atomic_fetch_or:
610 case AtomicExpr::AO__opencl_atomic_fetch_or:
611 case AtomicExpr::AO__scoped_atomic_fetch_or:
612
613 case AtomicExpr::AO__scoped_atomic_xor_fetch:
614
615 case AtomicExpr::AO__hip_atomic_fetch_xor:
616 case AtomicExpr::AO__opencl_atomic_fetch_xor:
617 case AtomicExpr::AO__scoped_atomic_fetch_xor:
618
619 case AtomicExpr::AO__scoped_atomic_nand_fetch:
620
621 case AtomicExpr::AO__scoped_atomic_fetch_nand:
622 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
623 return;
624 }
625
626 assert(!opName.empty() && "expected operation name to build");
627 mlir::Value loadVal1 = builder.createLoad(loc, val1);
628
629 SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
630 SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
631 mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
632 atomicOperands, atomicResTys);
633
634 if (fetchAttr)
635 rmwOp->setAttr("binop", fetchAttr);
636 rmwOp->setAttr("mem_order", orderAttr);
637 if (expr->isVolatile())
638 rmwOp->setAttr("is_volatile", builder.getUnitAttr());
639 if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
640 rmwOp->setAttr("fetch_first", builder.getUnitAttr());
641
642 mlir::Value result = rmwOp->getResult(0);
643 builder.createStore(loc, result, dest);
644}
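// How the fetch_first flag reads in practice (editorial note):
//   __atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST)  // result is the *old* value
// keeps fetch_first set, whereas
//   __atomic_add_fetch(p, 1, __ATOMIC_SEQ_CST)  // result is the *new* value
// clears it, so later lowering knows which of the two values the
// cir.atomic.fetch operation should yield.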
645
646static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
647 if (!cir::isValidCIRAtomicOrderingCABI(order))
648 return false;
649 auto memOrder = static_cast<cir::MemOrder>(order);
650 if (isStore)
651 return memOrder != cir::MemOrder::Consume &&
652 memOrder != cir::MemOrder::Acquire &&
653 memOrder != cir::MemOrder::AcquireRelease;
654 if (isLoad)
655 return memOrder != cir::MemOrder::Release &&
656 memOrder != cir::MemOrder::AcquireRelease;
657 return true;
658}
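// For instance (editorial example), a constant-folded call such as
//   __c11_atomic_store(&a, 1, memory_order_acquire);
// fails this check because acquire is not a valid store ordering, and
// emitAtomicExpr below simply does not emit the atomic operation for it.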
659
660RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
661 QualType atomicTy = e->getPtr()->getType()->getPointeeType();
662 QualType memTy = atomicTy;
663 if (const auto *ty = atomicTy->getAs<AtomicType>())
664 memTy = ty->getValueType();
665
666 Expr *isWeakExpr = nullptr;
667 Expr *orderFailExpr = nullptr;
668
669 Address val1 = Address::invalid();
670 Address val2 = Address::invalid();
671 Address dest = Address::invalid();
672 Address ptr = emitPointerWithAlignment(e->getPtr());
673
674 assert(!cir::MissingFeatures::atomicOpenMP());
675 if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
676 LValue lvalue = makeAddrLValue(ptr, atomicTy);
677 emitAtomicInit(e->getVal1(), lvalue);
678 return RValue::get(nullptr);
679 }
680
681 TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
682 uint64_t size = typeInfo.Width.getQuantity();
683
684 Expr::EvalResult orderConst;
685 mlir::Value order;
686 if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
687 order = emitScalarExpr(e->getOrder());
688
689 bool shouldCastToIntPtrTy = true;
690
691 switch (e->getOp()) {
692 default:
693 cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
694 return RValue::get(nullptr);
695
696 case AtomicExpr::AO__c11_atomic_init:
697 llvm_unreachable("already handled above with emitAtomicInit");
698
699 case AtomicExpr::AO__atomic_load_n:
700 case AtomicExpr::AO__c11_atomic_load:
701 case AtomicExpr::AO__atomic_test_and_set:
702 case AtomicExpr::AO__atomic_clear:
703 break;
704
705 case AtomicExpr::AO__atomic_load:
706 dest = emitPointerWithAlignment(e->getVal1());
707 break;
708
709 case AtomicExpr::AO__atomic_store:
710 val1 = emitPointerWithAlignment(e->getVal1());
711 break;
712
713 case AtomicExpr::AO__atomic_exchange:
714 val1 = emitPointerWithAlignment(e->getVal1());
715 dest = emitPointerWithAlignment(e->getVal2());
716 break;
717
718 case AtomicExpr::AO__atomic_compare_exchange:
719 case AtomicExpr::AO__atomic_compare_exchange_n:
720 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
721 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
722 val1 = emitPointerWithAlignment(e->getVal1());
723 if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
724 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
725 val2 = emitPointerWithAlignment(e->getVal2());
726 else
727 val2 = emitValToTemp(*this, e->getVal2());
728 orderFailExpr = e->getOrderFail();
729 if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
730 e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
731 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
732 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
733 isWeakExpr = e->getWeak();
734 break;
735
736 case AtomicExpr::AO__c11_atomic_fetch_add:
737 case AtomicExpr::AO__c11_atomic_fetch_sub:
738 if (memTy->isPointerType()) {
739 cgm.errorNYI(e->getSourceRange(),
740 "atomic fetch-and-add and fetch-and-sub for pointers");
741 return RValue::get(nullptr);
742 }
743 [[fallthrough]];
744 case AtomicExpr::AO__atomic_fetch_add:
745 case AtomicExpr::AO__atomic_fetch_max:
746 case AtomicExpr::AO__atomic_fetch_min:
747 case AtomicExpr::AO__atomic_fetch_sub:
748 case AtomicExpr::AO__atomic_add_fetch:
749 case AtomicExpr::AO__atomic_max_fetch:
750 case AtomicExpr::AO__atomic_min_fetch:
751 case AtomicExpr::AO__atomic_sub_fetch:
752 case AtomicExpr::AO__c11_atomic_fetch_max:
753 case AtomicExpr::AO__c11_atomic_fetch_min:
754 shouldCastToIntPtrTy = !memTy->isFloatingType();
755 [[fallthrough]];
756
757 case AtomicExpr::AO__atomic_fetch_and:
758 case AtomicExpr::AO__atomic_fetch_nand:
759 case AtomicExpr::AO__atomic_fetch_or:
760 case AtomicExpr::AO__atomic_fetch_xor:
761 case AtomicExpr::AO__atomic_and_fetch:
762 case AtomicExpr::AO__atomic_nand_fetch:
763 case AtomicExpr::AO__atomic_or_fetch:
764 case AtomicExpr::AO__atomic_xor_fetch:
765 case AtomicExpr::AO__atomic_exchange_n:
766 case AtomicExpr::AO__atomic_store_n:
767 case AtomicExpr::AO__c11_atomic_fetch_and:
768 case AtomicExpr::AO__c11_atomic_fetch_nand:
769 case AtomicExpr::AO__c11_atomic_fetch_or:
770 case AtomicExpr::AO__c11_atomic_fetch_xor:
771 case AtomicExpr::AO__c11_atomic_exchange:
772 case AtomicExpr::AO__c11_atomic_store:
773 val1 = emitValToTemp(*this, e->getVal1());
774 break;
775 }
776
777 QualType resultTy = e->getType().getUnqualifiedType();
778
779 // The inlined atomics only function on iN types, where N is a power of 2. We
780 // need to make sure (via temporaries if necessary) that all incoming values
781 // are compatible.
782 LValue atomicValue = makeAddrLValue(ptr, atomicTy);
783 AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
784
785 if (shouldCastToIntPtrTy) {
786 ptr = atomics.castToAtomicIntPointer(ptr);
787 if (val1.isValid())
788 val1 = atomics.convertToAtomicIntPointer(val1);
789 }
790 if (dest.isValid()) {
791 if (shouldCastToIntPtrTy)
792 dest = atomics.castToAtomicIntPointer(dest);
793 } else if (e->isCmpXChg()) {
794 dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
795 } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
796 dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
797 "test_and_set.bool");
798 } else if (!resultTy->isVoidType()) {
799 dest = atomics.createTempAlloca();
800 if (shouldCastToIntPtrTy)
801 dest = atomics.castToAtomicIntPointer(dest);
802 }
803
804 bool powerOf2Size = (size & (size - 1)) == 0;
805 bool useLibCall = !powerOf2Size || (size > 16);
806
807 // For atomics larger than 16 bytes, emit a libcall from the frontend. This
808 // avoids the overhead of dealing with excessively-large value types in IR.
809 // Non-power-of-2 values also lower to libcall here, as they are not currently
810 // permitted in IR instructions (although that constraint could be relaxed in
811 // the future). For other cases where a libcall is required on a given
812 // platform, we let the backend handle it (this includes handling for all of
813 // the size-optimized libcall variants, which are only valid up to 16 bytes.)
814 //
815 // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
816 if (useLibCall) {
817 assert(!cir::MissingFeatures::atomicUseLibCall());
818 cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
819 return RValue::get(nullptr);
820 }
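// Worked example of the size test above (editorial): for a 12-byte atomic,
// size & (size - 1) == 12 & 11 == 0b1100 & 0b1011 == 0b1000 != 0, so the size
// is not a power of two and the operation would take the (not yet implemented)
// libcall path; sizes of 1, 2, 4, 8 and 16 bytes stay on the inline path.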
821
822 bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
823 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
824 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
825 e->getOp() == AtomicExpr::AO__atomic_store ||
826 e->getOp() == AtomicExpr::AO__atomic_store_n ||
827 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
828 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
829 e->getOp() == AtomicExpr::AO__atomic_clear;
830 bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
831 e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
832 e->getOp() == AtomicExpr::AO__hip_atomic_load ||
833 e->getOp() == AtomicExpr::AO__atomic_load ||
834 e->getOp() == AtomicExpr::AO__atomic_load_n ||
835 e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
836 e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
837
838 if (!order) {
839 // We have evaluated the memory order as an integer constant in orderConst.
840 // We should not ever get to a case where the ordering isn't a valid CABI
841 // value, but it's hard to enforce that in general.
842 uint64_t ord = orderConst.Val.getInt().getZExtValue();
843 if (isMemOrderValid(ord, isStore, isLoad))
844 emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
845 size, static_cast<cir::MemOrder>(ord));
846 } else {
848 cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
849 return RValue::get(nullptr);
850 }
851
852 if (resultTy->isVoidType())
853 return RValue::get(nullptr);
854
855 return convertTempToRValue(
856 dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
857 e->getExprLoc());
858}
859
860void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
861 bool isVolatile = dest.isVolatileQualified();
862 auto order = cir::MemOrder::SequentiallyConsistent;
863 if (!dest.getType()->isAtomicType()) {
864 assert(!cir::MissingFeatures::atomicMicrosoftVolatile());
865 }
866 return emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
867}
868
869/// Emit a store to an l-value of atomic type.
870///
871/// Note that the r-value is expected to be an r-value of the atomic type; this
872/// means that for aggregate r-values, it should include storage for any padding
873/// that was necessary.
874void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
875 cir::MemOrder order, bool isVolatile,
876 bool isInit) {
877 // If this is an aggregate r-value, it should agree in type except
878 // maybe for address-space qualification.
879 mlir::Location loc = dest.getPointer().getLoc();
880 assert(!rvalue.isAggregate() ||
881 rvalue.getAggregateAddress().getElementType() ==
882 dest.getAddress().getElementType());
883
884 AtomicInfo atomics(*this, dest, loc);
885 LValue lvalue = atomics.getAtomicLValue();
886
887 if (lvalue.isSimple()) {
888 // If this is an initialization, just put the value there normally.
889 if (isInit) {
890 atomics.emitCopyIntoMemory(rvalue);
891 return;
892 }
893
894 // Check whether we should use a library call.
895 if (atomics.shouldUseLibCall()) {
896 assert(!cir::MissingFeatures::atomicUseLibCall());
897 cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
898 return;
899 }
900
901 // Okay, we're doing this natively.
902 mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);
903
904 // Do the atomic store.
905 Address addr = atomics.getAtomicAddress();
906 if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
907 if (shouldCastToInt(value.getType(), /*CmpXchg=*/false)) {
908 addr = atomics.castToAtomicIntPointer(addr);
909 valueToStore =
910 builder.createIntCast(valueToStore, addr.getElementType());
911 }
912 }
913 cir::StoreOp store = builder.createStore(loc, valueToStore, addr);
914
915 // Initializations don't need to be atomic.
916 if (!isInit) {
918 store.setMemOrder(order);
919 }
920
921 // Other decoration.
922 if (isVolatile)
923 store.setIsVolatile(true);
924
926 return;
927 }
928
929 cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
931}
932
933void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
934 AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
935
936 switch (atomics.getEvaluationKind()) {
937 case cir::TEK_Scalar: {
938 mlir::Value value = emitScalarExpr(init);
939 atomics.emitCopyIntoMemory(RValue::get(value));
940 return;
941 }
942
943 case cir::TEK_Complex: {
944 mlir::Value value = emitComplexExpr(init);
945 atomics.emitCopyIntoMemory(RValue::get(value));
946 return;
947 }
948
949 case cir::TEK_Aggregate: {
950 // Fix up the destination if the initializer isn't an expression
951 // of atomic type.
952 bool zeroed = false;
953 if (!init->getType()->isAtomicType()) {
954 zeroed = atomics.emitMemSetZeroIfNecessary();
955 dest = atomics.projectValue();
956 }
957
958 // Evaluate the expression directly into the destination.
959 AggValueSlot slot = AggValueSlot::forLValue(
960 dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
961 AggValueSlot::DoesNotOverlap,
962 zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
963 assert(!cir::MissingFeatures::aggValueSlotGC());
964
965 emitAggExpr(init, slot);
966 return;
967 }
968 }
969
970 llvm_unreachable("bad evaluation kind");
971}
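// Editorial usage sketch: the three evaluation kinds handled above correspond
// to initializations such as
//   _Atomic int i = 42;              // TEK_Scalar    -> emitScalarExpr + copy
//   _Atomic _Complex float c = 1.0f; // TEK_Complex   -> emitComplexExpr + copy
//   _Atomic struct S s = {0};        // TEK_Aggregate -> evaluated directly
//                                    //   into the (possibly zeroed) destination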