CIRGenAtomic.cpp
//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;
namespace {
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  CharUnits valueAlign;
  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  bool useLibCall = true;
  LValue lvalue;
  mlir::Location loc;

public:
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
    useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
        atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
  }

  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    return nullptr;
  }
  bool shouldUseLibCall() const { return useLibCall; }
  const LValue &getAtomicLValue() const { return lvalue; }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
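
  // Illustrative example (not part of the original file): given
  //   struct S { char c[3]; };
  //   _Atomic struct S s;
  // the value type is 24 bits wide while the atomic representation is
  // typically padded to the next power-of-two width (32 bits), so
  // hasPadding() returns true for it on common targets.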

  bool emitMemSetZeroIfNecessary() const;

  mlir::Value getScalarRValValueOrNull(RValue rvalue) const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the source.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  /// Converts an rvalue to an integer value.
  mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;

  RValue convertToValueOrAtomic(mlir::Value intVal, AggValueSlot resultSlot,
                                SourceLocation loc, bool asValue,
                                bool cmpxchg = false) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding()) {
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    }

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Emits an atomic load.
  /// \returns The loaded value.
  RValue emitAtomicLoad(AggValueSlot resultSlot, SourceLocation loc,
                        bool asValue, cir::MemOrder order, bool isVolatile);

  /// Creates a temp alloca for intermediate operations on an atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;

  /// Emits an atomic load as a CIR operation.
  mlir::Value emitAtomicLoadOp(cir::MemOrder order, bool isVolatile,
                               bool cmpxchg = false);
};
} // namespace

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
  Address declPtr = cgf.createMemTemp(
      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
                       /*Init*/ true);
  return declPtr;
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern. User beware.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}
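
// Illustrative examples for the heuristic above (not from the original file),
// assuming a typical 64-bit target:
//  * _Atomic int: TEK_Scalar with a 4-byte store covering the full 4-byte
//    atomic width, so no memset is required.
//  * _Atomic struct { char c[3]; }: padded from 3 to 4 bytes, so the padding
//    must be zeroed to give compare-exchange a deterministic bit pattern.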

Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }

  return castToAtomicIntPointer(addr);
}

RValue AtomicInfo::emitAtomicLoad(AggValueSlot resultSlot, SourceLocation loc,
                                  bool asValue, cir::MemOrder order,
                                  bool isVolatile) {
  // Check whether we should use a library call.
  if (shouldUseLibCall()) {
    cgf.cgm.errorNYI(loc, "emitAtomicLoad: emit atomic lib call");
    return RValue::get(nullptr);
  }

  // Okay, we're doing this natively.
  mlir::Value loadOp = emitAtomicLoadOp(order, isVolatile);

  // If we're ignoring an aggregate return, don't do anything.
  if (getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
    return RValue::getAggregate(Address::invalid(), false);

  // Okay, turn that back into the original value or atomic (for non-simple
  // lvalues) type.
  return convertToValueOrAtomic(loadOp, resultSlot, loc, asValue);
}

Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}

mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
  if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
    return rvalue.getValue();
  return nullptr;
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
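
// For example (a sketch of the effect, not part of the original file): an
// atomic float occupies a 32-bit slot, so an Address whose element type is
// the CIR float type is rebased here to a 32-bit unsigned integer pointer,
// while an Address that is already a 32-bit integer is returned unchanged.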

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}

/// Return true if \param valueTy is a type that should be cast to an integer
/// around the atomic memory operation. If \param cmpxchg is true, floating
/// point types are also cast because that instruction cannot have floating
/// point operands. TODO: Allow compare-and-exchange and FP - see comment in
/// CIRGenAtomicExpandPass.cpp.
static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
  if (cir::isAnyFloatingPointType(valueTy))
    return isa<cir::FP80Type>(valueTy) || cmpxchg;
  return !isa<cir::IntType>(valueTy) && !isa<cir::PointerType>(valueTy);
}
mlir::Value AtomicInfo::emitAtomicLoadOp(cir::MemOrder order, bool isVolatile,
                                         bool cmpxchg) {
  Address addr = getAtomicAddress();
  if (shouldCastToInt(addr.getElementType(), cmpxchg))
    addr = castToAtomicIntPointer(addr);

  cir::LoadOp op =
      cgf.getBuilder().createLoad(loc, addr, /*isVolatile=*/isVolatile);
  op.setMemOrder(order);

  return op;
}

mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
  // If we've got a scalar value of the right size, try to avoid going
  // through memory. Floats get cast if needed by AtomicExpandPass.
  if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
    if (!shouldCastToInt(value.getType(), cmpxchg))
      return cgf.emitToMemory(value, valueTy);

    cgf.cgm.errorNYI(
        loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
    return nullptr;
  }

  cgf.cgm.errorNYI(
      loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
  return nullptr;
}

RValue AtomicInfo::convertToValueOrAtomic(mlir::Value intVal,
                                          AggValueSlot resultSlot,
                                          SourceLocation loc, bool asValue,
                                          bool cmpxchg) const {
  // Try to avoid converting through memory in some easy cases.
  assert((mlir::isa<cir::IntType, cir::PointerType, cir::FPTypeInterface>(
             intVal.getType())) &&
         "Expected integer, pointer or floating point value when converting "
         "result.");
  bool isWholeValue =
      !lvalue.isBitField() || lvalue.getBitFieldInfo().size == valueSizeInBits;
  if (getEvaluationKind() == TEK_Scalar &&
      ((isWholeValue && !hasPadding()) || !asValue)) {
    mlir::Type valTy = asValue ? cgf.convertTypeForMem(valueTy)
                               : getAtomicAddress().getElementType();
    if (!shouldCastToInt(valTy, cmpxchg)) {
      assert((!mlir::isa<cir::IntType>(valTy) || intVal.getType() == valTy) &&
             "Different integer types.");
      return RValue::get(cgf.emitFromMemory(intVal, valueTy));
    }

    cgf.cgm.errorNYI("convertToValueOrAtomic: convert through bitcast");
    return RValue::get(nullptr);
  }

  cgf.cgm.errorNYI("convertToValueOrAtomic: convert through temp");
  return RValue::get(nullptr);
}
/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}

static void emitDefaultCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc) {
  mlir::ArrayAttr valuesAttr = builder.getArrayAttr({});
  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, valuesAttr, cir::CaseOpKind::Default,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}

// Create a "case" operation with the given list of orders as its values. Also
// create the region that will hold the body of the switch-case label.
static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
                                  mlir::Type orderType,
                                  llvm::ArrayRef<cir::MemOrder> orders) {
  llvm::SmallVector<mlir::Attribute> orderAttrs;
  for (cir::MemOrder order : orders)
    orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
  mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);

  mlir::OpBuilder::InsertPoint insertPoint;
  cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
                      insertPoint);
  builder.restoreInsertionPoint(insertPoint);
}

static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder,
                              cir::SyncScopeKind scope) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
      cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), scope),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));

  cmpxchg.setIsVolatile(e->isVolatile());
  cmpxchg.setWeak(isWeak);

  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee()) {
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      }
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Update the memory at dest with the success flag's value.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
                        cgf.makeAddrLValue(dest, e->getType()),
                        /*isInit=*/false);
}
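
// The generated pattern mirrors C11 compare-exchange semantics (illustrative
// user code, not from this file):
//   if (!atomic_compare_exchange_strong(p, &expected, desired))
//     /* 'expected' now holds the value that was observed in *p */;
// That is, on failure the old value is written back through val1, and the
// boolean success flag is stored to dest either way.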

static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder,
                                        cir::SyncScopeKind scope) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
      // 31.7.2.18: "The failure argument shall not be memory_order_release
      // nor memory_order_acq_rel". Fall back to monotonic.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
                      failureOrder, scope);
    return;
  }

  // The failure memory order is not a compile-time constant. The CIR atomic
  // ops require a constant value, so the memory order must be known at
  // compile time. In this case, we can switch based on the memory order and
  // call each variant individually.
  mlir::Value failureOrderVal = cgf.emitScalarExpr(failureOrderExpr);
  mlir::Location atomicLoc = cgf.getLoc(e->getSourceRange());
  cir::SwitchOp::create(
      cgf.getBuilder(), atomicLoc, failureOrderVal,
      [&](mlir::OpBuilder &b, mlir::Location loc, mlir::OperationState &os) {
        mlir::Block *switchBlock = cgf.getBuilder().getBlock();

        // case cir::MemOrder::Relaxed:
        // case cir::MemOrder::Release:
        // case cir::MemOrder::AcquireRelease:
        // 31.7.2.18: "The failure argument shall not be memory_order_release
        // nor memory_order_acq_rel". Fall back to monotonic.
        // Note: Since there are 3 options, it makes sense to just emit this
        // as a 'default', which prevents user code from 'falling off' of the
        // switch. Also, 'relaxed' being the default behavior is probably the
        // least harmful choice.
        emitDefaultCaseLabel(cgf.getBuilder(), atomicLoc);
        emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                          successOrder, cir::MemOrder::Relaxed, scope);
        cgf.getBuilder().createBreak(atomicLoc);
        cgf.getBuilder().setInsertionPointToEnd(switchBlock);

        // case cir::MemOrder::Consume:
        // case cir::MemOrder::Acquire:
        emitMemOrderCaseLabel(cgf.getBuilder(), loc, failureOrderVal.getType(),
                              {cir::MemOrder::Consume, cir::MemOrder::Acquire});
        emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                          successOrder, cir::MemOrder::Acquire, scope);
        cgf.getBuilder().createBreak(atomicLoc);
        cgf.getBuilder().setInsertionPointToEnd(switchBlock);

        // case cir::MemOrder::SequentiallyConsistent:
        emitMemOrderCaseLabel(cgf.getBuilder(), loc, failureOrderVal.getType(),
                              {cir::MemOrder::SequentiallyConsistent});
        emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                          successOrder, cir::MemOrder::SequentiallyConsistent,
                          scope);
        cgf.getBuilder().createBreak(atomicLoc);
        cgf.getBuilder().setInsertionPointToEnd(switchBlock);

        cgf.getBuilder().createYield(atomicLoc);
      });
}
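
// Illustrative user code that reaches the runtime switch above, because the
// failure order is not a compile-time constant:
//   _Bool try_swap(_Atomic int *p, int *exp, int des, int fail_order) {
//     return __c11_atomic_compare_exchange_strong(p, exp, des,
//                                                 __ATOMIC_SEQ_CST,
//                                                 fail_order);
//   }
// This lowers to a switch over fail_order with a default (relaxed) region,
// an "anyof" region for consume/acquire, and a region for seq_cst.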

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order, cir::SyncScopeKind scope) {
  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order, scope);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order, scope);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order, scope);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load: {
    cir::LoadOp load =
        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());

    load->setAttr("mem_order", orderAttr);
    load->setAttr("sync_scope", scopeAttr);

    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);

    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;

  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
        expr->isVolatile());
    builder.createStore(loc, op, dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
        expr->isVolatile());
    return;
  }

  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::UIncWrap);
    break;

  case AtomicExpr::AO__atomic_fetch_udec:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::UDecWrap);
    break;

  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:

  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:

  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:

  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:

  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:

  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:

  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:

  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    return;
  }

  assert(!opName.empty() && "expected operation name to build");
  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  rmwOp->setAttr("sync_scope", scopeAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}

// Map clang sync scope to CIR sync scope.
static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
                                                SourceRange range,
                                                clang::SyncScope scope) {
  switch (scope) {
  case SyncScope::SingleScope:
    return cir::SyncScopeKind::SingleThread;
  case SyncScope::SystemScope:
    return cir::SyncScopeKind::System;
  case SyncScope::DeviceScope:
    return cir::SyncScopeKind::Device;
  case SyncScope::WorkgroupScope:
    return cir::SyncScopeKind::Workgroup;
  case SyncScope::WavefrontScope:
    return cir::SyncScopeKind::Wavefront;
  case SyncScope::ClusterScope:
    return cir::SyncScopeKind::Cluster;

  case SyncScope::HIPSingleThread:
    return cir::SyncScopeKind::HIPSingleThread;
  case SyncScope::HIPSystem:
    return cir::SyncScopeKind::HIPSystem;
  case SyncScope::HIPAgent:
    return cir::SyncScopeKind::HIPAgent;
  case SyncScope::HIPWorkgroup:
    return cir::SyncScopeKind::HIPWorkgroup;
  case SyncScope::HIPWavefront:
    return cir::SyncScopeKind::HIPWavefront;
  case SyncScope::HIPCluster:
    return cir::SyncScopeKind::HIPCluster;

  case SyncScope::OpenCLWorkGroup:
    return cir::SyncScopeKind::OpenCLWorkGroup;
  case SyncScope::OpenCLDevice:
    return cir::SyncScopeKind::OpenCLDevice;
  case SyncScope::OpenCLAllSVMDevices:
    return cir::SyncScopeKind::OpenCLAllSVMDevices;
  case SyncScope::OpenCLSubGroup:
    return cir::SyncScopeKind::OpenCLSubGroup;
  }

  llvm_unreachable("unhandled sync scope");
}
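
// Illustrative user code that exercises this mapping (assuming the generic
// __MEMORY_SCOPE_* constants accepted by the __scoped_atomic_* builtins):
//   int old = __scoped_atomic_fetch_add(p, 1, __ATOMIC_SEQ_CST,
//                                       __MEMORY_SCOPE_DEVICE);
// A constant scope argument is mapped directly; a runtime scope goes through
// the switch emitted by the overload below.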

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order,
                         const std::optional<Expr::EvalResult> &scopeConst,
                         mlir::Value scopeValue) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();

  if (!scopeModel) {
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, cir::SyncScopeKind::System);
    return;
  }

  if (scopeConst.has_value()) {
    cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
        cgf, expr->getScope()->getSourceRange(),
        scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
    emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
                 size, order, mappedScope);
    return;
  }

  // The sync scope is not a compile-time constant. Emit a switch statement to
  // handle each possible value of the sync scope.
  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  llvm::ArrayRef<unsigned> allScopes = scopeModel->getRuntimeValues();
  unsigned fallback = scopeModel->getFallBackValue();

  cir::SwitchOp::create(
      builder, loc, scopeValue,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();

        // Default case -- use the fallback scope.
        cir::SyncScopeKind fallbackScope = convertSyncScopeToCIR(
            cgf, expr->getScope()->getSourceRange(), scopeModel->map(fallback));
        emitDefaultCaseLabel(builder, loc);
        emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
                     failureOrderExpr, size, order, fallbackScope);
        builder.createBreak(loc);
        builder.setInsertionPointToEnd(switchBlock);

        // Emit a switch case for each non-fallback runtime scope value.
        for (unsigned scope : allScopes) {
          if (scope == fallback)
            continue;

          cir::SyncScopeKind cirScope = convertSyncScopeToCIR(
              cgf, expr->getScope()->getSourceRange(), scopeModel->map(scope));

          mlir::ArrayAttr casesAttr = builder.getArrayAttr(
              {cir::IntAttr::get(scopeValue.getType(), scope)});
          mlir::OpBuilder::InsertPoint insertPoint;
          cir::CaseOp::create(builder, loc, casesAttr, cir::CaseOpKind::Equal,
                              insertPoint);

          builder.restoreInsertionPoint(insertPoint);
          emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr,
                       failureOrderExpr, size, order, cirScope);
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        }

        builder.createYield(loc);
      });
}

static std::optional<cir::MemOrder>
getEffectiveAtomicMemOrder(cir::MemOrder oriOrder, bool isStore, bool isLoad,
                           bool isFence) {
  // Some memory orders are not supported by partial atomic operations:
  //  - memory_order_relaxed is not valid for fence operations;
  //  - memory_order_consume and memory_order_acquire are not valid for
  //    write-only operations;
  //  - memory_order_release is not valid for read-only operations;
  //  - memory_order_acq_rel is only valid for read-write operations.
  if (isStore) {
    if (oriOrder == cir::MemOrder::Consume ||
        oriOrder == cir::MemOrder::Acquire ||
        oriOrder == cir::MemOrder::AcquireRelease)
      return std::nullopt;
  } else if (isLoad) {
    if (oriOrder == cir::MemOrder::Release ||
        oriOrder == cir::MemOrder::AcquireRelease)
      return std::nullopt;
  } else if (isFence) {
    if (oriOrder == cir::MemOrder::Relaxed)
      return std::nullopt;
  }
  // memory_order_consume is not implemented; it is always treated like
  // memory_order_acquire.
  if (oriOrder == cir::MemOrder::Consume)
    return cir::MemOrder::Acquire;
  return oriOrder;
}
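
// The resulting mapping, tabulated (illustrative):
//   original order   store       load        fence
//   relaxed          relaxed     relaxed     (invalid)
//   consume          (invalid)   acquire     acquire
//   acquire          (invalid)   acquire     acquire
//   release          release     (invalid)   release
//   acq_rel          (invalid)   (invalid)   acq_rel
//   seq_cst          seq_cst     seq_cst     seq_cst
// Read-write operations (none of the three flags set) accept every order,
// with consume again strengthened to acquire.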

static void emitAtomicExprWithDynamicMemOrder(
    CIRGenFunction &cgf, mlir::Value order, bool isStore, bool isLoad,
    bool isFence, llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
  if (!order)
    return;
  // The memory order is not known at compile time. The atomic operations
  // can't handle runtime memory orders; the memory order must be hard coded.
  // Generate a "switch" statement that converts a runtime value into a
  // compile-time value.
  CIRGenBuilderTy &builder = cgf.getBuilder();
  cir::SwitchOp::create(
      builder, order.getLoc(), order,
      [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
        mlir::Block *switchBlock = builder.getBlock();

        auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders) {
          // Check that every case has the same effective memory order.
          for (int i = 1, e = caseOrders.size(); i < e; i++)
            assert((getEffectiveAtomicMemOrder(caseOrders[i - 1], isStore,
                                               isLoad, isFence) ==
                    getEffectiveAtomicMemOrder(caseOrders[i], isStore, isLoad,
                                               isFence)) &&
                   "Effective memory order must be the same!");
          // Emit the case label and the atomic operation if necessary.
          if (caseOrders.empty()) {
            emitDefaultCaseLabel(builder, loc);
            // There is no good way to report an unsupported memory order at
            // runtime, hence the fallback to memory_order_relaxed.
            if (!isFence)
              emitAtomicOpFn(cir::MemOrder::Relaxed);
          } else if (std::optional<cir::MemOrder> actualOrder =
                         getEffectiveAtomicMemOrder(caseOrders[0], isStore,
                                                    isLoad, isFence)) {
            // Already included in the default case.
            if (!isFence && actualOrder == cir::MemOrder::Relaxed)
              return;
            // Create a case operation for the effective memory order. If
            // there are multiple cases in `caseOrders`, the effective order
            // of each case must be the same; the caller must guarantee this.
            emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
            emitAtomicOpFn(actualOrder.value());
          } else {
            // Do nothing if (!caseOrders.empty() && !actualOrder).
            return;
          }
          builder.createBreak(loc);
          builder.setInsertionPointToEnd(switchBlock);
        };

        emitMemOrderCase(/*default:*/ {});
        emitMemOrderCase({cir::MemOrder::Relaxed});
        emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire});
        emitMemOrderCase({cir::MemOrder::Release});
        emitMemOrderCase({cir::MemOrder::AcquireRelease});
        emitMemOrderCase({cir::MemOrder::SequentiallyConsistent});

        builder.createYield(loc);
      });
}
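
// Rough shape of the CIR emitted above (a sketch; the exact printed syntax
// may differ), using the C ABI order values 0..5:
//   cir.switch (%order) {
//     cir.case (default, [])   { ...relaxed variant... cir.break }
//     cir.case (anyof, [1, 2]) { ...acquire variant... cir.break }
//     cir.case (anyof, [3])    { ...release variant... cir.break }
//     cir.case (anyof, [4])    { ...acq_rel variant... cir.break }
//     cir.case (anyof, [5])    { ...seq_cst variant... cir.break }
//     cir.yield
//   }
// The relaxed case (value 0) is folded into the default region.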

void CIRGenFunction::emitAtomicExprWithMemOrder(
    const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
    llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
  // Emit the memory order operand, and try to evaluate it as a constant.
  Expr::EvalResult eval;
  if (memOrder->EvaluateAsInt(eval, getContext())) {
    uint64_t constOrder = eval.Val.getInt().getZExtValue();
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    if (!cir::isValidCIRAtomicOrderingCABI(constOrder))
      return;
    cir::MemOrder oriOrder = static_cast<cir::MemOrder>(constOrder);
    if (std::optional<cir::MemOrder> actualOrder =
            getEffectiveAtomicMemOrder(oriOrder, isStore, isLoad, isFence))
      emitAtomicOpFn(actualOrder.value());
    return;
  }

  // Otherwise, handle variable memory ordering. Emit a `SwitchOp` to convert
  // the dynamic value to a static value.
  mlir::Value dynOrder = emitScalarExpr(memOrder);
  emitAtomicExprWithDynamicMemOrder(*this, dynOrder, isStore, isLoad, isFence,
                                    emitAtomicOpFn);
}
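
// For example (illustrative): __atomic_load_n(p, __ATOMIC_ACQUIRE) has a
// constant order, so emitAtomicOpFn is invoked exactly once with
// MemOrder::Acquire and no switch is emitted. With a runtime order, as in
// __atomic_load_n(p, order), lowering goes through
// emitAtomicExprWithDynamicMemOrder above instead.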

RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  // Emit the sync scope operand, and try to evaluate it as a constant.
  mlir::Value scope =
      e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
  std::optional<Expr::EvalResult> scopeConst;
  if (Expr::EvalResult eval;
      e->getScopeModel() && e->getScope()->EvaluateAsInt(eval, getContext()))
    scopeConst.emplace(std::move(eval));

  switch (e->getOp()) {
  default:
    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
    return RValue::get(nullptr);

  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
  case AtomicExpr::AO__scoped_atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      return RValue::get(nullptr);
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__scoped_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_store_n:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__atomic_fetch_uinc:
  case AtomicExpr::AO__atomic_fetch_udec:
  case AtomicExpr::AO__scoped_atomic_fetch_uinc:
  case AtomicExpr::AO__scoped_atomic_fetch_udec:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }

  QualType resultTy = e->getType().getUnqualifiedType();

  bool shouldCastToIntPtrTy = !memTy->isFloatingType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
                         "test_and_set.bool");
  } else if (!resultTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to a libcall here, as they are not
  // currently permitted in IR instructions (although that constraint could be
  // relaxed in the future). For other cases where a libcall is required on a
  // given platform, we let the backend handle it (this includes handling for
  // all of the size-optimized libcall variants, which are only valid up to 16
  // bytes).
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
  if (useLibCall) {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
    return RValue::get(nullptr);
  }

  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
    emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                 size, memOrder, scopeConst, scope);
  };
  emitAtomicExprWithMemOrder(e->getOrder(), isStore, isLoad, /*isFence=*/false,
                             emitAtomicOpCallBackFn);

  if (resultTy->isVoidType())
    return RValue::get(nullptr);

  return convertTempToRValue(
      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
      e->getExprLoc());
}
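
// Size examples for the libcall decision above (illustrative): power-of-two
// atomics of 1, 2, 4, 8, or 16 bytes are emitted inline, while a 12-byte
// (non-power-of-two) or a 32-byte type takes the libcall path, which is
// still NYI at this point.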

RValue CIRGenFunction::emitAtomicLoad(LValue lvalue, SourceLocation loc,
                                      AggValueSlot slot) {
  if (lvalue.getType()->isAtomicType())
    return emitAtomicLoad(lvalue, loc, cir::MemOrder::SequentiallyConsistent,
                          /*isVolatile=*/lvalue.isVolatileQualified(), slot);
  return emitAtomicLoad(lvalue, loc, cir::MemOrder::Acquire,
                        /*isVolatile=*/true, slot);
}

RValue CIRGenFunction::emitAtomicLoad(LValue lvalue, SourceLocation loc,
                                      cir::MemOrder order, bool isVolatile,
                                      AggValueSlot slot) {
  AtomicInfo info(*this, lvalue, getLoc(loc));
  return info.emitAtomicLoad(slot, loc, /*asValue=*/true, order, isVolatile);
}

void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
  bool isVolatile = dest.isVolatileQualified();
  auto order = cir::MemOrder::SequentiallyConsistent;
  if (!dest.getType()->isAtomicType()) {
  }
  return emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
}

/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value of the atomic type; this
/// means that for aggregate r-values, it should include storage for any padding
/// that was necessary.
void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
                                     cir::MemOrder order, bool isVolatile,
                                     bool isInit) {
  // If this is an aggregate r-value, it should agree in type except
  // maybe for address-space qualification.
  mlir::Location loc = dest.getPointer().getLoc();
  assert(!rvalue.isAggregate() ||
         rvalue.getAggregateAddress().getElementType() ==
             dest.getAddress().getElementType());

  AtomicInfo atomics(*this, dest, loc);
  LValue lvalue = atomics.getAtomicLValue();

  if (lvalue.isSimple()) {
    // If this is an initialization, just put the value there normally.
    if (isInit) {
      atomics.emitCopyIntoMemory(rvalue);
      return;
    }

    // Check whether we should use a library call.
    if (atomics.shouldUseLibCall()) {
      cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
      return;
    }

    // Okay, we're doing this natively.
    mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);

    // Do the atomic store.
    Address addr = atomics.getAtomicAddress();
    if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
      if (shouldCastToInt(value.getType(), /*cmpxchg=*/false)) {
        addr = atomics.castToAtomicIntPointer(addr);
        valueToStore =
            builder.createIntCast(valueToStore, addr.getElementType());
      }
    }
    cir::StoreOp store = builder.createStore(loc, valueToStore, addr);

    // Initializations don't need to be atomic.
    if (!isInit) {
      store.setMemOrder(order);
    }

    // Other decoration.
    if (isVolatile)
      store.setIsVolatile(true);

    return;
  }

  cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
}

void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar: {
    mlir::Value value = emitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Complex: {
    mlir::Value value = emitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool zeroed = false;
    if (!init->getType()->isAtomicType()) {
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap, AggValueSlot::IsZeroed(zeroed));

    emitAggExpr(init, slot);
    return;
  }
  }

  llvm_unreachable("bad evaluation kind");
}