//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;

namespace {
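// Helper that records the size, alignment, and evaluation kind of an atomic
// lvalue and provides utilities for copying values in and out of its storage.
// Only simple lvalues are handled so far; other forms report errorNYI.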
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  CharUnits valueAlign;
  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  LValue lvalue;
  mlir::Location loc;

public:
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
  }

  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    return nullptr;
  }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

  bool emitMemSetZeroIfNecessary() const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the source.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding()) {
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    }

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates temp alloca for intermediate operations on atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
} // namespace

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
  Address declPtr = cgf.createMemTemp(
      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
                       /*Init*/ true);
  return declPtr;
}

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern. User beware.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }

  return castToAtomicIntPointer(addr);
}

Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}

static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchg::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));

  cmpxchg.setIsVolatile(e->isVolatile());
  cmpxchg.setWeak(isWeak);

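  // If the exchange failed, write the value observed in memory back into the
  // caller's "expected" slot (val1); the C11/GNU compare-exchange builtins
  // require *expected to hold the actual value on failure.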
  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee()) {
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      }
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Update the memory at Dest with Success's value.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
                        cgf.makeAddrLValue(dest, e->getType()),
                        /*isInit=*/false);
}

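// Emit a compare-exchange after resolving the failure memory order. The
// failure order must currently be a compile-time constant; invalid values and
// the orders the standard disallows for failure (release, acq_rel) are demoted
// to relaxed before calling emitAtomicCmpXchg.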
static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
        // 31.7.2.18: "The failure argument shall not be memory_order_release
        // nor memory_order_acq_rel". Fallback to monotonic.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size,
                      successOrder, failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (scopeModel) {
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
    return;
  }

  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    cir::LoadOp load =
        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());

    load->setAttr("mem_order", orderAttr);

    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);

    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, orderAttr);
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchg::getOperationName();
    break;

  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__scoped_atomic_add_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__scoped_atomic_sub_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__scoped_atomic_min_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__scoped_atomic_max_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__scoped_atomic_and_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__scoped_atomic_or_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__scoped_atomic_xor_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__scoped_atomic_nand_fetch:

  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__scoped_atomic_fetch_nand:

  case AtomicExpr::AO__atomic_test_and_set:

  case AtomicExpr::AO__atomic_clear:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    return;
  }

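  // Everything that falls through to here is emitted as a generic
  // read-modify-write operation: load the operand, build the op named by
  // opName with the pointer and loaded value, and tag it with the memory
  // order and volatility.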
  assert(!opName.empty() && "expected operation name to build");
  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}

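// Check that a constant memory order is permitted for the kind of access:
// C11/C++11 disallow acquire, consume, and acq_rel orders on atomic stores,
// and release and acq_rel orders on atomic loads.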
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;
  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}

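// Emit an atomic expression such as __c11_atomic_load(&x, __ATOMIC_ACQUIRE)
// or __atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST). Only the inlined,
// power-of-two-sized path with a constant memory order is implemented so far;
// libcalls and dynamic orders report errorNYI.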
RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  Expr::EvalResult orderConst;
  mlir::Value order;
  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
    order = emitScalarExpr(e->getOrder());

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  default:
    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
    return RValue::get(nullptr);

  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
    break;

  case AtomicExpr::AO__atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }

  QualType resultTy = e->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
  } else if (!resultTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

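  // A size is a power of two exactly when it has a single bit set, i.e.
  // (size & (size - 1)) == 0. For example, size = 12 gives 12 & 11 = 8, so a
  // 12-byte atomic is not a power of two and takes the libcall path below.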
  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to a libcall here, as they are not
  // currently permitted in IR instructions (although that constraint could be
  // relaxed in the future). For other cases where a libcall is required on a
  // given platform, we let the backend handle it (this includes handling for
  // all of the size-optimized libcall variants, which are only valid up to 16
  // bytes).
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
  if (useLibCall) {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
    return RValue::get(nullptr);
  }

  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  if (!order) {
    // We have evaluated the memory order as an integer constant in orderConst.
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    uint64_t ord = orderConst.Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord));
  } else {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
    return RValue::get(nullptr);
  }

  if (resultTy->isVoidType())
    return RValue::get(nullptr);

  return convertTempToRValue(
      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
      e->getExprLoc());
}

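// Emit the (non-atomic) initialization of an _Atomic object, e.g.
// _Atomic(int) x = 42; the value is simply copied into the atomic's storage.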
void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar: {
    mlir::Value value = emitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Complex: {
    mlir::Value value = emitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Aggregate:
    cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: aggregate type");
    return;
  }

  llvm_unreachable("bad evaluation kind");
}