//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the code for emitting atomic operations.
//
//===----------------------------------------------------------------------===//

#include "CIRGenFunction.h"

using namespace clang;
using namespace clang::CIRGen;
using namespace cir;
namespace {
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy;
  QualType valueTy;
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  CharUnits valueAlign;
  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  LValue lvalue;
  mlir::Location loc;

public:
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }
  }

  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    return nullptr;
  }
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

  bool emitMemSetZeroIfNecessary() const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the source.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    if (hasPadding()) {
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    }

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates a temp alloca for intermediate operations on the atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
} // namespace
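
// For illustration (not part of the emitted code): AtomicInfo describes how an
// atomic object is laid out. For example, given
//   struct S { char c[3]; };
//   _Atomic(struct S) s;
// the value is 3 bytes but the atomic object is typically widened to 4 bytes,
// so hasPadding() would be true and the extra byte has to be zero-filled before
// operations such as compare-exchange can compare whole objects.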

// This function emits any expression (scalar, complex, or aggregate)
// into a temporary alloca.
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
  Address declPtr = cgf.createMemTemp(
      e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
  cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
                       /*Init*/ true);
  return declPtr;
}
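
// For illustration (an assumed source-level example): in
//   __atomic_store_n(p, x + 1, __ATOMIC_RELEASE);
// the value operand `x + 1` is materialized into an ".atomictmp" slot by
// emitValToTemp, and the atomic store then reads from that temporary.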

/// Does a store of the given IR type modify the full expected width?
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
                           uint64_t expectedSize) {
  return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
}
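
// For illustration (assuming a typical x86-64 layout): an `_Atomic long double`
// occupies 16 bytes, but the underlying fp80 value has a store size of only 10
// bytes, so isFullSizeType returns false and requiresMemSetZero below reports
// that the slot needs zero-filling.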

/// Does the atomic type require memsetting to zero before initialization?
///
/// The IR type is provided as a way of making certain queries faster.
bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
  // If the atomic type has size padding, we definitely need a memset.
  if (hasPadding())
    return true;

  // Otherwise, do some simple heuristics to try to avoid it:
  switch (getEvaluationKind()) {
  // For scalars and complexes, check whether the store size of the
  // type uses the full size.
  case cir::TEK_Scalar:
    return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
  case cir::TEK_Complex:
    return !isFullSizeType(cgf.cgm,
                           mlir::cast<cir::ComplexType>(ty).getElementType(),
                           atomicSizeInBits / 2);
  // Padding in structs has an undefined bit pattern. User beware.
  case cir::TEK_Aggregate:
    return false;
  }
  llvm_unreachable("bad evaluation kind");
}

Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
  mlir::Type ty = addr.getElementType();
  uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
  if (sourceSizeInBits != atomicSizeInBits) {
    cgf.cgm.errorNYI(
        loc,
        "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
  }

  return castToAtomicIntPointer(addr);
}

Address AtomicInfo::createTempAlloca() const {
  Address tempAlloca = cgf.createMemTemp(
      (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
                                                                  : atomicTy,
      getAtomicAlignment(), loc, "atomic-temp");

  // Cast to pointer to value type for bitfields.
  if (lvalue.isBitField()) {
    cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
  }

  return tempAlloca;
}

Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
  auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
  // Don't bother with int casts if the integer size is the same.
  if (intTy && intTy.getWidth() == atomicSizeInBits)
    return addr;
  auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
  return addr.withElementType(cgf.getBuilder(), ty);
}
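
// For illustration (not part of the emitted code): for an `_Atomic float`,
// castToAtomicIntPointer reinterprets the storage as a 32-bit unsigned integer
// so that later operations can view the value as an integer of the full atomic
// width.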

bool AtomicInfo::emitMemSetZeroIfNecessary() const {
  assert(lvalue.isSimple());
  Address addr = lvalue.getAddress();
  if (!requiresMemSetZero(addr.getElementType()))
    return false;

  cgf.cgm.errorNYI(loc,
                   "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
  return false;
}

/// Copy an r-value into memory as part of storing to an atomic type.
/// This needs to create a bit-pattern suitable for atomic operations.
void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
  assert(lvalue.isSimple());

  // If we have an r-value, the rvalue should be of the atomic type,
  // which means that the caller is responsible for having zeroed
  // any padding. Just do an aggregate copy of that type.
  if (rvalue.isAggregate()) {
    cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
    return;
  }

  // Okay, otherwise we're copying stuff.

  // Zero out the buffer if necessary.
  emitMemSetZeroIfNecessary();

  // Drill past the padding if present.
  LValue tempLValue = projectValue();

  // Okay, store the rvalue in.
  if (rvalue.isScalar()) {
    cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
  } else {
    cgf.cgm.errorNYI("copying complex into atomic lvalue");
  }
}
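
// For illustration (not part of the emitted code): conceptually, copying a
// scalar into a padded atomic such as `_Atomic long double` would first zero
// the widened slot and then store the value bytes; the zeroing step itself is
// still errorNYI in emitMemSetZeroIfNecessary above.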

static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
                              Address dest, Address ptr, Address val1,
                              Address val2, uint64_t size,
                              cir::MemOrder successOrder,
                              cir::MemOrder failureOrder) {
  mlir::Location loc = cgf.getLoc(e->getSourceRange());

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Value expected = builder.createLoad(loc, val1);
  mlir::Value desired = builder.createLoad(loc, val2);

  auto cmpxchg = cir::AtomicCmpXchgOp::create(
      builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
      expected, desired,
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
      cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
      builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));

  cmpxchg.setIsVolatile(e->isVolatile());
  cmpxchg.setWeak(isWeak);

  mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
  cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
                    [&](mlir::OpBuilder &, mlir::Location) {
                      auto ptrTy = mlir::cast<cir::PointerType>(
                          val1.getPointer().getType());
                      if (val1.getElementType() != ptrTy.getPointee()) {
                        val1 = val1.withPointer(builder.createPtrBitcast(
                            val1.getPointer(), val1.getElementType()));
                      }
                      builder.createStore(loc, cmpxchg.getOld(), val1);
                      builder.createYield(loc);
                    });

  // Update the memory at dest with the success value.
  cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
                        cgf.makeAddrLValue(dest, e->getType()),
                        /*isInit=*/false);
}
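
// For illustration (not part of the emitted code): the IfOp above implements
// the usual compare-exchange contract. For
//   _Atomic int a; int expected = 0;
//   bool ok = __c11_atomic_compare_exchange_strong(&a, &expected, 1,
//                                                  __ATOMIC_SEQ_CST,
//                                                  __ATOMIC_SEQ_CST);
// the value observed in `a` is written back into `expected` when the exchange
// fails, and the success flag is stored to `dest`.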

static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
                                        bool isWeak, Address dest, Address ptr,
                                        Address val1, Address val2,
                                        Expr *failureOrderExpr, uint64_t size,
                                        cir::MemOrder successOrder) {
  Expr::EvalResult failureOrderEval;
  if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
    uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();

    cir::MemOrder failureOrder;
    if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
      failureOrder = cir::MemOrder::Relaxed;
    } else {
      switch ((cir::MemOrder)failureOrderInt) {
      case cir::MemOrder::Relaxed:
        // 31.7.2.18: "The failure argument shall not be memory_order_release
        // nor memory_order_acq_rel". Fall back to monotonic.
      case cir::MemOrder::Release:
      case cir::MemOrder::AcquireRelease:
        failureOrder = cir::MemOrder::Relaxed;
        break;
      case cir::MemOrder::Consume:
      case cir::MemOrder::Acquire:
        failureOrder = cir::MemOrder::Acquire;
        break;
      case cir::MemOrder::SequentiallyConsistent:
        failureOrder = cir::MemOrder::SequentiallyConsistent;
        break;
      }
    }

    // Prior to C++17, "the failure argument shall be no stronger than the
    // success argument". This condition has been lifted and the only
    // precondition is 31.7.2.18. Effectively treat this as a DR and skip
    // language version checks.
    emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
                      failureOrder);
    return;
  }

  cgf.cgm.errorNYI(e->getSourceRange(),
                   "emitAtomicCmpXchgFailureSet: non-constant failure order");
}
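
// For illustration (not part of the emitted code): a call such as
//   atomic_compare_exchange_strong_explicit(&a, &e, d, memory_order_acq_rel,
//                                           memory_order_release);
// uses a failure order that the standard disallows, so it is clamped to
// relaxed above; memory_order_consume is strengthened to acquire instead.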

static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
                         Address ptr, Address val1, Address val2,
                         Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
                         cir::MemOrder order) {
  std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
  if (scopeModel) {
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
    return;
  }

  llvm::StringRef opName;

  CIRGenBuilderTy &builder = cgf.getBuilder();
  mlir::Location loc = cgf.getLoc(expr->getSourceRange());
  auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
  cir::AtomicFetchKindAttr fetchAttr;
  bool fetchFirst = true;

  switch (expr->getOp()) {
  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled!");

  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
    emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
                                val2, failureOrderExpr, size, order);
    return;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n: {
    bool isWeak = false;
    if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
      emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
                                  failureOrderExpr, size, order);
    } else {
      cgf.cgm.errorNYI(expr->getSourceRange(),
                       "emitAtomicOp: non-constant isWeak");
    }
    return;
  }

  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__atomic_load: {
    cir::LoadOp load =
        builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());

    load->setAttr("mem_order", orderAttr);

    builder.createStore(loc, load->getResult(0), dest);
    return;
  }

  case AtomicExpr::AO__c11_atomic_store:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__atomic_store: {
    cir::LoadOp loadVal1 = builder.createLoad(loc, val1);

    builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
                        /*align=*/mlir::IntegerAttr{}, orderAttr);
    return;
  }

  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_exchange:
    opName = cir::AtomicXchgOp::getOperationName();
    break;

  case AtomicExpr::AO__atomic_add_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_add:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Add);
    break;

  case AtomicExpr::AO__atomic_sub_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_sub:
  case AtomicExpr::AO__atomic_fetch_sub:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Sub);
    break;

  case AtomicExpr::AO__atomic_min_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_min:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Min);
    break;

  case AtomicExpr::AO__atomic_max_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_max:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Max);
    break;

  case AtomicExpr::AO__atomic_and_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_and:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::And);
    break;

  case AtomicExpr::AO__atomic_or_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_or:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Or);
    break;

  case AtomicExpr::AO__atomic_xor_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__atomic_fetch_xor:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Xor);
    break;

  case AtomicExpr::AO__atomic_nand_fetch:
    fetchFirst = false;
    [[fallthrough]];
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_nand:
    opName = cir::AtomicFetchOp::getOperationName();
    fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
                                              cir::AtomicFetchKind::Nand);
    break;

  case AtomicExpr::AO__atomic_test_and_set: {
    auto op = cir::AtomicTestAndSetOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
        expr->isVolatile());
    builder.createStore(loc, op, dest);
    return;
  }

  case AtomicExpr::AO__atomic_clear: {
    cir::AtomicClearOp::create(
        builder, loc, ptr.getPointer(), order,
        builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
        expr->isVolatile());
    return;
  }

  case AtomicExpr::AO__opencl_atomic_init:

  case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
  case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:

  case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
  case AtomicExpr::AO__hip_atomic_compare_exchange_weak:

  case AtomicExpr::AO__scoped_atomic_compare_exchange:
  case AtomicExpr::AO__scoped_atomic_compare_exchange_n:

  case AtomicExpr::AO__opencl_atomic_load:
  case AtomicExpr::AO__hip_atomic_load:
  case AtomicExpr::AO__scoped_atomic_load_n:
  case AtomicExpr::AO__scoped_atomic_load:

  case AtomicExpr::AO__opencl_atomic_store:
  case AtomicExpr::AO__hip_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store:
  case AtomicExpr::AO__scoped_atomic_store_n:

  case AtomicExpr::AO__hip_atomic_exchange:
  case AtomicExpr::AO__opencl_atomic_exchange:
  case AtomicExpr::AO__scoped_atomic_exchange_n:
  case AtomicExpr::AO__scoped_atomic_exchange:

  case AtomicExpr::AO__scoped_atomic_add_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_add:
  case AtomicExpr::AO__opencl_atomic_fetch_add:
  case AtomicExpr::AO__scoped_atomic_fetch_add:

  case AtomicExpr::AO__scoped_atomic_sub_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_sub:
  case AtomicExpr::AO__opencl_atomic_fetch_sub:
  case AtomicExpr::AO__scoped_atomic_fetch_sub:

  case AtomicExpr::AO__scoped_atomic_min_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_min:
  case AtomicExpr::AO__opencl_atomic_fetch_min:
  case AtomicExpr::AO__scoped_atomic_fetch_min:

  case AtomicExpr::AO__scoped_atomic_max_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_max:
  case AtomicExpr::AO__opencl_atomic_fetch_max:
  case AtomicExpr::AO__scoped_atomic_fetch_max:

  case AtomicExpr::AO__scoped_atomic_and_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_and:
  case AtomicExpr::AO__opencl_atomic_fetch_and:
  case AtomicExpr::AO__scoped_atomic_fetch_and:

  case AtomicExpr::AO__scoped_atomic_or_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_or:
  case AtomicExpr::AO__opencl_atomic_fetch_or:
  case AtomicExpr::AO__scoped_atomic_fetch_or:

  case AtomicExpr::AO__scoped_atomic_xor_fetch:

  case AtomicExpr::AO__hip_atomic_fetch_xor:
  case AtomicExpr::AO__opencl_atomic_fetch_xor:
  case AtomicExpr::AO__scoped_atomic_fetch_xor:

  case AtomicExpr::AO__scoped_atomic_nand_fetch:

  case AtomicExpr::AO__scoped_atomic_fetch_nand:
    cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
    return;
  }

  assert(!opName.empty() && "expected operation name to build");
  mlir::Value loadVal1 = builder.createLoad(loc, val1);

  SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
  SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
  mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
                                          atomicOperands, atomicResTys);

  if (fetchAttr)
    rmwOp->setAttr("binop", fetchAttr);
  rmwOp->setAttr("mem_order", orderAttr);
  if (expr->isVolatile())
    rmwOp->setAttr("is_volatile", builder.getUnitAttr());
  if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
    rmwOp->setAttr("fetch_first", builder.getUnitAttr());

  mlir::Value result = rmwOp->getResult(0);
  builder.createStore(loc, result, dest);
}
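
// For illustration (not part of the emitted code): both
//   __atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST);   // returns the old value
//   __atomic_add_fetch(&x, 1, __ATOMIC_SEQ_CST);   // returns the new value
// lower to the same AtomicFetchOp above; the second form clears fetchFirst, so
// the "fetch_first" attribute is omitted and the updated value is produced.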

static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
  if (!cir::isValidCIRAtomicOrderingCABI(order))
    return false;
  auto memOrder = static_cast<cir::MemOrder>(order);
  if (isStore)
    return memOrder != cir::MemOrder::Consume &&
           memOrder != cir::MemOrder::Acquire &&
           memOrder != cir::MemOrder::AcquireRelease;
  if (isLoad)
    return memOrder != cir::MemOrder::Release &&
           memOrder != cir::MemOrder::AcquireRelease;
  return true;
}
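
// For illustration (not part of the emitted code): C11/C++11 forbid, e.g.,
//   atomic_store_explicit(&x, 1, memory_order_acquire);
//   atomic_load_explicit(&x, memory_order_release);
// so isMemOrderValid rejects those constant orders and emitAtomicExpr simply
// skips emitting the operation for them.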

RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
  QualType memTy = atomicTy;
  if (const auto *ty = atomicTy->getAs<AtomicType>())
    memTy = ty->getValueType();

  Expr *isWeakExpr = nullptr;
  Expr *orderFailExpr = nullptr;

  Address val1 = Address::invalid();
  Address val2 = Address::invalid();
  Address dest = Address::invalid();
  Address ptr = emitPointerWithAlignment(e->getPtr());

  if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
    LValue lvalue = makeAddrLValue(ptr, atomicTy);
    emitAtomicInit(e->getVal1(), lvalue);
    return RValue::get(nullptr);
  }

  TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
  uint64_t size = typeInfo.Width.getQuantity();

  Expr::EvalResult orderConst;
  mlir::Value order;
  if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
    order = emitScalarExpr(e->getOrder());

  bool shouldCastToIntPtrTy = true;

  switch (e->getOp()) {
  default:
    cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
    return RValue::get(nullptr);

  case AtomicExpr::AO__c11_atomic_init:
    llvm_unreachable("already handled above with emitAtomicInit");

  case AtomicExpr::AO__atomic_load_n:
  case AtomicExpr::AO__c11_atomic_load:
  case AtomicExpr::AO__atomic_test_and_set:
  case AtomicExpr::AO__atomic_clear:
    break;

  case AtomicExpr::AO__atomic_load:
    dest = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_store:
    val1 = emitPointerWithAlignment(e->getVal1());
    break;

  case AtomicExpr::AO__atomic_exchange:
    val1 = emitPointerWithAlignment(e->getVal1());
    dest = emitPointerWithAlignment(e->getVal2());
    break;

  case AtomicExpr::AO__atomic_compare_exchange:
  case AtomicExpr::AO__atomic_compare_exchange_n:
  case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
  case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
    val1 = emitPointerWithAlignment(e->getVal1());
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      val2 = emitPointerWithAlignment(e->getVal2());
    else
      val2 = emitValToTemp(*this, e->getVal2());
    orderFailExpr = e->getOrderFail();
    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
        e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
      isWeakExpr = e->getWeak();
    break;

  case AtomicExpr::AO__c11_atomic_fetch_add:
  case AtomicExpr::AO__c11_atomic_fetch_sub:
    if (memTy->isPointerType()) {
      cgm.errorNYI(e->getSourceRange(),
                   "atomic fetch-and-add and fetch-and-sub for pointers");
      return RValue::get(nullptr);
    }
    [[fallthrough]];
  case AtomicExpr::AO__atomic_fetch_add:
  case AtomicExpr::AO__atomic_fetch_max:
  case AtomicExpr::AO__atomic_fetch_min:
  case AtomicExpr::AO__atomic_fetch_sub:
  case AtomicExpr::AO__atomic_add_fetch:
  case AtomicExpr::AO__atomic_max_fetch:
  case AtomicExpr::AO__atomic_min_fetch:
  case AtomicExpr::AO__atomic_sub_fetch:
  case AtomicExpr::AO__c11_atomic_fetch_max:
  case AtomicExpr::AO__c11_atomic_fetch_min:
    shouldCastToIntPtrTy = !memTy->isFloatingType();
    [[fallthrough]];

  case AtomicExpr::AO__atomic_fetch_and:
  case AtomicExpr::AO__atomic_fetch_nand:
  case AtomicExpr::AO__atomic_fetch_or:
  case AtomicExpr::AO__atomic_fetch_xor:
  case AtomicExpr::AO__atomic_and_fetch:
  case AtomicExpr::AO__atomic_nand_fetch:
  case AtomicExpr::AO__atomic_or_fetch:
  case AtomicExpr::AO__atomic_xor_fetch:
  case AtomicExpr::AO__atomic_exchange_n:
  case AtomicExpr::AO__atomic_store_n:
  case AtomicExpr::AO__c11_atomic_fetch_and:
  case AtomicExpr::AO__c11_atomic_fetch_nand:
  case AtomicExpr::AO__c11_atomic_fetch_or:
  case AtomicExpr::AO__c11_atomic_fetch_xor:
  case AtomicExpr::AO__c11_atomic_exchange:
  case AtomicExpr::AO__c11_atomic_store:
    val1 = emitValToTemp(*this, e->getVal1());
    break;
  }

  QualType resultTy = e->getType().getUnqualifiedType();

  // The inlined atomics only function on iN types, where N is a power of 2. We
  // need to make sure (via temporaries if necessary) that all incoming values
  // are compatible.
  LValue atomicValue = makeAddrLValue(ptr, atomicTy);
  AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));

  if (shouldCastToIntPtrTy) {
    ptr = atomics.castToAtomicIntPointer(ptr);
    if (val1.isValid())
      val1 = atomics.convertToAtomicIntPointer(val1);
  }
  if (dest.isValid()) {
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  } else if (e->isCmpXChg()) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
  } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
    dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
                         "test_and_set.bool");
  } else if (!resultTy->isVoidType()) {
    dest = atomics.createTempAlloca();
    if (shouldCastToIntPtrTy)
      dest = atomics.castToAtomicIntPointer(dest);
  }

  bool powerOf2Size = (size & (size - 1)) == 0;
  bool useLibCall = !powerOf2Size || (size > 16);

  // For atomics larger than 16 bytes, emit a libcall from the frontend. This
  // avoids the overhead of dealing with excessively-large value types in IR.
  // Non-power-of-2 values also lower to a libcall here, as they are not
  // currently permitted in IR instructions (although that constraint could be
  // relaxed in the future). For other cases where a libcall is required on a
  // given platform, we let the backend handle it (this includes handling for
  // all of the size-optimized libcall variants, which are only valid up to 16
  // bytes).
  //
  // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
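  //
  // For illustration (typical layouts, not a guarantee): a lock-free
  // `_Atomic(long long)` is 8 bytes and stays on the inline path, while a
  // 24-byte atomic struct is not a power of 2 and a 32-byte one is too large,
  // so both would take the libcall path below, which is currently errorNYI.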
  if (useLibCall) {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
    return RValue::get(nullptr);
  }

  bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
                 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
                 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store ||
                 e->getOp() == AtomicExpr::AO__atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
                 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
                 e->getOp() == AtomicExpr::AO__atomic_clear;
  bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
                e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                e->getOp() == AtomicExpr::AO__hip_atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load ||
                e->getOp() == AtomicExpr::AO__atomic_load_n ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
                e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;

  if (!order) {
    // We have evaluated the memory order as an integer constant in orderConst.
    // We should not ever get to a case where the ordering isn't a valid CABI
    // value, but it's hard to enforce that in general.
    uint64_t ord = orderConst.Val.getInt().getZExtValue();
    if (isMemOrderValid(ord, isStore, isLoad))
      emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
                   size, static_cast<cir::MemOrder>(ord));
  } else {
    cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
    return RValue::get(nullptr);
  }

  if (resultTy->isVoidType())
    return RValue::get(nullptr);

  return convertTempToRValue(
      dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
      e->getExprLoc());
}

void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));

  switch (atomics.getEvaluationKind()) {
  case cir::TEK_Scalar: {
    mlir::Value value = emitScalarExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Complex: {
    mlir::Value value = emitComplexExpr(init);
    atomics.emitCopyIntoMemory(RValue::get(value));
    return;
  }

  case cir::TEK_Aggregate: {
    // Fix up the destination if the initializer isn't an expression
    // of atomic type.
    bool zeroed = false;
    if (!init->getType()->isAtomicType()) {
      zeroed = atomics.emitMemSetZeroIfNecessary();
      dest = atomics.projectValue();
    }

    // Evaluate the expression directly into the destination.
    AggValueSlot slot = AggValueSlot::forLValue(
        dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
        AggValueSlot::DoesNotOverlap,
        zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);

    emitAggExpr(init, slot);
    return;
  }
  }

  llvm_unreachable("bad evaluation kind");
}
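
// For illustration (not part of the emitted code): an initialization such as
//   struct Pair { int a, b; };
//   _Atomic(struct Pair) p;
//   __c11_atomic_init(&p, (struct Pair){1, 2});
// reaches the TEK_Aggregate path above, while `__c11_atomic_init(&f, 1.0f)` on
// an `_Atomic float` takes the TEK_Scalar path through emitCopyIntoMemory.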