1//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
15
16using namespace clang;
17using namespace clang::CIRGen;
18using namespace cir;
19
20namespace {
21class AtomicInfo {
22 CIRGenFunction &cgf;
23 QualType atomicTy;
24 QualType valueTy;
25 uint64_t atomicSizeInBits = 0;
26 uint64_t valueSizeInBits = 0;
27 CharUnits atomicAlign;
28 CharUnits valueAlign;
29 TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
30 bool useLibCall = true;
31 LValue lvalue;
32 mlir::Location loc;
33
34public:
35 AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
36 : cgf(cgf), loc(loc) {
37 assert(!lvalue.isGlobalReg());
38 ASTContext &ctx = cgf.getContext();
39 if (lvalue.isSimple()) {
40 atomicTy = lvalue.getType();
41 if (auto *ty = atomicTy->getAs<AtomicType>())
42 valueTy = ty->getValueType();
43 else
44 valueTy = atomicTy;
45 evaluationKind = cgf.getEvaluationKind(valueTy);
46
47 TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
48 TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
49 uint64_t valueAlignInBits = valueTypeInfo.Align;
50 uint64_t atomicAlignInBits = atomicTypeInfo.Align;
51 valueSizeInBits = valueTypeInfo.Width;
52 atomicSizeInBits = atomicTypeInfo.Width;
53 assert(valueSizeInBits <= atomicSizeInBits);
54 assert(valueAlignInBits <= atomicAlignInBits);
55
56 atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
57 valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
58 if (lvalue.getAlignment().isZero())
59 lvalue.setAlignment(atomicAlign);
60
61 this->lvalue = lvalue;
62 } else {
64 cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
65 }
66 useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
67 atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
68 }
69
70 QualType getValueType() const { return valueTy; }
71 CharUnits getAtomicAlignment() const { return atomicAlign; }
72 TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
73 mlir::Value getAtomicPointer() const {
74 if (lvalue.isSimple())
75 return lvalue.getPointer();
77 return nullptr;
78 }
79 bool shouldUseLibCall() const { return useLibCall; }
80 const LValue &getAtomicLValue() const { return lvalue; }
81 Address getAtomicAddress() const {
82 mlir::Type elemTy;
83 if (lvalue.isSimple()) {
84 elemTy = lvalue.getAddress().getElementType();
85 } else {
87 cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
88 }
89 return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
90 }
91
92 /// Is the atomic size larger than the underlying value type?
93 ///
94 /// Note that the absence of padding does not mean that atomic
95 /// objects are completely interchangeable with non-atomic
96 /// objects: we might have promoted the alignment of a type
97 /// without making it bigger.
98 bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
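  // For illustration (hypothetical example, assuming a typical target that
  // rounds small atomics up to a power-of-two size):
  //
  //   struct S { char c[3]; };   // valueSizeInBits  == 24
  //   _Atomic(struct S) as;      // atomicSizeInBits == 32, so hasPadding()
  //
  // whereas for _Atomic(int) the two widths match and hasPadding() is false.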
99
100 bool emitMemSetZeroIfNecessary() const;
101
102 mlir::Value getScalarRValValueOrNull(RValue rvalue) const;
103
104 /// Cast the given pointer to an integer pointer suitable for atomic
105 /// operations on the source.
106 Address castToAtomicIntPointer(Address addr) const;
107
108 /// If addr is compatible with the iN that will be used for an atomic
109 /// operation, bitcast it. Otherwise, create a temporary that is suitable and
110 /// copy the value across.
111 Address convertToAtomicIntPointer(Address addr) const;
112
 113  /// Converts an r-value to an integer value.
114 mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;
115
116 /// Copy an atomic r-value into atomic-layout memory.
117 void emitCopyIntoMemory(RValue rvalue) const;
118
119 /// Project an l-value down to the value field.
120 LValue projectValue() const {
121 assert(lvalue.isSimple());
122 Address addr = getAtomicAddress();
123 if (hasPadding()) {
124 cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
125 }
126
128 return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
129 }
130
131 /// Creates temp alloca for intermediate operations on atomic value.
132 Address createTempAlloca() const;
133
134private:
135 bool requiresMemSetZero(mlir::Type ty) const;
136};
137} // namespace
138
139// This function emits any expression (scalar, complex, or aggregate)
140// into a temporary alloca.
141static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
142  Address declPtr = cgf.createMemTemp(
143 e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
144 cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
145 /*Init*/ true);
146 return declPtr;
147}
148
149/// Does a store of the given IR type modify the full expected width?
150static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
151 uint64_t expectedSize) {
152 return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
153}
154
155/// Does the atomic type require memsetting to zero before initialization?
156///
157/// The IR type is provided as a way of making certain queries faster.
158bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
159 // If the atomic type has size padding, we definitely need a memset.
160 if (hasPadding())
161 return true;
162
163 // Otherwise, do some simple heuristics to try to avoid it:
164 switch (getEvaluationKind()) {
165 // For scalars and complexes, check whether the store size of the
166 // type uses the full size.
167 case cir::TEK_Scalar:
168 return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
169 case cir::TEK_Complex:
170 return !isFullSizeType(cgf.cgm,
171 mlir::cast<cir::ComplexType>(ty).getElementType(),
172 atomicSizeInBits / 2);
173 // Padding in structs has an undefined bit pattern. User beware.
 174  case cir::TEK_Aggregate:
 175    return false;
176 }
177 llvm_unreachable("bad evaluation kind");
178}
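// For illustration (hypothetical example): on a target with 32-bit int,
// _Atomic(int) stores the full 32 bits of its value, so requiresMemSetZero()
// is false for it, while the padded 3-byte struct sketched above must be
// zeroed first so the extra byte has a defined bit pattern, e.g. before
//
//   __c11_atomic_init(&as, (struct S){0});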
179
180Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
181 mlir::Type ty = addr.getElementType();
182 uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
183 if (sourceSizeInBits != atomicSizeInBits) {
184 cgf.cgm.errorNYI(
185 loc,
186 "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
187 }
188
189 return castToAtomicIntPointer(addr);
190}
191
192Address AtomicInfo::createTempAlloca() const {
193 Address tempAlloca = cgf.createMemTemp(
194 (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
195 : atomicTy,
196 getAtomicAlignment(), loc, "atomic-temp");
197
198 // Cast to pointer to value type for bitfields.
199 if (lvalue.isBitField()) {
200 cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
201 }
202
203 return tempAlloca;
204}
205
206mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
207 if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
208 return rvalue.getValue();
209 return nullptr;
210}
211
212Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
213 auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
214 // Don't bother with int casts if the integer size is the same.
215 if (intTy && intTy.getWidth() == atomicSizeInBits)
216 return addr;
217 auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
218 return addr.withElementType(cgf.getBuilder(), ty);
219}
220
221bool AtomicInfo::emitMemSetZeroIfNecessary() const {
222 assert(lvalue.isSimple());
223 Address addr = lvalue.getAddress();
224 if (!requiresMemSetZero(addr.getElementType()))
225 return false;
226
227 cgf.cgm.errorNYI(loc,
228 "AtomicInfo::emitMemSetZeroIfNecaessary: emit memset zero");
229 return false;
230}
231
232/// Return true if \param valueTy is a type that should be cast to an integer
233/// around the atomic memory operation. If \param cmpxchg is true, the cast is
234/// also applied to floating-point types, since that instruction cannot have
235/// floating-point operands. TODO: Allow compare-and-exchange with FP - see
236/// comment in CIRGenAtomicExpandPass.cpp.
237static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
238 if (cir::isAnyFloatingPointType(valueTy))
239 return isa<cir::FP80Type>(valueTy) || cmpxchg;
240 return !isa<cir::IntType>(valueTy) && !isa<cir::PointerType>(valueTy);
241}
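// For illustration, the decision above for common CIR value types:
//   cir::IntType / cir::PointerType        -> false (used directly)
//   float / double, cmpxchg == false       -> false (expand pass casts later)
//   float / double, cmpxchg == true        -> true  (cmpxchg has no FP form)
//   cir::FP80Type (x86 long double)        -> true
//   anything else (records, vectors, ...)  -> true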
242
243mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
244 // If we've got a scalar value of the right size, try to avoid going
 245  // through memory. Floats get cast if needed by AtomicExpandPass.
246 if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
247 if (!shouldCastToInt(value.getType(), cmpxchg))
248 return cgf.emitToMemory(value, valueTy);
249
250 cgf.cgm.errorNYI(
251 loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
252 return nullptr;
253 }
254
255 cgf.cgm.errorNYI(
256 loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
257 return nullptr;
258}
259
260/// Copy an r-value into memory as part of storing to an atomic type.
261/// This needs to create a bit-pattern suitable for atomic operations.
262void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
263 assert(lvalue.isSimple());
264
265 // If we have an r-value, the rvalue should be of the atomic type,
266 // which means that the caller is responsible for having zeroed
267 // any padding. Just do an aggregate copy of that type.
268 if (rvalue.isAggregate()) {
269 cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
270 return;
271 }
272
273 // Okay, otherwise we're copying stuff.
274
275 // Zero out the buffer if necessary.
276 emitMemSetZeroIfNecessary();
277
278 // Drill past the padding if present.
279 LValue tempLValue = projectValue();
280
281 // Okay, store the rvalue in.
282 if (rvalue.isScalar()) {
283 cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
284 } else {
285 cgf.cgm.errorNYI("copying complex into atomic lvalue");
286 }
287}
288
289static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
290                                         mlir::Location loc) {
291 mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
292 mlir::OpBuilder::InsertPoint insertPoint;
293 cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
294 insertPoint);
295 builder.restoreInsertionPoint(insertPoint);
296}
297
298// Create a "case" operation with the given list of orders as its values. Also
299// create the region that will hold the body of the switch-case label.
300static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
301 mlir::Type orderType,
302                                  llvm::ArrayRef<cir::MemOrder> orders) {
303  llvm::SmallVector<mlir::Attribute, 2> orderAttrs;
304  for (cir::MemOrder order : orders)
305 orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
306 mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);
307
308 mlir::OpBuilder::InsertPoint insertPoint;
309 cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
310 insertPoint);
311 builder.restoreInsertionPoint(insertPoint);
312}
313
314static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
315 Address dest, Address ptr, Address val1,
316 Address val2, uint64_t size,
317 cir::MemOrder successOrder,
318 cir::MemOrder failureOrder) {
319 mlir::Location loc = cgf.getLoc(e->getSourceRange());
320
321 CIRGenBuilderTy &builder = cgf.getBuilder();
322 mlir::Value expected = builder.createLoad(loc, val1);
323 mlir::Value desired = builder.createLoad(loc, val2);
324
325 auto cmpxchg = cir::AtomicCmpXchgOp::create(
326 builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
327 expected, desired,
328 cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
329 cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
330 builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
331
332 cmpxchg.setIsVolatile(e->isVolatile());
333 cmpxchg.setWeak(isWeak);
334
335 mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
336 cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
337 [&](mlir::OpBuilder &, mlir::Location) {
338 auto ptrTy = mlir::cast<cir::PointerType>(
339 val1.getPointer().getType());
340 if (val1.getElementType() != ptrTy.getPointee()) {
341 val1 = val1.withPointer(builder.createPtrBitcast(
342 val1.getPointer(), val1.getElementType()));
343 }
344 builder.createStore(loc, cmpxchg.getOld(), val1);
345 builder.createYield(loc);
346 });
347
348 // Update the memory at Dest with Success's value.
349 cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
350 cgf.makeAddrLValue(dest, e->getType()),
351 /*isInit=*/false);
352}
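// For illustration, a source-level form that reaches this helper (hypothetical
// example using the standard C11 builtin):
//
//   _Atomic int a;
//   int expected = 0;
//   _Bool ok = __c11_atomic_compare_exchange_strong(
//       &a, &expected, 1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
//
// Here `ptr` is &a, `val1` holds `expected` (and receives the old value when
// the exchange fails, via the cir.if above), `val2` holds the desired value 1,
// and the success flag is stored to `dest`.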
353
354static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
355                                        bool isWeak, Address dest, Address ptr,
356 Address val1, Address val2,
357 Expr *failureOrderExpr, uint64_t size,
358 cir::MemOrder successOrder) {
359 Expr::EvalResult failureOrderEval;
360 if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
361 uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
362
363 cir::MemOrder failureOrder;
364 if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
365 failureOrder = cir::MemOrder::Relaxed;
366 } else {
367 switch ((cir::MemOrder)failureOrderInt) {
368 case cir::MemOrder::Relaxed:
369 // 31.7.2.18: "The failure argument shall not be memory_order_release
 370        // nor memory_order_acq_rel". Fall back to monotonic.
371 case cir::MemOrder::Release:
372 case cir::MemOrder::AcquireRelease:
373 failureOrder = cir::MemOrder::Relaxed;
374 break;
375 case cir::MemOrder::Consume:
376 case cir::MemOrder::Acquire:
377 failureOrder = cir::MemOrder::Acquire;
378 break;
379 case cir::MemOrder::SequentiallyConsistent:
380 failureOrder = cir::MemOrder::SequentiallyConsistent;
381 break;
382 }
383 }
384
 385    // Prior to C++17, "the failure argument shall be no stronger than the
386 // success argument". This condition has been lifted and the only
387 // precondition is 31.7.2.18. Effectively treat this as a DR and skip
388 // language version checks.
389 emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
390 failureOrder);
391 return;
392 }
393
395 cgf.cgm.errorNYI(e->getSourceRange(),
396 "emitAtomicCmpXchgFailureSet: non-constant failure order");
397}
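// For illustration, the clamping above maps a constant failure order like so:
//   relaxed / release / acq_rel / invalid   -> relaxed
//   consume / acquire                       -> acquire
//   seq_cst                                 -> seq_cst
// e.g. a call such as (hypothetical example)
//   __c11_atomic_compare_exchange_weak(&a, &e, 1, __ATOMIC_ACQ_REL,
//                                      __ATOMIC_RELEASE)
// is emitted with a relaxed failure ordering.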
398
399static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
400                         Address ptr, Address val1, Address val2,
401 Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
402 cir::MemOrder order, cir::SyncScopeKind scope) {
404 llvm::StringRef opName;
405
406 CIRGenBuilderTy &builder = cgf.getBuilder();
407 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
408 auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
409 auto scopeAttr = cir::SyncScopeKindAttr::get(builder.getContext(), scope);
410 cir::AtomicFetchKindAttr fetchAttr;
411 bool fetchFirst = true;
412
413 switch (expr->getOp()) {
414 case AtomicExpr::AO__c11_atomic_init:
415 llvm_unreachable("already handled!");
416
417 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
418 emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
419 val2, failureOrderExpr, size, order);
420 return;
421
422 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
423 emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
424 val2, failureOrderExpr, size, order);
425 return;
426
427 case AtomicExpr::AO__atomic_compare_exchange:
428 case AtomicExpr::AO__atomic_compare_exchange_n: {
429 bool isWeak = false;
430 if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
431 emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
432 failureOrderExpr, size, order);
433 } else {
435 cgf.cgm.errorNYI(expr->getSourceRange(),
436 "emitAtomicOp: non-constant isWeak");
437 }
438 return;
439 }
440
441 case AtomicExpr::AO__c11_atomic_load:
442 case AtomicExpr::AO__atomic_load_n:
443 case AtomicExpr::AO__atomic_load:
444 case AtomicExpr::AO__scoped_atomic_load_n:
445 case AtomicExpr::AO__scoped_atomic_load: {
446 cir::LoadOp load =
447 builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
448
449 load->setAttr("mem_order", orderAttr);
450 load->setAttr("sync_scope", scopeAttr);
451
452 builder.createStore(loc, load->getResult(0), dest);
453 return;
454 }
455
456 case AtomicExpr::AO__c11_atomic_store:
457 case AtomicExpr::AO__atomic_store_n:
458 case AtomicExpr::AO__atomic_store:
459 case AtomicExpr::AO__scoped_atomic_store:
460 case AtomicExpr::AO__scoped_atomic_store_n: {
461 cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
462
464
465 builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
466 /*align=*/mlir::IntegerAttr{}, scopeAttr, orderAttr);
467 return;
468 }
469
470 case AtomicExpr::AO__c11_atomic_exchange:
471 case AtomicExpr::AO__atomic_exchange_n:
472 case AtomicExpr::AO__atomic_exchange:
473 opName = cir::AtomicXchgOp::getOperationName();
474 break;
475
476 case AtomicExpr::AO__atomic_add_fetch:
477 fetchFirst = false;
478 [[fallthrough]];
479 case AtomicExpr::AO__c11_atomic_fetch_add:
480 case AtomicExpr::AO__atomic_fetch_add:
481 opName = cir::AtomicFetchOp::getOperationName();
482 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
483 cir::AtomicFetchKind::Add);
484 break;
485
486 case AtomicExpr::AO__atomic_sub_fetch:
487 fetchFirst = false;
488 [[fallthrough]];
489 case AtomicExpr::AO__c11_atomic_fetch_sub:
490 case AtomicExpr::AO__atomic_fetch_sub:
491 opName = cir::AtomicFetchOp::getOperationName();
492 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
493 cir::AtomicFetchKind::Sub);
494 break;
495
496 case AtomicExpr::AO__atomic_min_fetch:
497 fetchFirst = false;
498 [[fallthrough]];
499 case AtomicExpr::AO__c11_atomic_fetch_min:
500 case AtomicExpr::AO__atomic_fetch_min:
501 opName = cir::AtomicFetchOp::getOperationName();
502 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
503 cir::AtomicFetchKind::Min);
504 break;
505
506 case AtomicExpr::AO__atomic_max_fetch:
507 fetchFirst = false;
508 [[fallthrough]];
509 case AtomicExpr::AO__c11_atomic_fetch_max:
510 case AtomicExpr::AO__atomic_fetch_max:
511 opName = cir::AtomicFetchOp::getOperationName();
512 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
513 cir::AtomicFetchKind::Max);
514 break;
515
516 case AtomicExpr::AO__atomic_and_fetch:
517 fetchFirst = false;
518 [[fallthrough]];
519 case AtomicExpr::AO__c11_atomic_fetch_and:
520 case AtomicExpr::AO__atomic_fetch_and:
521 opName = cir::AtomicFetchOp::getOperationName();
522 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
523 cir::AtomicFetchKind::And);
524 break;
525
526 case AtomicExpr::AO__atomic_or_fetch:
527 fetchFirst = false;
528 [[fallthrough]];
529 case AtomicExpr::AO__c11_atomic_fetch_or:
530 case AtomicExpr::AO__atomic_fetch_or:
531 opName = cir::AtomicFetchOp::getOperationName();
532 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
533 cir::AtomicFetchKind::Or);
534 break;
535
536 case AtomicExpr::AO__atomic_xor_fetch:
537 fetchFirst = false;
538 [[fallthrough]];
539 case AtomicExpr::AO__c11_atomic_fetch_xor:
540 case AtomicExpr::AO__atomic_fetch_xor:
541 opName = cir::AtomicFetchOp::getOperationName();
542 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
543 cir::AtomicFetchKind::Xor);
544 break;
545
546 case AtomicExpr::AO__atomic_nand_fetch:
547 fetchFirst = false;
548 [[fallthrough]];
549 case AtomicExpr::AO__c11_atomic_fetch_nand:
550 case AtomicExpr::AO__atomic_fetch_nand:
551 opName = cir::AtomicFetchOp::getOperationName();
552 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
553 cir::AtomicFetchKind::Nand);
554 break;
555
556 case AtomicExpr::AO__atomic_test_and_set: {
557 auto op = cir::AtomicTestAndSetOp::create(
558 builder, loc, ptr.getPointer(), order,
559 builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
560 expr->isVolatile());
561 builder.createStore(loc, op, dest);
562 return;
563 }
564
565 case AtomicExpr::AO__atomic_clear: {
566 cir::AtomicClearOp::create(
567 builder, loc, ptr.getPointer(), order,
568 builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
569 expr->isVolatile());
570 return;
571 }
572
573 case AtomicExpr::AO__opencl_atomic_init:
574
575 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
576 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
577
578 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
579 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
580
581 case AtomicExpr::AO__scoped_atomic_compare_exchange:
582 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
583
584 case AtomicExpr::AO__opencl_atomic_load:
585 case AtomicExpr::AO__hip_atomic_load:
586
587 case AtomicExpr::AO__opencl_atomic_store:
588 case AtomicExpr::AO__hip_atomic_store:
589
590 case AtomicExpr::AO__hip_atomic_exchange:
591 case AtomicExpr::AO__opencl_atomic_exchange:
592 case AtomicExpr::AO__scoped_atomic_exchange_n:
593 case AtomicExpr::AO__scoped_atomic_exchange:
594
595 case AtomicExpr::AO__scoped_atomic_add_fetch:
596
597 case AtomicExpr::AO__hip_atomic_fetch_add:
598 case AtomicExpr::AO__opencl_atomic_fetch_add:
599 case AtomicExpr::AO__scoped_atomic_fetch_add:
600
601 case AtomicExpr::AO__scoped_atomic_sub_fetch:
602
603 case AtomicExpr::AO__hip_atomic_fetch_sub:
604 case AtomicExpr::AO__opencl_atomic_fetch_sub:
605 case AtomicExpr::AO__scoped_atomic_fetch_sub:
606
607 case AtomicExpr::AO__scoped_atomic_min_fetch:
608
609 case AtomicExpr::AO__hip_atomic_fetch_min:
610 case AtomicExpr::AO__opencl_atomic_fetch_min:
611 case AtomicExpr::AO__scoped_atomic_fetch_min:
612
613 case AtomicExpr::AO__scoped_atomic_max_fetch:
614
615 case AtomicExpr::AO__hip_atomic_fetch_max:
616 case AtomicExpr::AO__opencl_atomic_fetch_max:
617 case AtomicExpr::AO__scoped_atomic_fetch_max:
618
619 case AtomicExpr::AO__scoped_atomic_and_fetch:
620
621 case AtomicExpr::AO__hip_atomic_fetch_and:
622 case AtomicExpr::AO__opencl_atomic_fetch_and:
623 case AtomicExpr::AO__scoped_atomic_fetch_and:
624
625 case AtomicExpr::AO__scoped_atomic_or_fetch:
626
627 case AtomicExpr::AO__hip_atomic_fetch_or:
628 case AtomicExpr::AO__opencl_atomic_fetch_or:
629 case AtomicExpr::AO__scoped_atomic_fetch_or:
630
631 case AtomicExpr::AO__scoped_atomic_xor_fetch:
632
633 case AtomicExpr::AO__hip_atomic_fetch_xor:
634 case AtomicExpr::AO__opencl_atomic_fetch_xor:
635 case AtomicExpr::AO__scoped_atomic_fetch_xor:
636
637 case AtomicExpr::AO__scoped_atomic_nand_fetch:
638
639 case AtomicExpr::AO__scoped_atomic_fetch_nand:
640
641 case AtomicExpr::AO__scoped_atomic_fetch_uinc:
642 case AtomicExpr::AO__scoped_atomic_fetch_udec:
643 case AtomicExpr::AO__atomic_fetch_uinc:
644 case AtomicExpr::AO__atomic_fetch_udec:
645 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
646 return;
647 }
648
649 assert(!opName.empty() && "expected operation name to build");
650 mlir::Value loadVal1 = builder.createLoad(loc, val1);
651
652 SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
653 SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
654 mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
655 atomicOperands, atomicResTys);
656
657 if (fetchAttr)
658 rmwOp->setAttr("binop", fetchAttr);
659 rmwOp->setAttr("mem_order", orderAttr);
660 if (expr->isVolatile())
661 rmwOp->setAttr("is_volatile", builder.getUnitAttr());
662 if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
663 rmwOp->setAttr("fetch_first", builder.getUnitAttr());
664
665 mlir::Value result = rmwOp->getResult(0);
666 builder.createStore(loc, result, dest);
667}
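// For illustration, the `fetch_first` flag set above distinguishes the two
// builtin families (hypothetical example):
//
//   int old = __atomic_fetch_add(&x, 1, __ATOMIC_SEQ_CST); // returns old value
//   int now = __atomic_add_fetch(&x, 1, __ATOMIC_SEQ_CST); // returns new value
//
// Both lower to the same CIR atomic-fetch operation; only the presence of the
// `fetch_first` attribute differs.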
668
669// Map clang sync scope to CIR sync scope.
670static cir::SyncScopeKind convertSyncScopeToCIR(CIRGenFunction &cgf,
671 SourceRange range,
672 clang::SyncScope scope) {
673 switch (scope) {
674 default: {
676 cgf.cgm.errorNYI(range, "convertSyncScopeToCIR: unhandled sync scope");
677 return cir::SyncScopeKind::System;
678 }
679
 680  case SyncScope::SingleScope:
 681    return cir::SyncScopeKind::SingleThread;
 682  case SyncScope::SystemScope:
 683    return cir::SyncScopeKind::System;
684 }
685}
686
687static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
688                         Address ptr, Address val1, Address val2,
689 Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
690 cir::MemOrder order,
691 const std::optional<Expr::EvalResult> &scopeConst,
692 mlir::Value scopeValue) {
693 std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
694
695 if (!scopeModel) {
696 emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
697 size, order, cir::SyncScopeKind::System);
698 return;
699 }
700
701 if (scopeConst.has_value()) {
702 cir::SyncScopeKind mappedScope = convertSyncScopeToCIR(
703 cgf, expr->getScope()->getSourceRange(),
704 scopeModel->map(scopeConst->Val.getInt().getZExtValue()));
705 emitAtomicOp(cgf, expr, dest, ptr, val1, val2, isWeakExpr, failureOrderExpr,
706 size, order, mappedScope);
707 return;
708 }
709
711 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: dynamic sync scope");
712}
713
714static std::optional<cir::MemOrder>
715getEffectiveAtomicMemOrder(cir::MemOrder oriOrder, bool isStore, bool isLoad,
716 bool isFence) {
 717  // Some memory orders are not supported by particular atomic operations:
 718  // {memory_order_relaxed} is not valid for fence operations.
719 // {memory_order_consume, memory_order_acquire} are not valid for write-only
720 // operations.
721 // {memory_order_release} is not valid for read-only operations.
722 // {memory_order_acq_rel} is only valid for read-write operations.
723 if (isStore) {
724 if (oriOrder == cir::MemOrder::Consume ||
725 oriOrder == cir::MemOrder::Acquire ||
726 oriOrder == cir::MemOrder::AcquireRelease)
727 return std::nullopt;
728 } else if (isLoad) {
729 if (oriOrder == cir::MemOrder::Release ||
730 oriOrder == cir::MemOrder::AcquireRelease)
731 return std::nullopt;
732 } else if (isFence) {
733 if (oriOrder == cir::MemOrder::Relaxed)
734 return std::nullopt;
735 }
 736  // memory_order_consume is not implemented; it is always treated like
 737  // memory_order_acquire.
738 if (oriOrder == cir::MemOrder::Consume)
739 return cir::MemOrder::Acquire;
740 return oriOrder;
741}
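// For illustration, a few concrete results of the mapping above:
//   store + memory_order_acquire      -> std::nullopt (invalid for write-only ops)
//   load  + memory_order_release      -> std::nullopt (invalid for read-only ops)
//   fence + memory_order_relaxed      -> std::nullopt (invalid for fences)
//   load / rmw + memory_order_consume -> memory_order_acquire
//   any other valid combination       -> returned unchanged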
742
743static void emitAtomicExprWithDynamicMemOrder(
744    CIRGenFunction &cgf, mlir::Value order, bool isStore, bool isLoad,
745 bool isFence, llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
746 if (!order)
747 return;
748 // The memory order is not known at compile-time. The atomic operations
749 // can't handle runtime memory orders; the memory order must be hard coded.
750 // Generate a "switch" statement that converts a runtime value into a
751 // compile-time value.
752 CIRGenBuilderTy &builder = cgf.getBuilder();
753 cir::SwitchOp::create(
754 builder, order.getLoc(), order,
755 [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
756 mlir::Block *switchBlock = builder.getBlock();
757
758 auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders) {
 759        // Check that all cases in this group share the same effective memory order.
760 for (int i = 1, e = caseOrders.size(); i < e; i++)
761 assert((getEffectiveAtomicMemOrder(caseOrders[i - 1], isStore,
762 isLoad, isFence) ==
763 getEffectiveAtomicMemOrder(caseOrders[i], isStore, isLoad,
764 isFence)) &&
765 "Effective memory order must be same!");
 766        // Emit the case label and the atomic operation if necessary.
767 if (caseOrders.empty()) {
768 emitMemOrderDefaultCaseLabel(builder, loc);
769 // There is no good way to report an unsupported memory order at
770 // runtime, hence the fallback to memory_order_relaxed.
771 if (!isFence)
772 emitAtomicOpFn(cir::MemOrder::Relaxed);
773 } else if (std::optional<cir::MemOrder> actualOrder =
774 getEffectiveAtomicMemOrder(caseOrders[0], isStore,
775 isLoad, isFence)) {
776 // Included in default case.
777 if (!isFence && actualOrder == cir::MemOrder::Relaxed)
778 return;
 779          // Create a case operation for the effective memory order. If there
 780          // are multiple cases in `caseOrders`, they must all map to the same
 781          // effective order; the caller must guarantee this.
782 emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
783 emitAtomicOpFn(actualOrder.value());
784 } else {
785 // Do nothing if (!caseOrders.empty() && !actualOrder)
786 return;
787 }
788 builder.createBreak(loc);
789 builder.setInsertionPointToEnd(switchBlock);
790 };
791
792 emitMemOrderCase(/*default:*/ {});
793 emitMemOrderCase({cir::MemOrder::Relaxed});
794 emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire});
795 emitMemOrderCase({cir::MemOrder::Release});
796 emitMemOrderCase({cir::MemOrder::AcquireRelease});
797 emitMemOrderCase({cir::MemOrder::SequentiallyConsistent});
798
799 builder.createYield(loc);
800 });
801}
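// For illustration, this path is taken when the order is only known at run
// time (hypothetical example):
//
//   int load_with(int *p, int order) { return __atomic_load_n(p, order); }
//
// The generated CIR switch dispatches on `order`: the default region handles
// relaxed (and any invalid value), and a separate case region is emitted for
// each remaining ordering that is valid for the operation, each with a
// hard-coded memory order.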
802
803void CIRGenFunction::emitAtomicExprWithMemOrder(
804    const Expr *memOrder, bool isStore, bool isLoad, bool isFence,
805 llvm::function_ref<void(cir::MemOrder)> emitAtomicOpFn) {
806 // Emit the memory order operand, and try to evaluate it as a constant.
807 Expr::EvalResult eval;
808 if (memOrder->EvaluateAsInt(eval, getContext())) {
809 uint64_t constOrder = eval.Val.getInt().getZExtValue();
810 // We should not ever get to a case where the ordering isn't a valid CABI
811 // value, but it's hard to enforce that in general.
812 if (!cir::isValidCIRAtomicOrderingCABI(constOrder))
813 return;
814 cir::MemOrder oriOrder = static_cast<cir::MemOrder>(constOrder);
815 if (std::optional<cir::MemOrder> actualOrder =
816 getEffectiveAtomicMemOrder(oriOrder, isStore, isLoad, isFence))
817 emitAtomicOpFn(actualOrder.value());
818 return;
819 }
820
821 // Otherwise, handle variable memory ordering. Emit `SwitchOp` to convert
822 // dynamic value to static value.
823 mlir::Value dynOrder = emitScalarExpr(memOrder);
824 emitAtomicExprWithDynamicMemOrder(*this, dynOrder, isStore, isLoad, isFence,
825 emitAtomicOpFn);
826}
827
828RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
829  QualType atomicTy = e->getPtr()->getType()->getPointeeType();
830 QualType memTy = atomicTy;
831 if (const auto *ty = atomicTy->getAs<AtomicType>())
832 memTy = ty->getValueType();
833
834 Expr *isWeakExpr = nullptr;
835 Expr *orderFailExpr = nullptr;
836
837 Address val1 = Address::invalid();
838 Address val2 = Address::invalid();
839 Address dest = Address::invalid();
 840  Address ptr = emitPointerWithAlignment(e->getPtr());
 841
843 if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
844 LValue lvalue = makeAddrLValue(ptr, atomicTy);
845 emitAtomicInit(e->getVal1(), lvalue);
846 return RValue::get(nullptr);
847 }
848
849 TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
850 uint64_t size = typeInfo.Width.getQuantity();
851
852 // Emit the sync scope operand, and try to evaluate it as a constant.
853 mlir::Value scope =
854 e->getScopeModel() ? emitScalarExpr(e->getScope()) : nullptr;
855 std::optional<Expr::EvalResult> scopeConst;
856 if (Expr::EvalResult eval;
857 e->getScopeModel() && e->getScope()->EvaluateAsInt(eval, getContext()))
858 scopeConst.emplace(std::move(eval));
859
860 bool shouldCastToIntPtrTy = true;
861
862 switch (e->getOp()) {
863 default:
864 cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
865 return RValue::get(nullptr);
866
867 case AtomicExpr::AO__c11_atomic_init:
868 llvm_unreachable("already handled above with emitAtomicInit");
869
870 case AtomicExpr::AO__atomic_load_n:
871 case AtomicExpr::AO__scoped_atomic_load_n:
872 case AtomicExpr::AO__c11_atomic_load:
873 case AtomicExpr::AO__atomic_test_and_set:
874 case AtomicExpr::AO__atomic_clear:
875 break;
876
877 case AtomicExpr::AO__atomic_load:
878 case AtomicExpr::AO__scoped_atomic_load:
 879    dest = emitPointerWithAlignment(e->getVal1());
 880    break;
881
882 case AtomicExpr::AO__atomic_store:
883 case AtomicExpr::AO__scoped_atomic_store:
 884    val1 = emitPointerWithAlignment(e->getVal1());
 885    break;
886
887 case AtomicExpr::AO__atomic_exchange:
 888    val1 = emitPointerWithAlignment(e->getVal1());
 889    dest = emitPointerWithAlignment(e->getVal2());
 890    break;
891
892 case AtomicExpr::AO__atomic_compare_exchange:
893 case AtomicExpr::AO__atomic_compare_exchange_n:
894 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
895 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
 896    val1 = emitPointerWithAlignment(e->getVal1());
 897    if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
898 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
 899      val2 = emitPointerWithAlignment(e->getVal2());
 900    else
901 val2 = emitValToTemp(*this, e->getVal2());
902 orderFailExpr = e->getOrderFail();
903 if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
904 e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
905 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
906 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
907 isWeakExpr = e->getWeak();
908 break;
909
910 case AtomicExpr::AO__c11_atomic_fetch_add:
911 case AtomicExpr::AO__c11_atomic_fetch_sub:
912 if (memTy->isPointerType()) {
913 cgm.errorNYI(e->getSourceRange(),
914 "atomic fetch-and-add and fetch-and-sub for pointers");
915 return RValue::get(nullptr);
916 }
917 [[fallthrough]];
918 case AtomicExpr::AO__atomic_fetch_add:
919 case AtomicExpr::AO__atomic_fetch_max:
920 case AtomicExpr::AO__atomic_fetch_min:
921 case AtomicExpr::AO__atomic_fetch_sub:
922 case AtomicExpr::AO__atomic_add_fetch:
923 case AtomicExpr::AO__atomic_max_fetch:
924 case AtomicExpr::AO__atomic_min_fetch:
925 case AtomicExpr::AO__atomic_sub_fetch:
926 case AtomicExpr::AO__c11_atomic_fetch_max:
927 case AtomicExpr::AO__c11_atomic_fetch_min:
928 shouldCastToIntPtrTy = !memTy->isFloatingType();
929 [[fallthrough]];
930
931 case AtomicExpr::AO__atomic_fetch_and:
932 case AtomicExpr::AO__atomic_fetch_nand:
933 case AtomicExpr::AO__atomic_fetch_or:
934 case AtomicExpr::AO__atomic_fetch_xor:
935 case AtomicExpr::AO__atomic_and_fetch:
936 case AtomicExpr::AO__atomic_nand_fetch:
937 case AtomicExpr::AO__atomic_or_fetch:
938 case AtomicExpr::AO__atomic_xor_fetch:
939 case AtomicExpr::AO__atomic_exchange_n:
940 case AtomicExpr::AO__atomic_store_n:
941 case AtomicExpr::AO__c11_atomic_fetch_and:
942 case AtomicExpr::AO__c11_atomic_fetch_nand:
943 case AtomicExpr::AO__c11_atomic_fetch_or:
944 case AtomicExpr::AO__c11_atomic_fetch_xor:
945 case AtomicExpr::AO__c11_atomic_exchange:
946 case AtomicExpr::AO__c11_atomic_store:
947 case AtomicExpr::AO__scoped_atomic_store_n:
948 val1 = emitValToTemp(*this, e->getVal1());
949 break;
950 }
951
952 QualType resultTy = e->getType().getUnqualifiedType();
953
954 // The inlined atomics only function on iN types, where N is a power of 2. We
955 // need to make sure (via temporaries if necessary) that all incoming values
956 // are compatible.
957 LValue atomicValue = makeAddrLValue(ptr, atomicTy);
958 AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
959
960 if (shouldCastToIntPtrTy) {
961 ptr = atomics.castToAtomicIntPointer(ptr);
962 if (val1.isValid())
963 val1 = atomics.convertToAtomicIntPointer(val1);
964 }
965 if (dest.isValid()) {
966 if (shouldCastToIntPtrTy)
967 dest = atomics.castToAtomicIntPointer(dest);
968 } else if (e->isCmpXChg()) {
969 dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
970 } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
971 dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
972 "test_and_set.bool");
973 } else if (!resultTy->isVoidType()) {
974 dest = atomics.createTempAlloca();
975 if (shouldCastToIntPtrTy)
976 dest = atomics.castToAtomicIntPointer(dest);
977 }
978
979 bool powerOf2Size = (size & (size - 1)) == 0;
980 bool useLibCall = !powerOf2Size || (size > 16);
981
982 // For atomics larger than 16 bytes, emit a libcall from the frontend. This
983 // avoids the overhead of dealing with excessively-large value types in IR.
984 // Non-power-of-2 values also lower to libcall here, as they are not currently
985 // permitted in IR instructions (although that constraint could be relaxed in
986 // the future). For other cases where a libcall is required on a given
987 // platform, we let the backend handle it (this includes handling for all of
 988  // the size-optimized libcall variants, which are only valid up to 16 bytes).
989 //
990 // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
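  // For illustration: with the rule above, an 8- or 16-byte atomic (a power of
  // two no larger than 16) is emitted inline, while a 24-byte _Atomic struct,
  // or any non-power-of-two size such as 12 bytes, takes the libcall path.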
991 if (useLibCall) {
993 cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
994 return RValue::get(nullptr);
995 }
996
997 bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
998 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
999 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
1000 e->getOp() == AtomicExpr::AO__atomic_store ||
1001 e->getOp() == AtomicExpr::AO__atomic_store_n ||
1002 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
1003 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
1004 e->getOp() == AtomicExpr::AO__atomic_clear;
1005 bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
1006 e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
1007 e->getOp() == AtomicExpr::AO__hip_atomic_load ||
1008 e->getOp() == AtomicExpr::AO__atomic_load ||
1009 e->getOp() == AtomicExpr::AO__atomic_load_n ||
1010 e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
1011 e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
1012
1013 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
1014 emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
1015 size, memOrder, scopeConst, scope);
1016 };
1017 emitAtomicExprWithMemOrder(e->getOrder(), isStore, isLoad, /*isFence*/ false,
1018 emitAtomicOpCallBackFn);
1019
1020 if (resultTy->isVoidType())
1021 return RValue::get(nullptr);
1022
1023 return convertTempToRValue(
1024 dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
1025 e->getExprLoc());
1026}
1027
1028void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
1029 bool isVolatile = dest.isVolatileQualified();
1030 auto order = cir::MemOrder::SequentiallyConsistent;
1031 if (!dest.getType()->isAtomicType()) {
1033 }
1034 return emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
1035}
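// For illustration, the kind of source construct this entry point handles
// (hypothetical example):
//
//   _Atomic int g;
//   void set(int v) { g = v; }   // sequentially consistent atomic store
//
// which then forwards to the five-argument overload below.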
1036
1037/// Emit a store to an l-value of atomic type.
1038///
1039/// Note that the r-value is expected to be an r-value of the atomic type; this
1040/// means that for aggregate r-values, it should include storage for any padding
1041/// that was necessary.
1042void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
1043                                     cir::MemOrder order, bool isVolatile,
1044 bool isInit) {
1045 // If this is an aggregate r-value, it should agree in type except
1046 // maybe for address-space qualification.
1047 mlir::Location loc = dest.getPointer().getLoc();
1048 assert(!rvalue.isAggregate() ||
1049         rvalue.getAggregateAddress().getElementType() ==
1050             dest.getAddress().getElementType());
1051
1052 AtomicInfo atomics(*this, dest, loc);
1053 LValue lvalue = atomics.getAtomicLValue();
1054
1055 if (lvalue.isSimple()) {
1056 // If this is an initialization, just put the value there normally.
1057 if (isInit) {
1058 atomics.emitCopyIntoMemory(rvalue);
1059 return;
1060 }
1061
1062 // Check whether we should use a library call.
1063 if (atomics.shouldUseLibCall()) {
1065 cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
1066 return;
1067 }
1068
1069 // Okay, we're doing this natively.
1070 mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);
1071
1072 // Do the atomic store.
1073 Address addr = atomics.getAtomicAddress();
1074 if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
1075 if (shouldCastToInt(value.getType(), /*CmpXchg=*/false)) {
1076 addr = atomics.castToAtomicIntPointer(addr);
1077 valueToStore =
1078 builder.createIntCast(valueToStore, addr.getElementType());
1079 }
1080 }
1081 cir::StoreOp store = builder.createStore(loc, valueToStore, addr);
1082
1083 // Initializations don't need to be atomic.
1084 if (!isInit) {
1086 store.setMemOrder(order);
1087 }
1088
1089 // Other decoration.
1090 if (isVolatile)
1091 store.setIsVolatile(true);
1092
1094 return;
1095 }
1096
1097 cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
1099}
1100
1101void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
1102  AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
1103
1104 switch (atomics.getEvaluationKind()) {
1105 case cir::TEK_Scalar: {
1106 mlir::Value value = emitScalarExpr(init);
1107 atomics.emitCopyIntoMemory(RValue::get(value));
1108 return;
1109 }
1110
1111 case cir::TEK_Complex: {
1112 mlir::Value value = emitComplexExpr(init);
1113 atomics.emitCopyIntoMemory(RValue::get(value));
1114 return;
1115 }
1116
1117 case cir::TEK_Aggregate: {
1118 // Fix up the destination if the initializer isn't an expression
1119 // of atomic type.
1120 bool zeroed = false;
1121 if (!init->getType()->isAtomicType()) {
1122 zeroed = atomics.emitMemSetZeroIfNecessary();
1123 dest = atomics.projectValue();
1124 }
1125
1126 // Evaluate the expression directly into the destination.
1127    AggValueSlot slot = AggValueSlot::forLValue(
1128        dest, AggValueSlot::IsNotDestructed,
1129        AggValueSlot::IsNotAliased,
1130        AggValueSlot::DoesNotOverlap,
1131        zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
1132
1133 emitAggExpr(init, slot);
1134 return;
1135 }
1136 }
1137
1138 llvm_unreachable("bad evaluation kind");
1139}
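// For illustration, emitAtomicInit is the non-atomic initialization path; it
// is reached from emitAtomicExpr for the C11 builtin (hypothetical example):
//
//   _Atomic int a;
//   __c11_atomic_init(&a, 42);   // plain store, no ordering required
//
// The TEK_Scalar case above simply copies the value into the atomic-layout
// memory via emitCopyIntoMemory.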