1//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "clang/CIR/MissingFeatures.h"
15
16using namespace clang;
17using namespace clang::CIRGen;
18using namespace cir;
19
20namespace {
21class AtomicInfo {
22 CIRGenFunction &cgf;
23 QualType atomicTy;
24 QualType valueTy;
25 uint64_t atomicSizeInBits = 0;
26 uint64_t valueSizeInBits = 0;
27 CharUnits atomicAlign;
28 CharUnits valueAlign;
29 TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
30 bool useLibCall = true;
31 LValue lvalue;
32 mlir::Location loc;
33
34public:
35 AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
36 : cgf(cgf), loc(loc) {
37 assert(!lvalue.isGlobalReg());
38 ASTContext &ctx = cgf.getContext();
39 if (lvalue.isSimple()) {
40 atomicTy = lvalue.getType();
41 if (auto *ty = atomicTy->getAs<AtomicType>())
42 valueTy = ty->getValueType();
43 else
44 valueTy = atomicTy;
45 evaluationKind = cgf.getEvaluationKind(valueTy);
46
47 TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
48 TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
49 uint64_t valueAlignInBits = valueTypeInfo.Align;
50 uint64_t atomicAlignInBits = atomicTypeInfo.Align;
51 valueSizeInBits = valueTypeInfo.Width;
52 atomicSizeInBits = atomicTypeInfo.Width;
53 assert(valueSizeInBits <= atomicSizeInBits);
54 assert(valueAlignInBits <= atomicAlignInBits);
55
56 atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
57 valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
58 if (lvalue.getAlignment().isZero())
59 lvalue.setAlignment(atomicAlign);
60
61 this->lvalue = lvalue;
62 } else {
64 cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
65 }
66 useLibCall = !ctx.getTargetInfo().hasBuiltinAtomic(
67 atomicSizeInBits, ctx.toBits(lvalue.getAlignment()));
68 }
69
70 QualType getValueType() const { return valueTy; }
71 CharUnits getAtomicAlignment() const { return atomicAlign; }
72 TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
73 mlir::Value getAtomicPointer() const {
74 if (lvalue.isSimple())
75 return lvalue.getPointer();
76 assert(!cir::MissingFeatures::atomicInfoGetAtomicPointer());
77 return nullptr;
78 }
79 bool shouldUseLibCall() const { return useLibCall; }
80 const LValue &getAtomicLValue() const { return lvalue; }
81 Address getAtomicAddress() const {
82 mlir::Type elemTy;
83 if (lvalue.isSimple()) {
84 elemTy = lvalue.getAddress().getElementType();
85 } else {
86 assert(!cir::MissingFeatures::atomicInfoGetAtomicAddress());
87 cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
88 }
89 return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
90 }
91
92 /// Is the atomic size larger than the underlying value type?
93 ///
94 /// Note that the absence of padding does not mean that atomic
95 /// objects are completely interchangeable with non-atomic
96 /// objects: we might have promoted the alignment of a type
97 /// without making it bigger.
98 bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }
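  // Illustrative example (not part of the original source): on 32-bit x86
  // Linux, 'long long' is 8 bytes with 4-byte alignment, while
  // '_Atomic long long' keeps the 8-byte size but is promoted to 8-byte
  // alignment, so hasPadding() is false even though the atomic and
  // non-atomic layouts differ.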
99
100 bool emitMemSetZeroIfNecessary() const;
101
102 mlir::Value getScalarRValValueOrNull(RValue rvalue) const;
103
104 /// Cast the given pointer to an integer pointer suitable for atomic
105 /// operations on the source.
106 Address castToAtomicIntPointer(Address addr) const;
107
108 /// If addr is compatible with the iN that will be used for an atomic
109 /// operation, bitcast it. Otherwise, create a temporary that is suitable and
110 /// copy the value across.
111 Address convertToAtomicIntPointer(Address addr) const;
112
113 /// Converts an r-value to an integer value.
114 mlir::Value convertRValueToInt(RValue rvalue, bool cmpxchg = false) const;
115
116 /// Copy an atomic r-value into atomic-layout memory.
117 void emitCopyIntoMemory(RValue rvalue) const;
118
119 /// Project an l-value down to the value field.
120 LValue projectValue() const {
121 assert(lvalue.isSimple());
122 Address addr = getAtomicAddress();
123 if (hasPadding()) {
124 cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
125 }
126
128 return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
129 }
130
131 /// Creates temp alloca for intermediate operations on atomic value.
132 Address createTempAlloca() const;
133
134private:
135 bool requiresMemSetZero(mlir::Type ty) const;
136};
137} // namespace
138
139// This function emits any expression (scalar, complex, or aggregate)
140// into a temporary alloca.
141 static Address emitValToTemp(CIRGenFunction &cgf, Expr *e) {
142 Address declPtr = cgf.createMemTemp(
143 e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
144 cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
145 /*Init*/ true);
146 return declPtr;
147}
148
149/// Does a store of the given IR type modify the full expected width?
150static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
151 uint64_t expectedSize) {
152 return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
153}
154
155/// Does the atomic type require memsetting to zero before initialization?
156///
157/// The IR type is provided as a way of making certain queries faster.
158bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
159 // If the atomic type has size padding, we definitely need a memset.
160 if (hasPadding())
161 return true;
162
163 // Otherwise, do some simple heuristics to try to avoid it:
164 switch (getEvaluationKind()) {
165 // For scalars and complexes, check whether the store size of the
166 // type uses the full size.
167 case cir::TEK_Scalar:
168 return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
169 case cir::TEK_Complex:
170 return !isFullSizeType(cgf.cgm,
171 mlir::cast<cir::ComplexType>(ty).getElementType(),
172 atomicSizeInBits / 2);
173 // Padding in structs has an undefined bit pattern. User beware.
174 case cir::TEK_Aggregate:
175 return false;
176 }
177 llvm_unreachable("bad evaluation kind");
178}
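// Illustrative example (not part of the original source): for
//   _Atomic struct { char c[3]; } x;
// the value occupies 3 bytes but the atomic representation is rounded up to
// 4 bytes, so hasPadding() is true and requiresMemSetZero() returns true:
// the buffer must be zeroed so the padding byte has a defined bit pattern.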
179
180Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
181 mlir::Type ty = addr.getElementType();
182 uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
183 if (sourceSizeInBits != atomicSizeInBits) {
184 cgf.cgm.errorNYI(
185 loc,
186 "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
187 }
188
189 return castToAtomicIntPointer(addr);
190}
191
192Address AtomicInfo::createTempAlloca() const {
193 Address tempAlloca = cgf.createMemTemp(
194 (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
195 : atomicTy,
196 getAtomicAlignment(), loc, "atomic-temp");
197
198 // Cast to pointer to value type for bitfields.
199 if (lvalue.isBitField()) {
200 cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
201 }
202
203 return tempAlloca;
204}
205
206mlir::Value AtomicInfo::getScalarRValValueOrNull(RValue rvalue) const {
207 if (rvalue.isScalar() && (!hasPadding() || !lvalue.isSimple()))
208 return rvalue.getValue();
209 return nullptr;
210}
211
212Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
213 auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
214 // Don't bother with int casts if the integer size is the same.
215 if (intTy && intTy.getWidth() == atomicSizeInBits)
216 return addr;
217 auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
218 return addr.withElementType(cgf.getBuilder(), ty);
219}
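// Illustrative example (not part of the original source): for an
// _Atomic(float) object, atomicSizeInBits is 32 and the address's element type
// is not a cir::IntType, so the address is rebound to a 32-bit unsigned
// integer element type and the atomic operation acts on the integer view of
// the same storage.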
220
221bool AtomicInfo::emitMemSetZeroIfNecessary() const {
222 assert(lvalue.isSimple());
223 Address addr = lvalue.getAddress();
224 if (!requiresMemSetZero(addr.getElementType()))
225 return false;
226
227 cgf.cgm.errorNYI(loc,
228 "AtomicInfo::emitMemSetZeroIfNecaessary: emit memset zero");
229 return false;
230}
231
232 /// Return true if \param valueTy is a type that should be cast to integer
233 /// around the atomic memory operation. If \param cmpxchg is true, a
234 /// floating-point type is also cast, because that instruction cannot take
235 /// floating-point operands. TODO: Allow compare-and-exchange and FP - see
236 /// comment in CIRGenAtomicExpandPass.cpp.
237static bool shouldCastToInt(mlir::Type valueTy, bool cmpxchg) {
238 if (cir::isAnyFloatingPointType(valueTy))
239 return isa<cir::FP80Type>(valueTy) || cmpxchg;
240 return !isa<cir::IntType>(valueTy) && !isa<cir::PointerType>(valueTy);
241}
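// Illustrative examples (not part of the original source):
//   - an integer or pointer operand is never cast;
//   - a float/double operand is cast only when cmpxchg is true, because the
//     compare-and-exchange instruction cannot take floating-point operands;
//   - an x87 80-bit float (cir::FP80Type) is always cast.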
242
243mlir::Value AtomicInfo::convertRValueToInt(RValue rvalue, bool cmpxchg) const {
244 // If we've got a scalar value of the right size, try to avoid going
245 // through memory. Floats get cast if needed by AtomicExpandPass.
246 if (mlir::Value value = getScalarRValValueOrNull(rvalue)) {
247 if (!shouldCastToInt(value.getType(), cmpxchg))
248 return cgf.emitToMemory(value, valueTy);
249
250 cgf.cgm.errorNYI(
251 loc, "AtomicInfo::convertRValueToInt: cast scalar rvalue to int");
252 return nullptr;
253 }
254
255 cgf.cgm.errorNYI(
256 loc, "AtomicInfo::convertRValueToInt: cast non-scalar rvalue to int");
257 return nullptr;
258}
259
260/// Copy an r-value into memory as part of storing to an atomic type.
261/// This needs to create a bit-pattern suitable for atomic operations.
262void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
263 assert(lvalue.isSimple());
264
265 // If we have an r-value, the rvalue should be of the atomic type,
266 // which means that the caller is responsible for having zeroed
267 // any padding. Just do an aggregate copy of that type.
268 if (rvalue.isAggregate()) {
269 cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
270 return;
271 }
272
273 // Okay, otherwise we're copying stuff.
274
275 // Zero out the buffer if necessary.
276 emitMemSetZeroIfNecessary();
277
278 // Drill past the padding if present.
279 LValue tempLValue = projectValue();
280
281 // Okay, store the rvalue in.
282 if (rvalue.isScalar()) {
283 cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
284 } else {
285 cgf.cgm.errorNYI("copying complex into atomic lvalue");
286 }
287}
288
289 static void emitMemOrderDefaultCaseLabel(CIRGenBuilderTy &builder,
290 mlir::Location loc) {
291 mlir::ArrayAttr ordersAttr = builder.getArrayAttr({});
292 mlir::OpBuilder::InsertPoint insertPoint;
293 cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Default,
294 insertPoint);
295 builder.restoreInsertionPoint(insertPoint);
296}
297
298// Create a "case" operation with the given list of orders as its values. Also
299// create the region that will hold the body of the switch-case label.
300static void emitMemOrderCaseLabel(CIRGenBuilderTy &builder, mlir::Location loc,
301 mlir::Type orderType,
302 llvm::ArrayRef<cir::MemOrder> orders) {
303 llvm::SmallVector<mlir::Attribute> orderAttrs;
304 for (cir::MemOrder order : orders)
305 orderAttrs.push_back(cir::IntAttr::get(orderType, static_cast<int>(order)));
306 mlir::ArrayAttr ordersAttr = builder.getArrayAttr(orderAttrs);
307
308 mlir::OpBuilder::InsertPoint insertPoint;
309 cir::CaseOp::create(builder, loc, ordersAttr, cir::CaseOpKind::Anyof,
310 insertPoint);
311 builder.restoreInsertionPoint(insertPoint);
312}
313
314static void emitAtomicCmpXchg(CIRGenFunction &cgf, AtomicExpr *e, bool isWeak,
315 Address dest, Address ptr, Address val1,
316 Address val2, uint64_t size,
317 cir::MemOrder successOrder,
318 cir::MemOrder failureOrder) {
319 mlir::Location loc = cgf.getLoc(e->getSourceRange());
320
321 CIRGenBuilderTy &builder = cgf.getBuilder();
322 mlir::Value expected = builder.createLoad(loc, val1);
323 mlir::Value desired = builder.createLoad(loc, val2);
324
325 auto cmpxchg = cir::AtomicCmpXchgOp::create(
326 builder, loc, expected.getType(), builder.getBoolTy(), ptr.getPointer(),
327 expected, desired,
328 cir::MemOrderAttr::get(&cgf.getMLIRContext(), successOrder),
329 cir::MemOrderAttr::get(&cgf.getMLIRContext(), failureOrder),
330 builder.getI64IntegerAttr(ptr.getAlignment().getAsAlign().value()));
331
332 cmpxchg.setIsVolatile(e->isVolatile());
333 cmpxchg.setWeak(isWeak);
334
335 mlir::Value failed = builder.createNot(cmpxchg.getSuccess());
336 cir::IfOp::create(builder, loc, failed, /*withElseRegion=*/false,
337 [&](mlir::OpBuilder &, mlir::Location) {
338 auto ptrTy = mlir::cast<cir::PointerType>(
339 val1.getPointer().getType());
340 if (val1.getElementType() != ptrTy.getPointee()) {
341 val1 = val1.withPointer(builder.createPtrBitcast(
342 val1.getPointer(), val1.getElementType()));
343 }
344 builder.createStore(loc, cmpxchg.getOld(), val1);
345 builder.createYield(loc);
346 });
347
348 // Update the memory at Dest with Success's value.
349 cgf.emitStoreOfScalar(cmpxchg.getSuccess(),
350 cgf.makeAddrLValue(dest, e->getType()),
351 /*isInit=*/false);
352}
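// Illustrative C-level example (not part of the original source):
//   _Atomic int a; int expected = 0;
//   _Bool ok = __c11_atomic_compare_exchange_strong(
//       &a, &expected, 1, __ATOMIC_SEQ_CST, __ATOMIC_ACQUIRE);
// lowers to a cir::AtomicCmpXchgOp; the 'old' result is written back to
// 'expected' only when the exchange failed, and the success flag is stored
// into the bool result slot ('dest').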
353
354 static void emitAtomicCmpXchgFailureSet(CIRGenFunction &cgf, AtomicExpr *e,
355 bool isWeak, Address dest, Address ptr,
356 Address val1, Address val2,
357 Expr *failureOrderExpr, uint64_t size,
358 cir::MemOrder successOrder) {
359 Expr::EvalResult failureOrderEval;
360 if (failureOrderExpr->EvaluateAsInt(failureOrderEval, cgf.getContext())) {
361 uint64_t failureOrderInt = failureOrderEval.Val.getInt().getZExtValue();
362
363 cir::MemOrder failureOrder;
364 if (!cir::isValidCIRAtomicOrderingCABI(failureOrderInt)) {
365 failureOrder = cir::MemOrder::Relaxed;
366 } else {
367 switch ((cir::MemOrder)failureOrderInt) {
368 case cir::MemOrder::Relaxed:
369 // 31.7.2.18: "The failure argument shall not be memory_order_release
370 // nor memory_order_acq_rel". Fall back to monotonic.
371 case cir::MemOrder::Release:
372 case cir::MemOrder::AcquireRelease:
373 failureOrder = cir::MemOrder::Relaxed;
374 break;
375 case cir::MemOrder::Consume:
376 case cir::MemOrder::Acquire:
377 failureOrder = cir::MemOrder::Acquire;
378 break;
379 case cir::MemOrder::SequentiallyConsistent:
380 failureOrder = cir::MemOrder::SequentiallyConsistent;
381 break;
382 }
383 }
384
385 // Prior to C++17, "the failure argument shall be no stronger than the
386 // success argument". This condition has been lifted and the only
387 // precondition is 31.7.2.18. Effectively treat this as a DR and skip
388 // language version checks.
389 emitAtomicCmpXchg(cgf, e, isWeak, dest, ptr, val1, val2, size, successOrder,
390 failureOrder);
391 return;
392 }
393
395 cgf.cgm.errorNYI(e->getSourceRange(),
396 "emitAtomicCmpXchgFailureSet: non-constant failure order");
397}
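// Illustrative example (not part of the original source): in
//   __atomic_compare_exchange_n(p, &e, d, /*weak=*/0,
//                               __ATOMIC_ACQ_REL, __ATOMIC_RELEASE);
// the constant failure order (release) is not permitted for the failure case,
// so it is mapped to relaxed before calling emitAtomicCmpXchg with acq_rel as
// the success order.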
398
399 static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest,
400 Address ptr, Address val1, Address val2,
401 Expr *isWeakExpr, Expr *failureOrderExpr, int64_t size,
402 cir::MemOrder order) {
403 std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
404 if (scopeModel) {
405 assert(!cir::MissingFeatures::atomicScope());
406 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
407 return;
408 }
409
411 llvm::StringRef opName;
412
413 CIRGenBuilderTy &builder = cgf.getBuilder();
414 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
415 auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
416 cir::AtomicFetchKindAttr fetchAttr;
417 bool fetchFirst = true;
418
419 switch (expr->getOp()) {
420 case AtomicExpr::AO__c11_atomic_init:
421 llvm_unreachable("already handled!");
422
423 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
424 emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/false, dest, ptr, val1,
425 val2, failureOrderExpr, size, order);
426 return;
427
428 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
429 emitAtomicCmpXchgFailureSet(cgf, expr, /*isWeak=*/true, dest, ptr, val1,
430 val2, failureOrderExpr, size, order);
431 return;
432
433 case AtomicExpr::AO__atomic_compare_exchange:
434 case AtomicExpr::AO__atomic_compare_exchange_n: {
435 bool isWeak = false;
436 if (isWeakExpr->EvaluateAsBooleanCondition(isWeak, cgf.getContext())) {
437 emitAtomicCmpXchgFailureSet(cgf, expr, isWeak, dest, ptr, val1, val2,
438 failureOrderExpr, size, order);
439 } else {
441 cgf.cgm.errorNYI(expr->getSourceRange(),
442 "emitAtomicOp: non-constant isWeak");
443 }
444 return;
445 }
446
447 case AtomicExpr::AO__c11_atomic_load:
448 case AtomicExpr::AO__atomic_load_n:
449 case AtomicExpr::AO__atomic_load: {
450 cir::LoadOp load =
451 builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
452
454
455 load->setAttr("mem_order", orderAttr);
456
457 builder.createStore(loc, load->getResult(0), dest);
458 return;
459 }
460
461 case AtomicExpr::AO__c11_atomic_store:
462 case AtomicExpr::AO__atomic_store_n:
463 case AtomicExpr::AO__atomic_store: {
464 cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
465
467
468 builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
469 /*align=*/mlir::IntegerAttr{}, orderAttr);
470 return;
471 }
472
473 case AtomicExpr::AO__c11_atomic_exchange:
474 case AtomicExpr::AO__atomic_exchange_n:
475 case AtomicExpr::AO__atomic_exchange:
476 opName = cir::AtomicXchgOp::getOperationName();
477 break;
478
479 case AtomicExpr::AO__atomic_add_fetch:
480 fetchFirst = false;
481 [[fallthrough]];
482 case AtomicExpr::AO__c11_atomic_fetch_add:
483 case AtomicExpr::AO__atomic_fetch_add:
484 opName = cir::AtomicFetchOp::getOperationName();
485 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
486 cir::AtomicFetchKind::Add);
487 break;
488
489 case AtomicExpr::AO__atomic_sub_fetch:
490 fetchFirst = false;
491 [[fallthrough]];
492 case AtomicExpr::AO__c11_atomic_fetch_sub:
493 case AtomicExpr::AO__atomic_fetch_sub:
494 opName = cir::AtomicFetchOp::getOperationName();
495 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
496 cir::AtomicFetchKind::Sub);
497 break;
498
499 case AtomicExpr::AO__atomic_min_fetch:
500 fetchFirst = false;
501 [[fallthrough]];
502 case AtomicExpr::AO__c11_atomic_fetch_min:
503 case AtomicExpr::AO__atomic_fetch_min:
504 opName = cir::AtomicFetchOp::getOperationName();
505 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
506 cir::AtomicFetchKind::Min);
507 break;
508
509 case AtomicExpr::AO__atomic_max_fetch:
510 fetchFirst = false;
511 [[fallthrough]];
512 case AtomicExpr::AO__c11_atomic_fetch_max:
513 case AtomicExpr::AO__atomic_fetch_max:
514 opName = cir::AtomicFetchOp::getOperationName();
515 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
516 cir::AtomicFetchKind::Max);
517 break;
518
519 case AtomicExpr::AO__atomic_and_fetch:
520 fetchFirst = false;
521 [[fallthrough]];
522 case AtomicExpr::AO__c11_atomic_fetch_and:
523 case AtomicExpr::AO__atomic_fetch_and:
524 opName = cir::AtomicFetchOp::getOperationName();
525 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
526 cir::AtomicFetchKind::And);
527 break;
528
529 case AtomicExpr::AO__atomic_or_fetch:
530 fetchFirst = false;
531 [[fallthrough]];
532 case AtomicExpr::AO__c11_atomic_fetch_or:
533 case AtomicExpr::AO__atomic_fetch_or:
534 opName = cir::AtomicFetchOp::getOperationName();
535 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
536 cir::AtomicFetchKind::Or);
537 break;
538
539 case AtomicExpr::AO__atomic_xor_fetch:
540 fetchFirst = false;
541 [[fallthrough]];
542 case AtomicExpr::AO__c11_atomic_fetch_xor:
543 case AtomicExpr::AO__atomic_fetch_xor:
544 opName = cir::AtomicFetchOp::getOperationName();
545 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
546 cir::AtomicFetchKind::Xor);
547 break;
548
549 case AtomicExpr::AO__atomic_nand_fetch:
550 fetchFirst = false;
551 [[fallthrough]];
552 case AtomicExpr::AO__c11_atomic_fetch_nand:
553 case AtomicExpr::AO__atomic_fetch_nand:
554 opName = cir::AtomicFetchOp::getOperationName();
555 fetchAttr = cir::AtomicFetchKindAttr::get(builder.getContext(),
556 cir::AtomicFetchKind::Nand);
557 break;
558
559 case AtomicExpr::AO__atomic_test_and_set: {
560 auto op = cir::AtomicTestAndSetOp::create(
561 builder, loc, ptr.getPointer(), order,
562 builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
563 expr->isVolatile());
564 builder.createStore(loc, op, dest);
565 return;
566 }
567
568 case AtomicExpr::AO__atomic_clear: {
569 cir::AtomicClearOp::create(
570 builder, loc, ptr.getPointer(), order,
571 builder.getI64IntegerAttr(ptr.getAlignment().getQuantity()),
572 expr->isVolatile());
573 return;
574 }
575
576 case AtomicExpr::AO__opencl_atomic_init:
577
578 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
579 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
580
581 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
582 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
583
584 case AtomicExpr::AO__scoped_atomic_compare_exchange:
585 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
586
587 case AtomicExpr::AO__opencl_atomic_load:
588 case AtomicExpr::AO__hip_atomic_load:
589 case AtomicExpr::AO__scoped_atomic_load_n:
590 case AtomicExpr::AO__scoped_atomic_load:
591
592 case AtomicExpr::AO__opencl_atomic_store:
593 case AtomicExpr::AO__hip_atomic_store:
594 case AtomicExpr::AO__scoped_atomic_store:
595 case AtomicExpr::AO__scoped_atomic_store_n:
596
597 case AtomicExpr::AO__hip_atomic_exchange:
598 case AtomicExpr::AO__opencl_atomic_exchange:
599 case AtomicExpr::AO__scoped_atomic_exchange_n:
600 case AtomicExpr::AO__scoped_atomic_exchange:
601
602 case AtomicExpr::AO__scoped_atomic_add_fetch:
603
604 case AtomicExpr::AO__hip_atomic_fetch_add:
605 case AtomicExpr::AO__opencl_atomic_fetch_add:
606 case AtomicExpr::AO__scoped_atomic_fetch_add:
607
608 case AtomicExpr::AO__scoped_atomic_sub_fetch:
609
610 case AtomicExpr::AO__hip_atomic_fetch_sub:
611 case AtomicExpr::AO__opencl_atomic_fetch_sub:
612 case AtomicExpr::AO__scoped_atomic_fetch_sub:
613
614 case AtomicExpr::AO__scoped_atomic_min_fetch:
615
616 case AtomicExpr::AO__hip_atomic_fetch_min:
617 case AtomicExpr::AO__opencl_atomic_fetch_min:
618 case AtomicExpr::AO__scoped_atomic_fetch_min:
619
620 case AtomicExpr::AO__scoped_atomic_max_fetch:
621
622 case AtomicExpr::AO__hip_atomic_fetch_max:
623 case AtomicExpr::AO__opencl_atomic_fetch_max:
624 case AtomicExpr::AO__scoped_atomic_fetch_max:
625
626 case AtomicExpr::AO__scoped_atomic_and_fetch:
627
628 case AtomicExpr::AO__hip_atomic_fetch_and:
629 case AtomicExpr::AO__opencl_atomic_fetch_and:
630 case AtomicExpr::AO__scoped_atomic_fetch_and:
631
632 case AtomicExpr::AO__scoped_atomic_or_fetch:
633
634 case AtomicExpr::AO__hip_atomic_fetch_or:
635 case AtomicExpr::AO__opencl_atomic_fetch_or:
636 case AtomicExpr::AO__scoped_atomic_fetch_or:
637
638 case AtomicExpr::AO__scoped_atomic_xor_fetch:
639
640 case AtomicExpr::AO__hip_atomic_fetch_xor:
641 case AtomicExpr::AO__opencl_atomic_fetch_xor:
642 case AtomicExpr::AO__scoped_atomic_fetch_xor:
643
644 case AtomicExpr::AO__scoped_atomic_nand_fetch:
645
646 case AtomicExpr::AO__scoped_atomic_fetch_nand:
647 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
648 return;
649 }
650
651 assert(!opName.empty() && "expected operation name to build");
652 mlir::Value loadVal1 = builder.createLoad(loc, val1);
653
654 SmallVector<mlir::Value> atomicOperands = {ptr.getPointer(), loadVal1};
655 SmallVector<mlir::Type> atomicResTys = {loadVal1.getType()};
656 mlir::Operation *rmwOp = builder.create(loc, builder.getStringAttr(opName),
657 atomicOperands, atomicResTys);
658
659 if (fetchAttr)
660 rmwOp->setAttr("binop", fetchAttr);
661 rmwOp->setAttr("mem_order", orderAttr);
662 if (expr->isVolatile())
663 rmwOp->setAttr("is_volatile", builder.getUnitAttr());
664 if (fetchFirst && opName == cir::AtomicFetchOp::getOperationName())
665 rmwOp->setAttr("fetch_first", builder.getUnitAttr());
666
667 mlir::Value result = rmwOp->getResult(0);
668 builder.createStore(loc, result, dest);
669}
670
671static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
672 if (!cir::isValidCIRAtomicOrderingCABI(order))
673 return false;
674 auto memOrder = static_cast<cir::MemOrder>(order);
675 if (isStore)
676 return memOrder != cir::MemOrder::Consume &&
677 memOrder != cir::MemOrder::Acquire &&
678 memOrder != cir::MemOrder::AcquireRelease;
679 if (isLoad)
680 return memOrder != cir::MemOrder::Release &&
681 memOrder != cir::MemOrder::AcquireRelease;
682 return true;
683}
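// Illustrative examples (not part of the original source):
//   isMemOrderValid(__ATOMIC_ACQUIRE, /*isStore=*/true, /*isLoad=*/false)
//     -> false (acquire is meaningless for a pure store)
//   isMemOrderValid(__ATOMIC_RELEASE, /*isStore=*/false, /*isLoad=*/true)
//     -> false (release is meaningless for a pure load)
//   isMemOrderValid(__ATOMIC_SEQ_CST, /*isStore=*/true, /*isLoad=*/false)
//     -> true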
684
685 static void emitAtomicExprWithDynamicMemOrder(
686 CIRGenFunction &cgf, mlir::Value order, AtomicExpr *e, Address dest,
687 Address ptr, Address val1, Address val2, Expr *isWeakExpr,
688 Expr *orderFailExpr, uint64_t size, bool isStore, bool isLoad) {
689 // The memory order is not known at compile-time. The atomic operations
690 // can't handle runtime memory orders; the memory order must be hard coded.
691 // Generate a "switch" statement that converts a runtime value into a
692 // compile-time value.
693 CIRGenBuilderTy &builder = cgf.getBuilder();
694 cir::SwitchOp::create(
695 builder, order.getLoc(), order,
696 [&](mlir::OpBuilder &, mlir::Location loc, mlir::OperationState &) {
697 mlir::Block *switchBlock = builder.getBlock();
698
699 auto emitMemOrderCase = [&](llvm::ArrayRef<cir::MemOrder> caseOrders,
700 cir::MemOrder actualOrder) {
701 if (caseOrders.empty())
702 emitMemOrderDefaultCaseLabel(builder, loc);
703 else
704 emitMemOrderCaseLabel(builder, loc, order.getType(), caseOrders);
705 emitAtomicOp(cgf, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
706 size, actualOrder);
707 builder.createBreak(loc);
708 builder.setInsertionPointToEnd(switchBlock);
709 };
710
711 // default:
712 // Use memory_order_relaxed for relaxed operations and for any memory
713 // order value that is not supported. There is no good way to report
714 // an unsupported memory order at runtime, hence the fallback to
715 // memory_order_relaxed.
716 emitMemOrderCase(/*caseOrders=*/{}, cir::MemOrder::Relaxed);
717
718 if (!isStore) {
719 // case consume:
720 // case acquire:
721 // memory_order_consume is not implemented; it is always treated
722 // like memory_order_acquire. These memory orders are not valid for
723 // write-only operations.
724 emitMemOrderCase({cir::MemOrder::Consume, cir::MemOrder::Acquire},
725 cir::MemOrder::Acquire);
726 }
727
728 if (!isLoad) {
729 // case release:
730 // memory_order_release is not valid for read-only operations.
731 emitMemOrderCase({cir::MemOrder::Release}, cir::MemOrder::Release);
732 }
733
734 if (!isLoad && !isStore) {
735 // case acq_rel:
736 // memory_order_acq_rel is only valid for read-write operations.
737 emitMemOrderCase({cir::MemOrder::AcquireRelease},
738 cir::MemOrder::AcquireRelease);
739 }
740
741 // case seq_cst:
742 emitMemOrderCase({cir::MemOrder::SequentiallyConsistent},
743 cir::MemOrder::SequentiallyConsistent);
744
745 builder.createYield(loc);
746 });
747}
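// Illustrative example (not part of the original source): for
//   int load_with(int *p, int order) { return __atomic_load_n(p, order); }
// the order is only known at run time, so the load is emitted once per usable
// order inside a cir::SwitchOp: a default case using relaxed, an any-of case
// for consume/acquire using acquire, and a case for seq_cst; release and
// acq_rel cases are omitted because they are invalid for a load.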
748
749 RValue CIRGenFunction::emitAtomicExpr(AtomicExpr *e) {
750 QualType atomicTy = e->getPtr()->getType()->getPointeeType();
751 QualType memTy = atomicTy;
752 if (const auto *ty = atomicTy->getAs<AtomicType>())
753 memTy = ty->getValueType();
754
755 Expr *isWeakExpr = nullptr;
756 Expr *orderFailExpr = nullptr;
757
758 Address val1 = Address::invalid();
759 Address val2 = Address::invalid();
760 Address dest = Address::invalid();
761 Address ptr = emitPointerWithAlignment(e->getPtr());
762
764 if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
765 LValue lvalue = makeAddrLValue(ptr, atomicTy);
766 emitAtomicInit(e->getVal1(), lvalue);
767 return RValue::get(nullptr);
768 }
769
770 TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
771 uint64_t size = typeInfo.Width.getQuantity();
772
773 Expr::EvalResult orderConst;
774 mlir::Value order;
775 if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
776 order = emitScalarExpr(e->getOrder());
777
778 bool shouldCastToIntPtrTy = true;
779
780 switch (e->getOp()) {
781 default:
782 cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
783 return RValue::get(nullptr);
784
785 case AtomicExpr::AO__c11_atomic_init:
786 llvm_unreachable("already handled above with emitAtomicInit");
787
788 case AtomicExpr::AO__atomic_load_n:
789 case AtomicExpr::AO__c11_atomic_load:
790 case AtomicExpr::AO__atomic_test_and_set:
791 case AtomicExpr::AO__atomic_clear:
792 break;
793
794 case AtomicExpr::AO__atomic_load:
795 dest = emitPointerWithAlignment(e->getVal1());
796 break;
797
798 case AtomicExpr::AO__atomic_store:
799 val1 = emitPointerWithAlignment(e->getVal1());
800 break;
801
802 case AtomicExpr::AO__atomic_exchange:
803 val1 = emitPointerWithAlignment(e->getVal1());
804 dest = emitPointerWithAlignment(e->getVal2());
805 break;
806
807 case AtomicExpr::AO__atomic_compare_exchange:
808 case AtomicExpr::AO__atomic_compare_exchange_n:
809 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
810 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
811 val1 = emitPointerWithAlignment(e->getVal1());
812 if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
813 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
814 val2 = emitPointerWithAlignment(e->getVal2());
815 else
816 val2 = emitValToTemp(*this, e->getVal2());
817 orderFailExpr = e->getOrderFail();
818 if (e->getOp() == AtomicExpr::AO__atomic_compare_exchange_n ||
819 e->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
820 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange_n ||
821 e->getOp() == AtomicExpr::AO__scoped_atomic_compare_exchange)
822 isWeakExpr = e->getWeak();
823 break;
824
825 case AtomicExpr::AO__c11_atomic_fetch_add:
826 case AtomicExpr::AO__c11_atomic_fetch_sub:
827 if (memTy->isPointerType()) {
828 cgm.errorNYI(e->getSourceRange(),
829 "atomic fetch-and-add and fetch-and-sub for pointers");
830 return RValue::get(nullptr);
831 }
832 [[fallthrough]];
833 case AtomicExpr::AO__atomic_fetch_add:
834 case AtomicExpr::AO__atomic_fetch_max:
835 case AtomicExpr::AO__atomic_fetch_min:
836 case AtomicExpr::AO__atomic_fetch_sub:
837 case AtomicExpr::AO__atomic_add_fetch:
838 case AtomicExpr::AO__atomic_max_fetch:
839 case AtomicExpr::AO__atomic_min_fetch:
840 case AtomicExpr::AO__atomic_sub_fetch:
841 case AtomicExpr::AO__c11_atomic_fetch_max:
842 case AtomicExpr::AO__c11_atomic_fetch_min:
843 shouldCastToIntPtrTy = !memTy->isFloatingType();
844 [[fallthrough]];
845
846 case AtomicExpr::AO__atomic_fetch_and:
847 case AtomicExpr::AO__atomic_fetch_nand:
848 case AtomicExpr::AO__atomic_fetch_or:
849 case AtomicExpr::AO__atomic_fetch_xor:
850 case AtomicExpr::AO__atomic_and_fetch:
851 case AtomicExpr::AO__atomic_nand_fetch:
852 case AtomicExpr::AO__atomic_or_fetch:
853 case AtomicExpr::AO__atomic_xor_fetch:
854 case AtomicExpr::AO__atomic_exchange_n:
855 case AtomicExpr::AO__atomic_store_n:
856 case AtomicExpr::AO__c11_atomic_fetch_and:
857 case AtomicExpr::AO__c11_atomic_fetch_nand:
858 case AtomicExpr::AO__c11_atomic_fetch_or:
859 case AtomicExpr::AO__c11_atomic_fetch_xor:
860 case AtomicExpr::AO__c11_atomic_exchange:
861 case AtomicExpr::AO__c11_atomic_store:
862 val1 = emitValToTemp(*this, e->getVal1());
863 break;
864 }
865
866 QualType resultTy = e->getType().getUnqualifiedType();
867
868 // The inlined atomics only function on iN types, where N is a power of 2. We
869 // need to make sure (via temporaries if necessary) that all incoming values
870 // are compatible.
871 LValue atomicValue = makeAddrLValue(ptr, atomicTy);
872 AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
873
874 if (shouldCastToIntPtrTy) {
875 ptr = atomics.castToAtomicIntPointer(ptr);
876 if (val1.isValid())
877 val1 = atomics.convertToAtomicIntPointer(val1);
878 }
879 if (dest.isValid()) {
880 if (shouldCastToIntPtrTy)
881 dest = atomics.castToAtomicIntPointer(dest);
882 } else if (e->isCmpXChg()) {
883 dest = createMemTemp(resultTy, getLoc(e->getSourceRange()), "cmpxchg.bool");
884 } else if (e->getOp() == AtomicExpr::AO__atomic_test_and_set) {
885 dest = createMemTemp(resultTy, getLoc(e->getSourceRange()),
886 "test_and_set.bool");
887 } else if (!resultTy->isVoidType()) {
888 dest = atomics.createTempAlloca();
889 if (shouldCastToIntPtrTy)
890 dest = atomics.castToAtomicIntPointer(dest);
891 }
892
893 bool powerOf2Size = (size & (size - 1)) == 0;
894 bool useLibCall = !powerOf2Size || (size > 16);
895
896 // For atomics larger than 16 bytes, emit a libcall from the frontend. This
897 // avoids the overhead of dealing with excessively-large value types in IR.
898 // Non-power-of-2 values also lower to libcall here, as they are not currently
899 // permitted in IR instructions (although that constraint could be relaxed in
900 // the future). For other cases where a libcall is required on a given
901 // platform, we let the backend handle it (this includes handling for all of
902 // the size-optimized libcall variants, which are only valid up to 16 bytes.)
903 //
904 // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
905 if (useLibCall) {
907 cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
908 return RValue::get(nullptr);
909 }
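// Illustrative example (not part of the original source): for
//   _Atomic struct { char b[24]; } g;
// the size is 24 bytes, which is not a power of two (and is larger than 16
// bytes), so useLibCall is true and the operation takes the library-call path
// above instead of being emitted as an inline CIR atomic operation.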
910
911 bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
912 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
913 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
914 e->getOp() == AtomicExpr::AO__atomic_store ||
915 e->getOp() == AtomicExpr::AO__atomic_store_n ||
916 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
917 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
918 e->getOp() == AtomicExpr::AO__atomic_clear;
919 bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
920 e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
921 e->getOp() == AtomicExpr::AO__hip_atomic_load ||
922 e->getOp() == AtomicExpr::AO__atomic_load ||
923 e->getOp() == AtomicExpr::AO__atomic_load_n ||
924 e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
925 e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
926
927 if (!order) {
928 // We have evaluated the memory order as an integer constant in orderConst.
929 // We should not ever get to a case where the ordering isn't a valid CABI
930 // value, but it's hard to enforce that in general.
931 uint64_t ord = orderConst.Val.getInt().getZExtValue();
932 if (isMemOrderValid(ord, isStore, isLoad))
933 emitAtomicOp(*this, e, dest, ptr, val1, val2, isWeakExpr, orderFailExpr,
934 size, static_cast<cir::MemOrder>(ord));
935 } else {
936 emitAtomicExprWithDynamicMemOrder(*this, order, e, dest, ptr, val1, val2,
937 isWeakExpr, orderFailExpr, size, isStore,
938 isLoad);
939 }
940
941 if (resultTy->isVoidType())
942 return RValue::get(nullptr);
943
944 return convertTempToRValue(
945 dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
946 e->getExprLoc());
947}
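// Illustrative end-to-end example (not part of the original source):
//   _Atomic int counter;
//   int old = __atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
// evaluates the addend into a temporary (emitValToTemp), casts the involved
// pointers to the atomic integer width where needed (a no-op for int), and,
// because the order is a constant, emits a single cir::AtomicFetchOp with the
// 'add' kind, seq_cst order, and fetch_first set (the builtin returns the
// value fetched before the add); the result is stored to 'dest' and returned
// as the r-value of the expression.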
948
949void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
950 bool isVolatile = dest.isVolatileQualified();
951 auto order = cir::MemOrder::SequentiallyConsistent;
952 if (!dest.getType()->isAtomicType()) {
954 }
955 return emitAtomicStore(rvalue, dest, order, isVolatile, isInit);
956}
957
958/// Emit a store to an l-value of atomic type.
959///
960/// Note that the r-value is expected to be an r-value of the atomic type; this
961/// means that for aggregate r-values, it should include storage for any padding
962/// that was necessary.
963 void CIRGenFunction::emitAtomicStore(RValue rvalue, LValue dest,
964 cir::MemOrder order, bool isVolatile,
965 bool isInit) {
966 // If this is an aggregate r-value, it should agree in type except
967 // maybe for address-space qualification.
968 mlir::Location loc = dest.getPointer().getLoc();
969 assert(!rvalue.isAggregate() ||
970 rvalue.getAggregateAddress().getElementType() ==
971 dest.getAddress().getElementType());
972
973 AtomicInfo atomics(*this, dest, loc);
974 LValue lvalue = atomics.getAtomicLValue();
975
976 if (lvalue.isSimple()) {
977 // If this is an initialization, just put the value there normally.
978 if (isInit) {
979 atomics.emitCopyIntoMemory(rvalue);
980 return;
981 }
982
983 // Check whether we should use a library call.
984 if (atomics.shouldUseLibCall()) {
986 cgm.errorNYI(loc, "emitAtomicStore: atomic store with library call");
987 return;
988 }
989
990 // Okay, we're doing this natively.
991 mlir::Value valueToStore = atomics.convertRValueToInt(rvalue);
992
993 // Do the atomic store.
994 Address addr = atomics.getAtomicAddress();
995 if (mlir::Value value = atomics.getScalarRValValueOrNull(rvalue)) {
996 if (shouldCastToInt(value.getType(), /*CmpXchg=*/false)) {
997 addr = atomics.castToAtomicIntPointer(addr);
998 valueToStore =
999 builder.createIntCast(valueToStore, addr.getElementType());
1000 }
1001 }
1002 cir::StoreOp store = builder.createStore(loc, valueToStore, addr);
1003
1004 // Initializations don't need to be atomic.
1005 if (!isInit) {
1007 store.setMemOrder(order);
1008 }
1009
1010 // Other decoration.
1011 if (isVolatile)
1012 store.setIsVolatile(true);
1013
1015 return;
1016 }
1017
1018 cgm.errorNYI(loc, "emitAtomicStore: non-simple atomic lvalue");
1020}
1021
1022 void CIRGenFunction::emitAtomicInit(Expr *init, LValue dest) {
1023 AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
1024
1025 switch (atomics.getEvaluationKind()) {
1026 case cir::TEK_Scalar: {
1027 mlir::Value value = emitScalarExpr(init);
1028 atomics.emitCopyIntoMemory(RValue::get(value));
1029 return;
1030 }
1031
1032 case cir::TEK_Complex: {
1033 mlir::Value value = emitComplexExpr(init);
1034 atomics.emitCopyIntoMemory(RValue::get(value));
1035 return;
1036 }
1037
1038 case cir::TEK_Aggregate: {
1039 // Fix up the destination if the initializer isn't an expression
1040 // of atomic type.
1041 bool zeroed = false;
1042 if (!init->getType()->isAtomicType()) {
1043 zeroed = atomics.emitMemSetZeroIfNecessary();
1044 dest = atomics.projectValue();
1045 }
1046
1047 // Evaluate the expression directly into the destination.
1048 assert(!cir::MissingFeatures::aggValueSlotGC());
1049 AggValueSlot slot = AggValueSlot::forLValue(
1050 dest, AggValueSlot::IsNotDestructed, AggValueSlot::IsNotAliased,
1051 AggValueSlot::DoesNotOverlap, AggValueSlot::IsZeroed(zeroed));
1052
1053
1054 emitAggExpr(init, slot);
1055 return;
1056 }
1057 }
1058
1059 llvm_unreachable("bad evaluation kind");
1060}