clang 22.0.0git
CIRGenAtomic.cpp
Go to the documentation of this file.
1//===--- CIRGenAtomic.cpp - Emit CIR for atomic operations ----------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the code for emitting atomic operations.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
15
16using namespace clang;
17using namespace clang::CIRGen;
18using namespace cir;
19
20namespace {
/// Gathers the layout facts (value vs. atomic size and alignment, evaluation
/// kind) for an atomic l-value and provides the helpers used when lowering
/// atomic operations on it.
class AtomicInfo {
  CIRGenFunction &cgf;
  QualType atomicTy; // Type of the atomic object, e.g. _Atomic(T).
  QualType valueTy;  // Underlying value type, e.g. T.
  uint64_t atomicSizeInBits = 0;
  uint64_t valueSizeInBits = 0;
  CharUnits atomicAlign;
  CharUnits valueAlign;
  TypeEvaluationKind evaluationKind = cir::TEK_Scalar;
  LValue lvalue;
  mlir::Location loc;

public:
  /// Only simple l-values are currently handled; any other kind reports NYI
  /// and leaves the size/alignment fields at their zero defaults.
  AtomicInfo(CIRGenFunction &cgf, LValue &lvalue, mlir::Location loc)
      : cgf(cgf), loc(loc) {
    assert(!lvalue.isGlobalReg());
    ASTContext &ctx = cgf.getContext();
    if (lvalue.isSimple()) {
      atomicTy = lvalue.getType();
      // For _Atomic(T) the value type is T; otherwise the l-value's own type
      // doubles as the value type.
      if (auto *ty = atomicTy->getAs<AtomicType>())
        valueTy = ty->getValueType();
      else
        valueTy = atomicTy;
      evaluationKind = cgf.getEvaluationKind(valueTy);

      TypeInfo valueTypeInfo = ctx.getTypeInfo(valueTy);
      TypeInfo atomicTypeInfo = ctx.getTypeInfo(atomicTy);
      uint64_t valueAlignInBits = valueTypeInfo.Align;
      uint64_t atomicAlignInBits = atomicTypeInfo.Align;
      valueSizeInBits = valueTypeInfo.Width;
      atomicSizeInBits = atomicTypeInfo.Width;
      // The atomic representation may be padded or over-aligned relative to
      // the value type, never the reverse.
      assert(valueSizeInBits <= atomicSizeInBits);
      assert(valueAlignInBits <= atomicAlignInBits);

      atomicAlign = ctx.toCharUnitsFromBits(atomicAlignInBits);
      valueAlign = ctx.toCharUnitsFromBits(valueAlignInBits);
      // An l-value with no recorded alignment inherits the atomic alignment.
      if (lvalue.getAlignment().isZero())
        lvalue.setAlignment(atomicAlign);

      this->lvalue = lvalue;
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo: non-simple lvalue");
    }

  }

  QualType getValueType() const { return valueTy; }
  CharUnits getAtomicAlignment() const { return atomicAlign; }
  TypeEvaluationKind getEvaluationKind() const { return evaluationKind; }
  /// Returns the pointer for a simple l-value; null for the NYI kinds.
  mlir::Value getAtomicPointer() const {
    if (lvalue.isSimple())
      return lvalue.getPointer();
    return nullptr;
  }
  /// Rebuilds the atomic object's address from its pointer, element type, and
  /// atomic alignment. Non-simple l-values report NYI (elemTy stays unset on
  /// that diagnostic path).
  Address getAtomicAddress() const {
    mlir::Type elemTy;
    if (lvalue.isSimple()) {
      elemTy = lvalue.getAddress().getElementType();
    } else {
      cgf.cgm.errorNYI(loc, "AtomicInfo::getAtomicAddress: non-simple lvalue");
    }
    return Address(getAtomicPointer(), elemTy, getAtomicAlignment());
  }

  /// Is the atomic size larger than the underlying value type?
  ///
  /// Note that the absence of padding does not mean that atomic
  /// objects are completely interchangeable with non-atomic
  /// objects: we might have promoted the alignment of a type
  /// without making it bigger.
  bool hasPadding() const { return (valueSizeInBits != atomicSizeInBits); }

  bool emitMemSetZeroIfNecessary() const;

  /// Cast the given pointer to an integer pointer suitable for atomic
  /// operations on the source.
  Address castToAtomicIntPointer(Address addr) const;

  /// If addr is compatible with the iN that will be used for an atomic
  /// operation, bitcast it. Otherwise, create a temporary that is suitable and
  /// copy the value across.
  Address convertToAtomicIntPointer(Address addr) const;

  /// Copy an atomic r-value into atomic-layout memory.
  void emitCopyIntoMemory(RValue rvalue) const;

  /// Project an l-value down to the value field.
  LValue projectValue() const {
    assert(lvalue.isSimple());
    Address addr = getAtomicAddress();
    // Stepping past size padding is not implemented yet.
    if (hasPadding()) {
      cgf.cgm.errorNYI(loc, "AtomicInfo::projectValue: padding");
    }

    return LValue::makeAddr(addr, getValueType(), lvalue.getBaseInfo());
  }

  /// Creates temp alloca for intermediate operations on atomic value.
  Address createTempAlloca() const;

private:
  bool requiresMemSetZero(mlir::Type ty) const;
};
129} // namespace
130
131// This function emits any expression (scalar, complex, or aggregate)
132// into a temporary alloca.
134 Address declPtr = cgf.createMemTemp(
135 e->getType(), cgf.getLoc(e->getSourceRange()), ".atomictmp");
136 cgf.emitAnyExprToMem(e, declPtr, e->getType().getQualifiers(),
137 /*Init*/ true);
138 return declPtr;
139}
140
141/// Does a store of the given IR type modify the full expected width?
142static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty,
143 uint64_t expectedSize) {
144 return cgm.getDataLayout().getTypeStoreSize(ty) * 8 == expectedSize;
145}
146
147/// Does the atomic type require memsetting to zero before initialization?
148///
149/// The IR type is provided as a way of making certain queries faster.
150bool AtomicInfo::requiresMemSetZero(mlir::Type ty) const {
151 // If the atomic type has size padding, we definitely need a memset.
152 if (hasPadding())
153 return true;
154
155 // Otherwise, do some simple heuristics to try to avoid it:
156 switch (getEvaluationKind()) {
157 // For scalars and complexes, check whether the store size of the
158 // type uses the full size.
159 case cir::TEK_Scalar:
160 return !isFullSizeType(cgf.cgm, ty, atomicSizeInBits);
161 case cir::TEK_Complex:
162 cgf.cgm.errorNYI(loc, "AtomicInfo::requiresMemSetZero: complex type");
163 return false;
164
165 // Padding in structs has an undefined bit pattern. User beware.
167 return false;
168 }
169 llvm_unreachable("bad evaluation kind");
170}
171
172Address AtomicInfo::convertToAtomicIntPointer(Address addr) const {
173 mlir::Type ty = addr.getElementType();
174 uint64_t sourceSizeInBits = cgf.cgm.getDataLayout().getTypeSizeInBits(ty);
175 if (sourceSizeInBits != atomicSizeInBits) {
176 cgf.cgm.errorNYI(
177 loc,
178 "AtomicInfo::convertToAtomicIntPointer: convert through temp alloca");
179 }
180
181 return castToAtomicIntPointer(addr);
182}
183
184Address AtomicInfo::createTempAlloca() const {
185 Address tempAlloca = cgf.createMemTemp(
186 (lvalue.isBitField() && valueSizeInBits > atomicSizeInBits) ? valueTy
187 : atomicTy,
188 getAtomicAlignment(), loc, "atomic-temp");
189
190 // Cast to pointer to value type for bitfields.
191 if (lvalue.isBitField()) {
192 cgf.cgm.errorNYI(loc, "AtomicInfo::createTempAlloca: bitfield lvalue");
193 }
194
195 return tempAlloca;
196}
197
198Address AtomicInfo::castToAtomicIntPointer(Address addr) const {
199 auto intTy = mlir::dyn_cast<cir::IntType>(addr.getElementType());
200 // Don't bother with int casts if the integer size is the same.
201 if (intTy && intTy.getWidth() == atomicSizeInBits)
202 return addr;
203 auto ty = cgf.getBuilder().getUIntNTy(atomicSizeInBits);
204 return addr.withElementType(cgf.getBuilder(), ty);
205}
206
207bool AtomicInfo::emitMemSetZeroIfNecessary() const {
208 assert(lvalue.isSimple());
209 Address addr = lvalue.getAddress();
210 if (!requiresMemSetZero(addr.getElementType()))
211 return false;
212
213 cgf.cgm.errorNYI(loc,
214 "AtomicInfo::emitMemSetZeroIfNecessary: emit memset zero");
215 return false;
216}
217
218/// Copy an r-value into memory as part of storing to an atomic type.
219/// This needs to create a bit-pattern suitable for atomic operations.
220void AtomicInfo::emitCopyIntoMemory(RValue rvalue) const {
221 assert(lvalue.isSimple());
222
223 // If we have an r-value, the rvalue should be of the atomic type,
224 // which means that the caller is responsible for having zeroed
225 // any padding. Just do an aggregate copy of that type.
226 if (rvalue.isAggregate()) {
227 cgf.cgm.errorNYI("copying aggregate into atomic lvalue");
228 return;
229 }
230
231 // Okay, otherwise we're copying stuff.
232
233 // Zero out the buffer if necessary.
234 emitMemSetZeroIfNecessary();
235
236 // Drill past the padding if present.
237 LValue tempLValue = projectValue();
238
239 // Okay, store the rvalue in.
240 if (rvalue.isScalar()) {
241 cgf.emitStoreOfScalar(rvalue.getValue(), tempLValue, /*isInit=*/true);
242 } else {
243 cgf.cgm.errorNYI("copying complex into atomic lvalue");
244 }
245}
246
248 Address ptr, Address val1, uint64_t size,
249 cir::MemOrder order) {
250 std::unique_ptr<AtomicScopeModel> scopeModel = expr->getScopeModel();
251 if (scopeModel) {
253 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: atomic scope");
254 return;
255 }
256
258
259 CIRGenBuilderTy &builder = cgf.getBuilder();
260 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
261 auto orderAttr = cir::MemOrderAttr::get(builder.getContext(), order);
262
263 switch (expr->getOp()) {
264 case AtomicExpr::AO__c11_atomic_init:
265 llvm_unreachable("already handled!");
266
267 case AtomicExpr::AO__c11_atomic_load:
268 case AtomicExpr::AO__atomic_load_n:
269 case AtomicExpr::AO__atomic_load: {
270 cir::LoadOp load =
271 builder.createLoad(loc, ptr, /*isVolatile=*/expr->isVolatile());
272
274
275 load->setAttr("mem_order", orderAttr);
276
277 builder.createStore(loc, load->getResult(0), dest);
278 return;
279 }
280
281 case AtomicExpr::AO__c11_atomic_store:
282 case AtomicExpr::AO__atomic_store_n:
283 case AtomicExpr::AO__atomic_store: {
284 cir::LoadOp loadVal1 = builder.createLoad(loc, val1);
285
287
288 builder.createStore(loc, loadVal1, ptr, expr->isVolatile(),
289 /*align=*/mlir::IntegerAttr{}, orderAttr);
290 return;
291 }
292
293 case AtomicExpr::AO__opencl_atomic_init:
294
295 case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
296 case AtomicExpr::AO__hip_atomic_compare_exchange_strong:
297 case AtomicExpr::AO__opencl_atomic_compare_exchange_strong:
298
299 case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
300 case AtomicExpr::AO__opencl_atomic_compare_exchange_weak:
301 case AtomicExpr::AO__hip_atomic_compare_exchange_weak:
302
303 case AtomicExpr::AO__atomic_compare_exchange:
304 case AtomicExpr::AO__atomic_compare_exchange_n:
305 case AtomicExpr::AO__scoped_atomic_compare_exchange:
306 case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
307
308 case AtomicExpr::AO__opencl_atomic_load:
309 case AtomicExpr::AO__hip_atomic_load:
310 case AtomicExpr::AO__scoped_atomic_load_n:
311 case AtomicExpr::AO__scoped_atomic_load:
312
313 case AtomicExpr::AO__opencl_atomic_store:
314 case AtomicExpr::AO__hip_atomic_store:
315 case AtomicExpr::AO__scoped_atomic_store:
316 case AtomicExpr::AO__scoped_atomic_store_n:
317
318 case AtomicExpr::AO__c11_atomic_exchange:
319 case AtomicExpr::AO__hip_atomic_exchange:
320 case AtomicExpr::AO__opencl_atomic_exchange:
321 case AtomicExpr::AO__atomic_exchange_n:
322 case AtomicExpr::AO__atomic_exchange:
323 case AtomicExpr::AO__scoped_atomic_exchange_n:
324 case AtomicExpr::AO__scoped_atomic_exchange:
325
326 case AtomicExpr::AO__atomic_add_fetch:
327 case AtomicExpr::AO__scoped_atomic_add_fetch:
328
329 case AtomicExpr::AO__c11_atomic_fetch_add:
330 case AtomicExpr::AO__hip_atomic_fetch_add:
331 case AtomicExpr::AO__opencl_atomic_fetch_add:
332 case AtomicExpr::AO__atomic_fetch_add:
333 case AtomicExpr::AO__scoped_atomic_fetch_add:
334
335 case AtomicExpr::AO__atomic_sub_fetch:
336 case AtomicExpr::AO__scoped_atomic_sub_fetch:
337
338 case AtomicExpr::AO__c11_atomic_fetch_sub:
339 case AtomicExpr::AO__hip_atomic_fetch_sub:
340 case AtomicExpr::AO__opencl_atomic_fetch_sub:
341 case AtomicExpr::AO__atomic_fetch_sub:
342 case AtomicExpr::AO__scoped_atomic_fetch_sub:
343
344 case AtomicExpr::AO__atomic_min_fetch:
345 case AtomicExpr::AO__scoped_atomic_min_fetch:
346
347 case AtomicExpr::AO__c11_atomic_fetch_min:
348 case AtomicExpr::AO__hip_atomic_fetch_min:
349 case AtomicExpr::AO__opencl_atomic_fetch_min:
350 case AtomicExpr::AO__atomic_fetch_min:
351 case AtomicExpr::AO__scoped_atomic_fetch_min:
352
353 case AtomicExpr::AO__atomic_max_fetch:
354 case AtomicExpr::AO__scoped_atomic_max_fetch:
355
356 case AtomicExpr::AO__c11_atomic_fetch_max:
357 case AtomicExpr::AO__hip_atomic_fetch_max:
358 case AtomicExpr::AO__opencl_atomic_fetch_max:
359 case AtomicExpr::AO__atomic_fetch_max:
360 case AtomicExpr::AO__scoped_atomic_fetch_max:
361
362 case AtomicExpr::AO__atomic_and_fetch:
363 case AtomicExpr::AO__scoped_atomic_and_fetch:
364
365 case AtomicExpr::AO__c11_atomic_fetch_and:
366 case AtomicExpr::AO__hip_atomic_fetch_and:
367 case AtomicExpr::AO__opencl_atomic_fetch_and:
368 case AtomicExpr::AO__atomic_fetch_and:
369 case AtomicExpr::AO__scoped_atomic_fetch_and:
370
371 case AtomicExpr::AO__atomic_or_fetch:
372 case AtomicExpr::AO__scoped_atomic_or_fetch:
373
374 case AtomicExpr::AO__c11_atomic_fetch_or:
375 case AtomicExpr::AO__hip_atomic_fetch_or:
376 case AtomicExpr::AO__opencl_atomic_fetch_or:
377 case AtomicExpr::AO__atomic_fetch_or:
378 case AtomicExpr::AO__scoped_atomic_fetch_or:
379
380 case AtomicExpr::AO__atomic_xor_fetch:
381 case AtomicExpr::AO__scoped_atomic_xor_fetch:
382
383 case AtomicExpr::AO__c11_atomic_fetch_xor:
384 case AtomicExpr::AO__hip_atomic_fetch_xor:
385 case AtomicExpr::AO__opencl_atomic_fetch_xor:
386 case AtomicExpr::AO__atomic_fetch_xor:
387 case AtomicExpr::AO__scoped_atomic_fetch_xor:
388
389 case AtomicExpr::AO__atomic_nand_fetch:
390 case AtomicExpr::AO__scoped_atomic_nand_fetch:
391
392 case AtomicExpr::AO__c11_atomic_fetch_nand:
393 case AtomicExpr::AO__atomic_fetch_nand:
394 case AtomicExpr::AO__scoped_atomic_fetch_nand:
395
396 case AtomicExpr::AO__atomic_test_and_set:
397
398 case AtomicExpr::AO__atomic_clear:
399 cgf.cgm.errorNYI(expr->getSourceRange(), "emitAtomicOp: expr op NYI");
400 break;
401 }
402}
403
404static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad) {
406 return false;
407 auto memOrder = static_cast<cir::MemOrder>(order);
408 if (isStore)
409 return memOrder != cir::MemOrder::Consume &&
410 memOrder != cir::MemOrder::Acquire &&
411 memOrder != cir::MemOrder::AcquireRelease;
412 if (isLoad)
413 return memOrder != cir::MemOrder::Release &&
414 memOrder != cir::MemOrder::AcquireRelease;
415 return true;
416}
417
419 QualType atomicTy = e->getPtr()->getType()->getPointeeType();
420 QualType memTy = atomicTy;
421 if (const auto *ty = atomicTy->getAs<AtomicType>())
422 memTy = ty->getValueType();
423
424 Address val1 = Address::invalid();
425 Address dest = Address::invalid();
427
429 if (e->getOp() == AtomicExpr::AO__c11_atomic_init) {
430 LValue lvalue = makeAddrLValue(ptr, atomicTy);
431 emitAtomicInit(e->getVal1(), lvalue);
432 return RValue::get(nullptr);
433 }
434
435 TypeInfoChars typeInfo = getContext().getTypeInfoInChars(atomicTy);
436 uint64_t size = typeInfo.Width.getQuantity();
437
438 Expr::EvalResult orderConst;
439 mlir::Value order;
440 if (!e->getOrder()->EvaluateAsInt(orderConst, getContext()))
441 order = emitScalarExpr(e->getOrder());
442
443 bool shouldCastToIntPtrTy = true;
444
445 switch (e->getOp()) {
446 default:
447 cgm.errorNYI(e->getSourceRange(), "atomic op NYI");
448 return RValue::get(nullptr);
449
450 case AtomicExpr::AO__c11_atomic_init:
451 llvm_unreachable("already handled above with emitAtomicInit");
452
453 case AtomicExpr::AO__atomic_load_n:
454 case AtomicExpr::AO__c11_atomic_load:
455 break;
456
457 case AtomicExpr::AO__atomic_load:
459 break;
460
461 case AtomicExpr::AO__atomic_store:
463 break;
464
465 case AtomicExpr::AO__atomic_store_n:
466 case AtomicExpr::AO__c11_atomic_store:
467 val1 = emitValToTemp(*this, e->getVal1());
468 break;
469 }
470
471 QualType resultTy = e->getType().getUnqualifiedType();
472
473 // The inlined atomics only function on iN types, where N is a power of 2. We
474 // need to make sure (via temporaries if necessary) that all incoming values
475 // are compatible.
476 LValue atomicValue = makeAddrLValue(ptr, atomicTy);
477 AtomicInfo atomics(*this, atomicValue, getLoc(e->getSourceRange()));
478
479 if (shouldCastToIntPtrTy) {
480 ptr = atomics.castToAtomicIntPointer(ptr);
481 if (val1.isValid())
482 val1 = atomics.convertToAtomicIntPointer(val1);
483 }
484 if (dest.isValid()) {
485 if (shouldCastToIntPtrTy)
486 dest = atomics.castToAtomicIntPointer(dest);
487 } else if (!resultTy->isVoidType()) {
488 dest = atomics.createTempAlloca();
489 if (shouldCastToIntPtrTy)
490 dest = atomics.castToAtomicIntPointer(dest);
491 }
492
493 bool powerOf2Size = (size & (size - 1)) == 0;
494 bool useLibCall = !powerOf2Size || (size > 16);
495
496 // For atomics larger than 16 bytes, emit a libcall from the frontend. This
497 // avoids the overhead of dealing with excessively-large value types in IR.
498 // Non-power-of-2 values also lower to libcall here, as they are not currently
499 // permitted in IR instructions (although that constraint could be relaxed in
500 // the future). For other cases where a libcall is required on a given
501 // platform, we let the backend handle it (this includes handling for all of
502 // the size-optimized libcall variants, which are only valid up to 16 bytes.)
503 //
504 // See: https://llvm.org/docs/Atomics.html#libcalls-atomic
505 if (useLibCall) {
507 cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: emit atomic lib call");
508 return RValue::get(nullptr);
509 }
510
511 bool isStore = e->getOp() == AtomicExpr::AO__c11_atomic_store ||
512 e->getOp() == AtomicExpr::AO__opencl_atomic_store ||
513 e->getOp() == AtomicExpr::AO__hip_atomic_store ||
514 e->getOp() == AtomicExpr::AO__atomic_store ||
515 e->getOp() == AtomicExpr::AO__atomic_store_n ||
516 e->getOp() == AtomicExpr::AO__scoped_atomic_store ||
517 e->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
518 e->getOp() == AtomicExpr::AO__atomic_clear;
519 bool isLoad = e->getOp() == AtomicExpr::AO__c11_atomic_load ||
520 e->getOp() == AtomicExpr::AO__opencl_atomic_load ||
521 e->getOp() == AtomicExpr::AO__hip_atomic_load ||
522 e->getOp() == AtomicExpr::AO__atomic_load ||
523 e->getOp() == AtomicExpr::AO__atomic_load_n ||
524 e->getOp() == AtomicExpr::AO__scoped_atomic_load ||
525 e->getOp() == AtomicExpr::AO__scoped_atomic_load_n;
526
527 if (!order) {
528 // We have evaluated the memory order as an integer constant in orderConst.
529 // We should not ever get to a case where the ordering isn't a valid CABI
530 // value, but it's hard to enforce that in general.
531 uint64_t ord = orderConst.Val.getInt().getZExtValue();
532 if (isMemOrderValid(ord, isStore, isLoad))
533 emitAtomicOp(*this, e, dest, ptr, val1, size,
534 static_cast<cir::MemOrder>(ord));
535 } else {
537 cgm.errorNYI(e->getSourceRange(), "emitAtomicExpr: dynamic memory order");
538 return RValue::get(nullptr);
539 }
540
541 if (resultTy->isVoidType())
542 return RValue::get(nullptr);
543
544 return convertTempToRValue(
545 dest.withElementType(builder, convertTypeForMem(resultTy)), resultTy,
546 e->getExprLoc());
547}
548
550 AtomicInfo atomics(*this, dest, getLoc(init->getSourceRange()));
551
552 switch (atomics.getEvaluationKind()) {
553 case cir::TEK_Scalar: {
554 mlir::Value value = emitScalarExpr(init);
555 atomics.emitCopyIntoMemory(RValue::get(value));
556 return;
557 }
558
559 case cir::TEK_Complex:
560 cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: complex type");
561 return;
562
564 cgm.errorNYI(init->getSourceRange(), "emitAtomicInit: aggregate type");
565 return;
566 }
567
568 llvm_unreachable("bad evaluation kind");
569}
static Address emitValToTemp(CIRGenFunction &cgf, Expr *e)
static void emitAtomicOp(CIRGenFunction &cgf, AtomicExpr *expr, Address dest, Address ptr, Address val1, uint64_t size, cir::MemOrder order)
static bool isMemOrderValid(uint64_t order, bool isStore, bool isLoad)
static bool isFullSizeType(CIRGenModule &cgm, mlir::Type ty, uint64_t expectedSize)
Does a store of the given IR type modify the full expected width?
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
llvm::TypeSize getTypeStoreSize(mlir::Type ty) const
Returns the maximum number of bytes that may be overwritten by storing the specified type.
Definition: CIRDataLayout.h:52
APSInt & getInt()
Definition: APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:188
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
TypeInfoChars getTypeInfoInChars(const Type *T) const
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*, __atomic_load,...
Definition: Expr.h:6816
Expr * getOrder() const
Definition: Expr.h:6850
AtomicOp getOp() const
Definition: Expr.h:6879
Expr * getVal1() const
Definition: Expr.h:6857
Expr * getPtr() const
Definition: Expr.h:6847
mlir::Type getElementType() const
Definition: Address.h:101
static Address invalid()
Definition: Address.h:66
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
bool isValid() const
Definition: Address.h:67
cir::LoadOp createLoad(mlir::Location loc, Address addr, bool isVolatile=false)
cir::IntType getUIntNTy(int n)
cir::StoreOp createStore(mlir::Location loc, mlir::Value val, Address dst, bool isVolatile=false, mlir::IntegerAttr align={}, cir::MemOrderAttr order={})
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
Definition: CIRGenExpr.cpp:67
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, bool isInit=false, bool isNontemporal=false)
Definition: CIRGenExpr.cpp:312
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
RValue emitAtomicExpr(AtomicExpr *e)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitScalarExpr(const clang::Expr *e)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitAtomicInit(Expr *init, LValue dest)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
clang::ASTContext & getContext() const
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
This class organizes the cross-function state that is used while generating CIR code.
Definition: CIRGenModule.h:56
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
const cir::CIRDataLayout getDataLayout() const
Definition: CIRGenModule.h:112
Address getAddress() const
Definition: CIRGenValue.h:211
clang::QualType getType() const
Definition: CIRGenValue.h:202
mlir::Value getPointer() const
Definition: CIRGenValue.h:204
bool isBitField() const
Definition: CIRGenValue.h:192
void setAlignment(clang::CharUnits a)
Definition: CIRGenValue.h:209
clang::CharUnits getAlignment() const
Definition: CIRGenValue.h:206
LValueBaseInfo getBaseInfo() const
Definition: CIRGenValue.h:226
bool isGlobalReg() const
Definition: CIRGenValue.h:193
bool isSimple() const
Definition: CIRGenValue.h:190
This trivial value class is used to represent the result of an expression that is evaluated.
Definition: CIRGenValue.h:33
bool isAggregate() const
Definition: CIRGenValue.h:51
static RValue get(mlir::Value v)
Definition: CIRGenValue.h:82
mlir::Value getValue() const
Return the value of this scalar value.
Definition: CIRGenValue.h:56
bool isScalar() const
Definition: CIRGenValue.h:49
CharUnits - This is an opaque type for sizes expressed in character units.
Definition: CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition: CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:185
This represents one expression.
Definition: Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition: Expr.cpp:273
QualType getType() const
Definition: Expr.h:144
A (possibly-)qualified type.
Definition: TypeBase.h:937
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition: TypeBase.h:8383
QualType getUnqualifiedType() const
Retrieve the unqualified variant of the given type, removing as little sugar as possible.
Definition: TypeBase.h:8437
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition: Stmt.cpp:334
bool isVoidType() const
Definition: TypeBase.h:8936
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition: Type.cpp:752
const T * getAs() const
Member-template getAs<specific type>'.
Definition: TypeBase.h:9159
Definition: ABIArgInfo.h:22
bool isValidCIRAtomicOrderingCABI(Int value)
Definition: CIROpsEnums.h:123
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
unsigned long uint64_t
static bool atomicInfoGetAtomicPointer()
static bool atomicExpr()
static bool atomicInfo()
static bool atomicScope()
static bool atomicUseLibCall()
static bool atomicSyncScopeID()
static bool atomicInfoGetAtomicAddress()
EvalResult is a struct with detailed info about an evaluated expression.
Definition: Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition: Expr.h:647
uint64_t Width
Definition: ASTContext.h:159
unsigned Align
Definition: ASTContext.h:160