CIRGenBuiltin.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
28#include "llvm/Support/ErrorHandling.h"
29
30using namespace clang;
31using namespace clang::CIRGen;
32using namespace llvm;
33
34static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd,
35 const CallExpr *e, mlir::Operation *calleeValue) {
36 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
37 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
38}
39
40template <typename Op>
41static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
42 bool poisonZero = false) {
44
45 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
46 CIRGenBuilderTy &builder = cgf.getBuilder();
47
48 Op op;
49 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
50 std::is_same_v<Op, cir::BitCtzOp>)
51 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
52 else
53 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
54
55 mlir::Value result = op.getResult();
56 mlir::Type exprTy = cgf.convertType(e->getType());
57 if (exprTy != result.getType())
58 result = builder.createIntCast(result, exprTy);
59
60 return RValue::get(result);
61}
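// Example (illustrative): `__builtin_clz(x)` reaches this helper with
// Op = cir::BitClzOp and poisonZero = true (a zero input is undefined for the C
// builtin), while `__lzcnt(x)` uses the same op with poisonZero = false; the
// op's result is then cast to the builtin's declared result type whenever the
// two integer types differ.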
62
63static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr,
64 cir::SyncScopeKind syncScope) {
65 CIRGenBuilderTy &builder = cgf.getBuilder();
66 mlir::Value orderingVal = cgf.emitScalarExpr(expr->getArg(0));
67
68 auto constOrdering = orderingVal.getDefiningOp<cir::ConstantOp>();
69
70 if (!constOrdering) {
71 // TODO(cir): Emit code to switch on `orderingVal` and create the fence op
72 // for valid values.
73 cgf.cgm.errorNYI("Variable atomic fence ordering");
74 return;
75 }
76
77 auto constOrderingAttr = constOrdering.getValueAttr<cir::IntAttr>();
78 assert(constOrderingAttr && "Expected integer constant for ordering");
79
80 auto ordering = static_cast<cir::MemOrder>(constOrderingAttr.getUInt());
81
82 cir::AtomicFenceOp::create(
83 builder, cgf.getLoc(expr->getSourceRange()), ordering,
84 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
85}
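// Example (illustrative): `__atomic_thread_fence(__ATOMIC_SEQ_CST)` arrives
// here with a constant ordering operand and becomes a single cir::AtomicFenceOp
// with seq_cst ordering and the system sync scope; `__atomic_signal_fence` uses
// the single-thread scope instead, and non-constant orderings are still NYI
// above.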
86
87namespace {
88struct WidthAndSignedness {
89 unsigned width;
90 bool isSigned;
91};
92} // namespace
93
94static WidthAndSignedness
95getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
96 const clang::QualType type) {
97 assert(type->isIntegerType() && "Given type is not an integer.");
98 unsigned width = type->isBooleanType() ? 1
99 : type->isBitIntType() ? astContext.getIntWidth(type)
100 : astContext.getTypeInfo(type).Width;
101 bool isSigned = type->isSignedIntegerType();
102 return {width, isSigned};
103}
104
105// Given one or more integer types, this function produces an integer type that
106// encompasses them: any value in one of the given types could be expressed in
107// the encompassing type.
108static struct WidthAndSignedness
109EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
110 assert(types.size() > 0 && "Empty list of types.");
111
112 // If any of the given types is signed, we must return a signed type.
113 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
114
115 // The encompassing type must have a width greater than or equal to the width
116 // of the specified types. Additionally, if the encompassing type is signed,
117 // its width must be strictly greater than the width of any unsigned types
118 // given.
119 unsigned width = 0;
120 for (const auto &type : types)
121 width = std::max(width, type.width + (isSigned && !type.isSigned));
122
123 return {width, isSigned};
124}
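// For example: for {unsigned int (u32), int (s32)} the result must be signed,
// and a signed encompassing type needs one extra bit to cover the unsigned
// range, giving {width = 33, isSigned = true}; for {short (s16), int (s32)} the
// result is simply {width = 32, isSigned = true}.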
125
126RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
127 mlir::Value input = emitScalarExpr(e->getArg(0));
128 mlir::Value amount = emitScalarExpr(e->getArg(1));
129
130 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
131 // and amount, but cir.rotate requires them to have the same type. Cast amount
132 // to the type of input when necessary.
134
135 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
136 amount, isRotateLeft);
137 return RValue::get(r);
138}
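// Example (illustrative): `__builtin_rotateleft32(x, n)` becomes a single
// cir::RotateOp with isRotateLeft = true on %x and %n, and the rotateright
// builtins create the same op with isRotateLeft = false.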
139
140template <class Operation>
141static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf,
142 const CallExpr &e) {
143 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
144
147
148 auto call =
149 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
150 return RValue::get(call->getResult(0));
151}
152
153template <class Operation>
154static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e) {
155 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
156 auto call =
157 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
158 return RValue::get(call->getResult(0));
159}
160
161static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e,
162 unsigned builtinID) {
163
164 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
165 cgf.cgm.errorNYI(
166 e->getSourceRange(),
167 std::string("unimplemented X86 library function builtin call: ") +
168 cgf.getContext().BuiltinInfo.getName(builtinID));
169 } else {
170 cgf.cgm.errorNYI(e->getSourceRange(),
171 std::string("unimplemented X86 builtin call: ") +
172 cgf.getContext().BuiltinInfo.getName(builtinID));
173 }
174
175 return cgf.getUndefRValue(e->getType());
176}
177
178static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e,
179 unsigned builtinID) {
180 assert(builtinID == Builtin::BI__builtin_alloca ||
181 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
182 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
183
184 // Get alloca size input
185 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
186
187 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
188 const TargetInfo &ti = cgf.getContext().getTargetInfo();
189 const CharUnits suitableAlignmentInBytes =
190 cgf.getContext().toCharUnitsFromBits(ti.getSuitableAlign());
191
192 // Emit the alloca op with type `u8 *` to match the semantics of
193 // `llvm.alloca`. We later bitcast the type to `void *` to match the
194 // semantics of C/C++
195 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
196 // pointer of type `void *`. This will require a change to the allocaOp
197 // verifier.
198 CIRGenBuilderTy &builder = cgf.getBuilder();
199 mlir::Value allocaAddr = builder.createAlloca(
200 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
201 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
202
203 // Initialize the allocated buffer if required.
204 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
205 // Initialize the alloca with the given size and alignment according to
206 // the lang opts. Only the trivial non-initialization is supported for
207 // now.
208
209 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
210 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
211 // Nothing to initialize.
212 break;
213 case LangOptions::TrivialAutoVarInitKind::Zero:
214 case LangOptions::TrivialAutoVarInitKind::Pattern:
215 cgf.cgm.errorNYI("trivial auto var init");
216 break;
217 }
218 }
219
220 // An alloca will always return a pointer to the alloca (stack) address
221 // space. This address space need not be the same as the AST / Language
222 // default (e.g. in C / C++ auto vars are in the generic address space). At
223 // the AST level this is handled within CreateTempAlloca et al., but for the
224 // builtin / dynamic alloca we have to handle it here.
225
229 cgf.cgm.errorNYI(e->getSourceRange(),
230 "Non-default address space for alloca");
231 }
232
233 // Bitcast the alloca to the expected type.
234 return RValue::get(builder.createBitcast(
235 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
236}
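// Example (illustrative): `void *p = __builtin_alloca(n);` emits a cir.alloca
// of !u8i named "bi_alloca", aligned to __BIGGEST_ALIGNMENT__ and sized by %n,
// and the resulting `u8 *` is then bitcast to `void *` in the alloca address
// space, as described in the comments above.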
237
238RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
239 const CallExpr *e,
240 ReturnValueSlot returnValue) {
241 mlir::Location loc = getLoc(e->getSourceRange());
242
243 // See if we can constant fold this builtin. If so, don't emit it at all.
244 // TODO: Extend this handling to all builtin calls that we can constant-fold.
245 Expr::EvalResult result;
246 if (e->isPRValue() && e->EvaluateAsRValue(result, cgm.getASTContext()) &&
247 !result.hasSideEffects()) {
248 if (result.Val.isInt())
249 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
250 if (result.Val.isFloat()) {
251 // Note: we are using the result type of the CallExpr to determine the type
252 // of the constant. Classic codegen uses the result value to determine the
253 // type. We feel it should be OK to use the expression type because it is
254 // hard to imagine a builtin function evaluating to a value that
255 // over/underflows its own defined type.
256 mlir::Type type = convertType(e->getType());
257 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
258 }
259 }
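// Example: a call such as `__builtin_abs(-3)` has no side effects and
// constant-folds here, so only a constant of the call's result type is emitted
// and the builtin is never expanded.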
260
261 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
262
264
265 // If the builtin has been declared explicitly with an assembler label,
266 // disable the specialized emitting below. Ideally we should communicate the
267 // rename in IR, or at least avoid generating the intrinsic calls that are
268 // likely to get lowered to the renamed library functions.
269 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
270
273
274 switch (builtinIDIfNoAsmLabel) {
275 default:
276 break;
277
278 // C stdarg builtins.
279 case Builtin::BI__builtin_stdarg_start:
280 case Builtin::BI__builtin_va_start:
281 case Builtin::BI__va_start: {
282 mlir::Value vaList = builtinID == Builtin::BI__va_start
283 ? emitScalarExpr(e->getArg(0))
284 : emitVAListRef(e->getArg(0)).getPointer();
285 mlir::Value count = emitScalarExpr(e->getArg(1));
286 emitVAStart(vaList, count);
287 return {};
288 }
289
290 case Builtin::BI__builtin_va_end:
291 emitVAEnd(emitVAListRef(e->getArg(0)).getPointer());
292 return {};
293 case Builtin::BI__builtin_va_copy: {
294 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
295 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
296 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
297 return {};
298 }
299 case Builtin::BIcos:
300 case Builtin::BIcosf:
301 case Builtin::BIcosl:
302 case Builtin::BI__builtin_cos:
303 case Builtin::BI__builtin_cosf:
304 case Builtin::BI__builtin_cosf16:
305 case Builtin::BI__builtin_cosl:
306 case Builtin::BI__builtin_cosf128:
309
310 case Builtin::BIceil:
311 case Builtin::BIceilf:
312 case Builtin::BIceill:
313 case Builtin::BI__builtin_ceil:
314 case Builtin::BI__builtin_ceilf:
315 case Builtin::BI__builtin_ceilf16:
316 case Builtin::BI__builtin_ceill:
317 case Builtin::BI__builtin_ceilf128:
320
321 case Builtin::BIexp:
322 case Builtin::BIexpf:
323 case Builtin::BIexpl:
324 case Builtin::BI__builtin_exp:
325 case Builtin::BI__builtin_expf:
326 case Builtin::BI__builtin_expf16:
327 case Builtin::BI__builtin_expl:
328 case Builtin::BI__builtin_expf128:
331
332 case Builtin::BIexp2:
333 case Builtin::BIexp2f:
334 case Builtin::BIexp2l:
335 case Builtin::BI__builtin_exp2:
336 case Builtin::BI__builtin_exp2f:
337 case Builtin::BI__builtin_exp2f16:
338 case Builtin::BI__builtin_exp2l:
339 case Builtin::BI__builtin_exp2f128:
342
343 case Builtin::BIfabs:
344 case Builtin::BIfabsf:
345 case Builtin::BIfabsl:
346 case Builtin::BI__builtin_fabs:
347 case Builtin::BI__builtin_fabsf:
348 case Builtin::BI__builtin_fabsf16:
349 case Builtin::BI__builtin_fabsl:
350 case Builtin::BI__builtin_fabsf128:
352
353 case Builtin::BIfloor:
354 case Builtin::BIfloorf:
355 case Builtin::BIfloorl:
356 case Builtin::BI__builtin_floor:
357 case Builtin::BI__builtin_floorf:
358 case Builtin::BI__builtin_floorf16:
359 case Builtin::BI__builtin_floorl:
360 case Builtin::BI__builtin_floorf128:
362
363 case Builtin::BI__assume:
364 case Builtin::BI__builtin_assume: {
365 if (e->getArg(0)->HasSideEffects(getContext()))
366 return RValue::get(nullptr);
367
368 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
369 cir::AssumeOp::create(builder, loc, argValue);
370 return RValue::get(nullptr);
371 }
372
373 case Builtin::BI__builtin_assume_separate_storage: {
374 mlir::Value value0 = emitScalarExpr(e->getArg(0));
375 mlir::Value value1 = emitScalarExpr(e->getArg(1));
376 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
377 return RValue::get(nullptr);
378 }
379
380 case Builtin::BI__builtin_assume_aligned: {
381 const Expr *ptrExpr = e->getArg(0);
382 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
383 mlir::Value offsetValue =
384 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
385
386 std::optional<llvm::APSInt> alignment =
387 e->getArg(1)->getIntegerConstantExpr(getContext());
388 assert(alignment.has_value() &&
389 "the second argument to __builtin_assume_aligned must be an "
390 "integral constant expression");
391
392 mlir::Value result =
393 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
394 alignment->getSExtValue(), offsetValue);
395 return RValue::get(result);
396 }
397
398 case Builtin::BI__builtin_complex: {
399 mlir::Value real = emitScalarExpr(e->getArg(0));
400 mlir::Value imag = emitScalarExpr(e->getArg(1));
401 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
402 return RValue::getComplex(complex);
403 }
404
405 case Builtin::BI__builtin_creal:
406 case Builtin::BI__builtin_crealf:
407 case Builtin::BI__builtin_creall:
408 case Builtin::BIcreal:
409 case Builtin::BIcrealf:
410 case Builtin::BIcreall: {
411 mlir::Value complex = emitComplexExpr(e->getArg(0));
412 mlir::Value real = builder.createComplexReal(loc, complex);
413 return RValue::get(real);
414 }
415
416 case Builtin::BI__builtin_cimag:
417 case Builtin::BI__builtin_cimagf:
418 case Builtin::BI__builtin_cimagl:
419 case Builtin::BIcimag:
420 case Builtin::BIcimagf:
421 case Builtin::BIcimagl: {
422 mlir::Value complex = emitComplexExpr(e->getArg(0));
423 mlir::Value imag = builder.createComplexImag(loc, complex);
424 return RValue::get(imag);
425 }
426
427 case Builtin::BI__builtin_conj:
428 case Builtin::BI__builtin_conjf:
429 case Builtin::BI__builtin_conjl:
430 case Builtin::BIconj:
431 case Builtin::BIconjf:
432 case Builtin::BIconjl: {
433 mlir::Value complex = emitComplexExpr(e->getArg(0));
434 mlir::Value conj = builder.createUnaryOp(getLoc(e->getExprLoc()),
435 cir::UnaryOpKind::Not, complex);
436 return RValue::getComplex(conj);
437 }
438
439 case Builtin::BI__builtin_clrsb:
440 case Builtin::BI__builtin_clrsbl:
441 case Builtin::BI__builtin_clrsbll:
442 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
443
444 case Builtin::BI__builtin_ctzs:
445 case Builtin::BI__builtin_ctz:
446 case Builtin::BI__builtin_ctzl:
447 case Builtin::BI__builtin_ctzll:
448 case Builtin::BI__builtin_ctzg:
450 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
451
452 case Builtin::BI__builtin_clzs:
453 case Builtin::BI__builtin_clz:
454 case Builtin::BI__builtin_clzl:
455 case Builtin::BI__builtin_clzll:
456 case Builtin::BI__builtin_clzg:
458 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
459
460 case Builtin::BI__builtin_ffs:
461 case Builtin::BI__builtin_ffsl:
462 case Builtin::BI__builtin_ffsll:
463 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
464
465 case Builtin::BI__builtin_parity:
466 case Builtin::BI__builtin_parityl:
467 case Builtin::BI__builtin_parityll:
468 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
469
470 case Builtin::BI__lzcnt16:
471 case Builtin::BI__lzcnt:
472 case Builtin::BI__lzcnt64:
474 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
475
476 case Builtin::BI__popcnt16:
477 case Builtin::BI__popcnt:
478 case Builtin::BI__popcnt64:
479 case Builtin::BI__builtin_popcount:
480 case Builtin::BI__builtin_popcountl:
481 case Builtin::BI__builtin_popcountll:
482 case Builtin::BI__builtin_popcountg:
483 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
484
485 case Builtin::BI__builtin_expect:
486 case Builtin::BI__builtin_expect_with_probability: {
487 mlir::Value argValue = emitScalarExpr(e->getArg(0));
488 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
489
490 mlir::FloatAttr probAttr;
491 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
492 llvm::APFloat probability(0.0);
493 const Expr *probArg = e->getArg(2);
494 [[maybe_unused]] bool evalSucceeded =
495 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
496 assert(evalSucceeded &&
497 "probability should be able to evaluate as float");
498 bool loseInfo = false; // ignored
499 probability.convert(llvm::APFloat::IEEEdouble(),
500 llvm::RoundingMode::Dynamic, &loseInfo);
501 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
502 probability);
503 }
504
505 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
506 argValue, expectedValue, probAttr);
507 return RValue::get(result);
508 }
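// Example (illustrative): `__builtin_expect(x, 1)` becomes a cir::ExpectOp of
// %x against the constant 1 with no probability attribute, while
// `__builtin_expect_with_probability(x, 1, 0.9)` constant-evaluates 0.9 and
// attaches it as an f64 probability attribute on the same op.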
509
510 case Builtin::BI__builtin_bswap16:
511 case Builtin::BI__builtin_bswap32:
512 case Builtin::BI__builtin_bswap64:
513 case Builtin::BI_byteswap_ushort:
514 case Builtin::BI_byteswap_ulong:
515 case Builtin::BI_byteswap_uint64: {
516 mlir::Value arg = emitScalarExpr(e->getArg(0));
517 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
518 }
519
520 case Builtin::BI__builtin_bitreverse8:
521 case Builtin::BI__builtin_bitreverse16:
522 case Builtin::BI__builtin_bitreverse32:
523 case Builtin::BI__builtin_bitreverse64: {
524 mlir::Value arg = emitScalarExpr(e->getArg(0));
525 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
526 }
527
528 case Builtin::BI__builtin_rotateleft8:
529 case Builtin::BI__builtin_rotateleft16:
530 case Builtin::BI__builtin_rotateleft32:
531 case Builtin::BI__builtin_rotateleft64:
532 return emitRotate(e, /*isRotateLeft=*/true);
533
534 case Builtin::BI__builtin_rotateright8:
535 case Builtin::BI__builtin_rotateright16:
536 case Builtin::BI__builtin_rotateright32:
537 case Builtin::BI__builtin_rotateright64:
538 return emitRotate(e, /*isRotateLeft=*/false);
539
540 case Builtin::BI__builtin_coro_id:
541 case Builtin::BI__builtin_coro_promise:
542 case Builtin::BI__builtin_coro_resume:
543 case Builtin::BI__builtin_coro_noop:
544 case Builtin::BI__builtin_coro_destroy:
545 case Builtin::BI__builtin_coro_done:
546 case Builtin::BI__builtin_coro_alloc:
547 case Builtin::BI__builtin_coro_begin:
548 case Builtin::BI__builtin_coro_end:
549 case Builtin::BI__builtin_coro_suspend:
550 case Builtin::BI__builtin_coro_align:
551 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
552 return getUndefRValue(e->getType());
553
554 case Builtin::BI__builtin_coro_frame: {
555 return emitCoroutineFrame();
556 }
557 case Builtin::BI__builtin_coro_free:
558 case Builtin::BI__builtin_coro_size: {
559 GlobalDecl gd{fd};
560 mlir::Type ty = cgm.getTypes().getFunctionType(
561 cgm.getTypes().arrangeGlobalDeclaration(gd));
562 const auto *nd = cast<NamedDecl>(gd.getDecl());
563 cir::FuncOp fnOp =
564 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
565 fnOp.setBuiltin(true);
566 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
567 ReturnValueSlot());
568 }
569
570 case Builtin::BI__builtin_constant_p: {
571 mlir::Type resultType = convertType(e->getType());
572
573 const Expr *arg = e->getArg(0);
574 QualType argType = arg->getType();
575 // FIXME: The allowance for Obj-C pointers and block pointers is historical
576 // and likely a mistake.
577 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
578 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
579 // Per the GCC documentation, only numeric constants are recognized after
580 // inlining.
581 return RValue::get(
582 builder.getConstInt(getLoc(e->getSourceRange()),
583 mlir::cast<cir::IntType>(resultType), 0));
584 }
585
586 if (arg->HasSideEffects(getContext())) {
587 // The argument is unevaluated, so be conservative if it might have
588 // side-effects.
589 return RValue::get(
590 builder.getConstInt(getLoc(e->getSourceRange()),
591 mlir::cast<cir::IntType>(resultType), 0));
592 }
593
594 mlir::Value argValue = emitScalarExpr(arg);
595 if (argType->isObjCObjectPointerType()) {
596 cgm.errorNYI(e->getSourceRange(),
597 "__builtin_constant_p: Obj-C object pointer");
598 return {};
599 }
600 argValue = builder.createBitcast(argValue, convertType(argType));
601
602 mlir::Value result = cir::IsConstantOp::create(
603 builder, getLoc(e->getSourceRange()), argValue);
604 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
605 result = builder.createBoolToInt(result, resultType);
606 return RValue::get(result);
607 }
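// Example (illustrative): `__builtin_constant_p(x)` for a plain integer `x`
// emits a cir::IsConstantOp query on %x and widens its boolean result back to
// int, while arguments with side effects, or of non-numeric types, fold to the
// constant 0 above.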
608 case Builtin::BI__builtin_dynamic_object_size:
609 case Builtin::BI__builtin_object_size: {
610 unsigned type =
611 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
612 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
613
614 // We pass this builtin onto the optimizer so that it can figure out the
615 // object size in more complex cases.
616 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
617 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
618 /*EmittedE=*/nullptr, isDynamic));
619 }
620
621 case Builtin::BI__builtin_prefetch: {
622 auto evaluateOperandAsInt = [&](const Expr *arg) {
623 Expr::EvalResult res;
624 [[maybe_unused]] bool evalSucceed =
625 arg->EvaluateAsInt(res, cgm.getASTContext());
626 assert(evalSucceed && "expression should be able to evaluate as int");
627 return res.Val.getInt().getZExtValue();
628 };
629
630 bool isWrite = false;
631 if (e->getNumArgs() > 1)
632 isWrite = evaluateOperandAsInt(e->getArg(1));
633
634 int locality = 3;
635 if (e->getNumArgs() > 2)
636 locality = evaluateOperandAsInt(e->getArg(2));
637
638 mlir::Value address = emitScalarExpr(e->getArg(0));
639 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
640 return RValue::get(nullptr);
641 }
642 case Builtin::BI__builtin_readcyclecounter:
643 case Builtin::BI__builtin_readsteadycounter:
644 case Builtin::BI__builtin___clear_cache:
645 return errorBuiltinNYI(*this, e, builtinID);
646 case Builtin::BI__builtin_trap:
647 emitTrap(loc, /*createNewBlock=*/true);
648 return RValue::getIgnored();
649 case Builtin::BI__builtin_verbose_trap:
650 case Builtin::BI__debugbreak:
651 return errorBuiltinNYI(*this, e, builtinID);
652 case Builtin::BI__builtin_unreachable:
653 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
654 return RValue::getIgnored();
655 case Builtin::BI__builtin_powi:
656 case Builtin::BI__builtin_powif:
657 case Builtin::BI__builtin_powil:
658 case Builtin::BI__builtin_frexpl:
659 case Builtin::BI__builtin_frexp:
660 case Builtin::BI__builtin_frexpf:
661 case Builtin::BI__builtin_frexpf128:
662 case Builtin::BI__builtin_frexpf16:
663 case Builtin::BImodf:
664 case Builtin::BImodff:
665 case Builtin::BImodfl:
666 case Builtin::BI__builtin_modf:
667 case Builtin::BI__builtin_modff:
668 case Builtin::BI__builtin_modfl:
669 case Builtin::BI__builtin_isgreater:
670 case Builtin::BI__builtin_isgreaterequal:
671 case Builtin::BI__builtin_isless:
672 case Builtin::BI__builtin_islessequal:
673 case Builtin::BI__builtin_islessgreater:
674 case Builtin::BI__builtin_isunordered:
675 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
676 //
677 // The `__builtin_isfpclass()` builtin is a generalization of functions
678 // isnan, isinf, isfinite and some others defined by the C standard. It tests
679 // whether the floating-point value, specified by the first argument, falls
680 // into any of the data classes specified by the second argument.
681 case Builtin::BI__builtin_isnan: {
683 mlir::Value v = emitScalarExpr(e->getArg(0));
685 mlir::Location loc = getLoc(e->getBeginLoc());
686 return RValue::get(builder.createBoolToInt(
687 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
688 convertType(e->getType())));
689 }
690
691 case Builtin::BI__builtin_issignaling: {
693 mlir::Value v = emitScalarExpr(e->getArg(0));
694 mlir::Location loc = getLoc(e->getBeginLoc());
695 return RValue::get(builder.createBoolToInt(
696 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
697 convertType(e->getType())));
698 }
699
700 case Builtin::BI__builtin_isinf: {
702 mlir::Value v = emitScalarExpr(e->getArg(0));
704 mlir::Location loc = getLoc(e->getBeginLoc());
705 return RValue::get(builder.createBoolToInt(
706 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
707 convertType(e->getType())));
708 }
709 case Builtin::BIfinite:
710 case Builtin::BI__finite:
711 case Builtin::BIfinitef:
712 case Builtin::BI__finitef:
713 case Builtin::BIfinitel:
714 case Builtin::BI__finitel:
715 case Builtin::BI__builtin_isfinite: {
717 mlir::Value v = emitScalarExpr(e->getArg(0));
719 mlir::Location loc = getLoc(e->getBeginLoc());
720 return RValue::get(builder.createBoolToInt(
721 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
722 convertType(e->getType())));
723 }
724
725 case Builtin::BI__builtin_isnormal: {
727 mlir::Value v = emitScalarExpr(e->getArg(0));
728 mlir::Location loc = getLoc(e->getBeginLoc());
729 return RValue::get(builder.createBoolToInt(
730 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
731 convertType(e->getType())));
732 }
733
734 case Builtin::BI__builtin_issubnormal: {
736 mlir::Value v = emitScalarExpr(e->getArg(0));
737 mlir::Location loc = getLoc(e->getBeginLoc());
738 return RValue::get(builder.createBoolToInt(
739 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
740 convertType(e->getType())));
741 }
742
743 case Builtin::BI__builtin_iszero: {
745 mlir::Value v = emitScalarExpr(e->getArg(0));
746 mlir::Location loc = getLoc(e->getBeginLoc());
747 return RValue::get(builder.createBoolToInt(
748 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
749 convertType(e->getType())));
750 }
751 case Builtin::BI__builtin_isfpclass: {
752 Expr::EvalResult result;
753 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
754 break;
755
757 mlir::Value v = emitScalarExpr(e->getArg(0));
758 uint64_t test = result.Val.getInt().getLimitedValue();
759 mlir::Location loc = getLoc(e->getBeginLoc());
760 //
761 return RValue::get(builder.createBoolToInt(
762 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
763 convertType(e->getType())));
764 }
765 case Builtin::BI__builtin_nondeterministic_value:
766 case Builtin::BI__builtin_elementwise_abs:
767 return errorBuiltinNYI(*this, e, builtinID);
768 case Builtin::BI__builtin_elementwise_acos:
769 return emitUnaryFPBuiltin<cir::ACosOp>(*this, *e);
770 case Builtin::BI__builtin_elementwise_asin:
771 return emitUnaryFPBuiltin<cir::ASinOp>(*this, *e);
772 case Builtin::BI__builtin_elementwise_atan:
773 return emitUnaryFPBuiltin<cir::ATanOp>(*this, *e);
774 case Builtin::BI__builtin_elementwise_atan2:
775 case Builtin::BI__builtin_elementwise_ceil:
776 case Builtin::BI__builtin_elementwise_exp:
777 case Builtin::BI__builtin_elementwise_exp2:
778 case Builtin::BI__builtin_elementwise_exp10:
779 case Builtin::BI__builtin_elementwise_ldexp:
780 case Builtin::BI__builtin_elementwise_log:
781 case Builtin::BI__builtin_elementwise_log2:
782 case Builtin::BI__builtin_elementwise_log10:
783 case Builtin::BI__builtin_elementwise_pow:
784 case Builtin::BI__builtin_elementwise_bitreverse:
785 return errorBuiltinNYI(*this, e, builtinID);
786 case Builtin::BI__builtin_elementwise_cos:
787 return emitUnaryFPBuiltin<cir::CosOp>(*this, *e);
788 case Builtin::BI__builtin_elementwise_cosh:
789 case Builtin::BI__builtin_elementwise_floor:
790 case Builtin::BI__builtin_elementwise_popcount:
791 case Builtin::BI__builtin_elementwise_roundeven:
792 case Builtin::BI__builtin_elementwise_round:
793 case Builtin::BI__builtin_elementwise_rint:
794 case Builtin::BI__builtin_elementwise_nearbyint:
795 case Builtin::BI__builtin_elementwise_sin:
796 case Builtin::BI__builtin_elementwise_sinh:
797 case Builtin::BI__builtin_elementwise_tan:
798 case Builtin::BI__builtin_elementwise_tanh:
799 case Builtin::BI__builtin_elementwise_trunc:
800 case Builtin::BI__builtin_elementwise_canonicalize:
801 case Builtin::BI__builtin_elementwise_copysign:
802 case Builtin::BI__builtin_elementwise_fma:
803 case Builtin::BI__builtin_elementwise_fshl:
804 case Builtin::BI__builtin_elementwise_fshr:
805 case Builtin::BI__builtin_elementwise_add_sat:
806 case Builtin::BI__builtin_elementwise_sub_sat:
807 case Builtin::BI__builtin_elementwise_max:
808 case Builtin::BI__builtin_elementwise_min:
809 case Builtin::BI__builtin_elementwise_maxnum:
810 case Builtin::BI__builtin_elementwise_minnum:
811 case Builtin::BI__builtin_elementwise_maximum:
812 case Builtin::BI__builtin_elementwise_minimum:
813 case Builtin::BI__builtin_elementwise_maximumnum:
814 case Builtin::BI__builtin_elementwise_minimumnum:
815 case Builtin::BI__builtin_reduce_max:
816 case Builtin::BI__builtin_reduce_min:
817 case Builtin::BI__builtin_reduce_add:
818 case Builtin::BI__builtin_reduce_mul:
819 case Builtin::BI__builtin_reduce_xor:
820 case Builtin::BI__builtin_reduce_or:
821 case Builtin::BI__builtin_reduce_and:
822 case Builtin::BI__builtin_reduce_maximum:
823 case Builtin::BI__builtin_reduce_minimum:
824 case Builtin::BI__builtin_matrix_transpose:
825 case Builtin::BI__builtin_matrix_column_major_load:
826 case Builtin::BI__builtin_matrix_column_major_store:
827 case Builtin::BI__builtin_masked_load:
828 case Builtin::BI__builtin_masked_expand_load:
829 case Builtin::BI__builtin_masked_gather:
830 case Builtin::BI__builtin_masked_store:
831 case Builtin::BI__builtin_masked_compress_store:
832 case Builtin::BI__builtin_masked_scatter:
833 case Builtin::BI__builtin_isinf_sign:
834 case Builtin::BI__builtin_flt_rounds:
835 case Builtin::BI__builtin_set_flt_rounds:
836 case Builtin::BI__builtin_fpclassify:
837 return errorBuiltinNYI(*this, e, builtinID);
838 case Builtin::BIalloca:
839 case Builtin::BI_alloca:
840 case Builtin::BI__builtin_alloca_uninitialized:
841 case Builtin::BI__builtin_alloca:
842 return emitBuiltinAlloca(*this, e, builtinID);
843 case Builtin::BI__builtin_alloca_with_align_uninitialized:
844 case Builtin::BI__builtin_alloca_with_align:
845 case Builtin::BI__builtin_infer_alloc_token:
846 case Builtin::BIbzero:
847 case Builtin::BI__builtin_bzero:
848 case Builtin::BIbcopy:
849 case Builtin::BI__builtin_bcopy:
850 return errorBuiltinNYI(*this, e, builtinID);
851 case Builtin::BImemcpy:
852 case Builtin::BI__builtin_memcpy:
853 case Builtin::BImempcpy:
854 case Builtin::BI__builtin_mempcpy:
855 case Builtin::BI__builtin_memcpy_inline:
856 case Builtin::BI__builtin_char_memchr:
857 case Builtin::BI__builtin___memcpy_chk:
858 case Builtin::BI__builtin_objc_memmove_collectable:
859 case Builtin::BI__builtin___memmove_chk:
860 case Builtin::BI__builtin_trivially_relocate:
861 case Builtin::BImemmove:
862 case Builtin::BI__builtin_memmove:
863 case Builtin::BImemset:
864 case Builtin::BI__builtin_memset:
865 case Builtin::BI__builtin_memset_inline:
866 case Builtin::BI__builtin___memset_chk:
867 case Builtin::BI__builtin_wmemchr:
868 case Builtin::BI__builtin_wmemcmp:
869 break; // Handled as library calls below.
870 case Builtin::BI__builtin_dwarf_cfa:
871 return errorBuiltinNYI(*this, e, builtinID);
872 case Builtin::BI__builtin_return_address:
873 case Builtin::BI_ReturnAddress:
874 case Builtin::BI__builtin_frame_address: {
875 mlir::Location loc = getLoc(e->getExprLoc());
876 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
877 if (builtinID == Builtin::BI__builtin_return_address) {
878 return RValue::get(cir::ReturnAddrOp::create(
879 builder, loc,
880 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
881 }
882 return RValue::get(cir::FrameAddrOp::create(
883 builder, loc,
884 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
885 }
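// Example (illustrative): `__builtin_return_address(0)` becomes a
// cir::ReturnAddrOp whose operand is the constant level 0 as a u32, and
// `__builtin_frame_address(0)` builds the analogous cir::FrameAddrOp.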
886 case Builtin::BI__builtin_extract_return_addr:
887 case Builtin::BI__builtin_frob_return_addr:
888 case Builtin::BI__builtin_dwarf_sp_column:
889 case Builtin::BI__builtin_init_dwarf_reg_size_table:
890 case Builtin::BI__builtin_eh_return:
891 case Builtin::BI__builtin_unwind_init:
892 case Builtin::BI__builtin_extend_pointer:
893 case Builtin::BI__builtin_setjmp:
894 case Builtin::BI__builtin_longjmp:
895 case Builtin::BI__builtin_launder:
896 case Builtin::BI__sync_fetch_and_add:
897 case Builtin::BI__sync_fetch_and_sub:
898 case Builtin::BI__sync_fetch_and_or:
899 case Builtin::BI__sync_fetch_and_and:
900 case Builtin::BI__sync_fetch_and_xor:
901 case Builtin::BI__sync_fetch_and_nand:
902 case Builtin::BI__sync_add_and_fetch:
903 case Builtin::BI__sync_sub_and_fetch:
904 case Builtin::BI__sync_and_and_fetch:
905 case Builtin::BI__sync_or_and_fetch:
906 case Builtin::BI__sync_xor_and_fetch:
907 case Builtin::BI__sync_nand_and_fetch:
908 case Builtin::BI__sync_val_compare_and_swap:
909 case Builtin::BI__sync_bool_compare_and_swap:
910 case Builtin::BI__sync_lock_test_and_set:
911 case Builtin::BI__sync_lock_release:
912 case Builtin::BI__sync_swap:
913 case Builtin::BI__sync_fetch_and_add_1:
914 case Builtin::BI__sync_fetch_and_add_2:
915 case Builtin::BI__sync_fetch_and_add_4:
916 case Builtin::BI__sync_fetch_and_add_8:
917 case Builtin::BI__sync_fetch_and_add_16:
918 case Builtin::BI__sync_fetch_and_sub_1:
919 case Builtin::BI__sync_fetch_and_sub_2:
920 case Builtin::BI__sync_fetch_and_sub_4:
921 case Builtin::BI__sync_fetch_and_sub_8:
922 case Builtin::BI__sync_fetch_and_sub_16:
923 case Builtin::BI__sync_fetch_and_or_1:
924 case Builtin::BI__sync_fetch_and_or_2:
925 case Builtin::BI__sync_fetch_and_or_4:
926 case Builtin::BI__sync_fetch_and_or_8:
927 case Builtin::BI__sync_fetch_and_or_16:
928 case Builtin::BI__sync_fetch_and_and_1:
929 case Builtin::BI__sync_fetch_and_and_2:
930 case Builtin::BI__sync_fetch_and_and_4:
931 case Builtin::BI__sync_fetch_and_and_8:
932 case Builtin::BI__sync_fetch_and_and_16:
933 case Builtin::BI__sync_fetch_and_xor_1:
934 case Builtin::BI__sync_fetch_and_xor_2:
935 case Builtin::BI__sync_fetch_and_xor_4:
936 case Builtin::BI__sync_fetch_and_xor_8:
937 case Builtin::BI__sync_fetch_and_xor_16:
938 case Builtin::BI__sync_fetch_and_nand_1:
939 case Builtin::BI__sync_fetch_and_nand_2:
940 case Builtin::BI__sync_fetch_and_nand_4:
941 case Builtin::BI__sync_fetch_and_nand_8:
942 case Builtin::BI__sync_fetch_and_nand_16:
943 case Builtin::BI__sync_fetch_and_min:
944 case Builtin::BI__sync_fetch_and_max:
945 case Builtin::BI__sync_fetch_and_umin:
946 case Builtin::BI__sync_fetch_and_umax:
947 case Builtin::BI__sync_add_and_fetch_1:
948 case Builtin::BI__sync_add_and_fetch_2:
949 case Builtin::BI__sync_add_and_fetch_4:
950 case Builtin::BI__sync_add_and_fetch_8:
951 case Builtin::BI__sync_add_and_fetch_16:
952 case Builtin::BI__sync_sub_and_fetch_1:
953 case Builtin::BI__sync_sub_and_fetch_2:
954 case Builtin::BI__sync_sub_and_fetch_4:
955 case Builtin::BI__sync_sub_and_fetch_8:
956 case Builtin::BI__sync_sub_and_fetch_16:
957 case Builtin::BI__sync_and_and_fetch_1:
958 case Builtin::BI__sync_and_and_fetch_2:
959 case Builtin::BI__sync_and_and_fetch_4:
960 case Builtin::BI__sync_and_and_fetch_8:
961 case Builtin::BI__sync_and_and_fetch_16:
962 case Builtin::BI__sync_or_and_fetch_1:
963 case Builtin::BI__sync_or_and_fetch_2:
964 case Builtin::BI__sync_or_and_fetch_4:
965 case Builtin::BI__sync_or_and_fetch_8:
966 case Builtin::BI__sync_or_and_fetch_16:
967 case Builtin::BI__sync_xor_and_fetch_1:
968 case Builtin::BI__sync_xor_and_fetch_2:
969 case Builtin::BI__sync_xor_and_fetch_4:
970 case Builtin::BI__sync_xor_and_fetch_8:
971 case Builtin::BI__sync_xor_and_fetch_16:
972 case Builtin::BI__sync_nand_and_fetch_1:
973 case Builtin::BI__sync_nand_and_fetch_2:
974 case Builtin::BI__sync_nand_and_fetch_4:
975 case Builtin::BI__sync_nand_and_fetch_8:
976 case Builtin::BI__sync_nand_and_fetch_16:
977 case Builtin::BI__sync_val_compare_and_swap_1:
978 case Builtin::BI__sync_val_compare_and_swap_2:
979 case Builtin::BI__sync_val_compare_and_swap_4:
980 case Builtin::BI__sync_val_compare_and_swap_8:
981 case Builtin::BI__sync_val_compare_and_swap_16:
982 case Builtin::BI__sync_bool_compare_and_swap_1:
983 case Builtin::BI__sync_bool_compare_and_swap_2:
984 case Builtin::BI__sync_bool_compare_and_swap_4:
985 case Builtin::BI__sync_bool_compare_and_swap_8:
986 case Builtin::BI__sync_bool_compare_and_swap_16:
987 case Builtin::BI__sync_swap_1:
988 case Builtin::BI__sync_swap_2:
989 case Builtin::BI__sync_swap_4:
990 case Builtin::BI__sync_swap_8:
991 case Builtin::BI__sync_swap_16:
992 case Builtin::BI__sync_lock_test_and_set_1:
993 case Builtin::BI__sync_lock_test_and_set_2:
994 case Builtin::BI__sync_lock_test_and_set_4:
995 case Builtin::BI__sync_lock_test_and_set_8:
996 case Builtin::BI__sync_lock_test_and_set_16:
997 case Builtin::BI__sync_lock_release_1:
998 case Builtin::BI__sync_lock_release_2:
999 case Builtin::BI__sync_lock_release_4:
1000 case Builtin::BI__sync_lock_release_8:
1001 case Builtin::BI__sync_lock_release_16:
1002 case Builtin::BI__sync_synchronize:
1003 case Builtin::BI__builtin_nontemporal_load:
1004 case Builtin::BI__builtin_nontemporal_store:
1005 case Builtin::BI__c11_atomic_is_lock_free:
1006 case Builtin::BI__atomic_is_lock_free:
1007 case Builtin::BI__atomic_test_and_set:
1008 case Builtin::BI__atomic_clear:
1009 return errorBuiltinNYI(*this, e, builtinID);
1010 case Builtin::BI__atomic_thread_fence: {
1011 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
1012 return RValue::get(nullptr);
1013 }
1014 case Builtin::BI__atomic_signal_fence: {
1015 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
1016 return RValue::get(nullptr);
1017 }
1018 case Builtin::BI__c11_atomic_thread_fence:
1019 case Builtin::BI__c11_atomic_signal_fence:
1020 case Builtin::BI__scoped_atomic_thread_fence:
1021 case Builtin::BI__builtin_signbit:
1022 case Builtin::BI__builtin_signbitf:
1023 case Builtin::BI__builtin_signbitl:
1024 case Builtin::BI__warn_memset_zero_len:
1025 case Builtin::BI__annotation:
1026 case Builtin::BI__builtin_annotation:
1027 case Builtin::BI__builtin_addcb:
1028 case Builtin::BI__builtin_addcs:
1029 case Builtin::BI__builtin_addc:
1030 case Builtin::BI__builtin_addcl:
1031 case Builtin::BI__builtin_addcll:
1032 case Builtin::BI__builtin_subcb:
1033 case Builtin::BI__builtin_subcs:
1034 case Builtin::BI__builtin_subc:
1035 case Builtin::BI__builtin_subcl:
1036 case Builtin::BI__builtin_subcll:
1037 return errorBuiltinNYI(*this, e, builtinID);
1038
1039 case Builtin::BI__builtin_add_overflow:
1040 case Builtin::BI__builtin_sub_overflow:
1041 case Builtin::BI__builtin_mul_overflow: {
1042 const clang::Expr *leftArg = e->getArg(0);
1043 const clang::Expr *rightArg = e->getArg(1);
1044 const clang::Expr *resultArg = e->getArg(2);
1045
1046 clang::QualType resultQTy =
1047 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1048
1049 WidthAndSignedness leftInfo =
1050 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
1051 WidthAndSignedness rightInfo =
1052 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
1053 WidthAndSignedness resultInfo =
1054 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
1055
1056 // Note: we compute the encompassing type with consideration of the result
1057 // type, so that later LLVM lowering does not introduce redundant integral
1058 // extension casts.
1059 WidthAndSignedness encompassingInfo =
1060 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
1061
1062 auto encompassingCIRTy = cir::IntType::get(
1063 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
1064 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1065
1066 mlir::Value left = emitScalarExpr(leftArg);
1067 mlir::Value right = emitScalarExpr(rightArg);
1068 Address resultPtr = emitPointerWithAlignment(resultArg);
1069
1070 // Extend each operand to the encompassing type, if necessary.
1071 if (left.getType() != encompassingCIRTy)
1072 left =
1073 builder.createCast(cir::CastKind::integral, left, encompassingCIRTy);
1074 if (right.getType() != encompassingCIRTy)
1075 right =
1076 builder.createCast(cir::CastKind::integral, right, encompassingCIRTy);
1077
1078 // Perform the operation on the extended values.
1079 cir::BinOpOverflowKind opKind;
1080 switch (builtinID) {
1081 default:
1082 llvm_unreachable("Unknown overflow builtin id.");
1083 case Builtin::BI__builtin_add_overflow:
1084 opKind = cir::BinOpOverflowKind::Add;
1085 break;
1086 case Builtin::BI__builtin_sub_overflow:
1087 opKind = cir::BinOpOverflowKind::Sub;
1088 break;
1089 case Builtin::BI__builtin_mul_overflow:
1090 opKind = cir::BinOpOverflowKind::Mul;
1091 break;
1092 }
1093
1094 mlir::Location loc = getLoc(e->getSourceRange());
1095 auto arithOp = cir::BinOpOverflowOp::create(builder, loc, resultCIRTy,
1096 opKind, left, right);
1097
1098 // Here is a slight difference from the original clang CodeGen:
1099 // - In the original clang CodeGen, the checked arithmetic result is
1100 // first computed as a value of the encompassing type, and then it is
1101 // truncated to the actual result type with a second overflow check.
1102 // - In CIRGen, the checked arithmetic operation directly produces the
1103 // checked arithmetic result in its expected type.
1104 //
1105 // So we don't need a truncation and a second overflow check here.
1106
1107 // Finally, store the result using the pointer.
1108 bool isVolatile =
1109 resultArg->getType()->getPointeeType().isVolatileQualified();
1110 builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
1111 resultPtr, isVolatile);
1112
1113 return RValue::get(arithOp.getOverflow());
1114 }
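// Example (illustrative): for `__builtin_add_overflow(a, b, &r)` with `a` an
// int, `b` an unsigned and `r` a long, the encompassing type of {s32, u32, s64}
// is s64, so both operands are integrally cast to s64, one cir::BinOpOverflowOp
// add yields the result directly in the long result type together with an
// overflow flag, the result is stored through `&r`, and the flag is the value
// of the call.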
1115
1116 case Builtin::BI__builtin_uadd_overflow:
1117 case Builtin::BI__builtin_uaddl_overflow:
1118 case Builtin::BI__builtin_uaddll_overflow:
1119 case Builtin::BI__builtin_usub_overflow:
1120 case Builtin::BI__builtin_usubl_overflow:
1121 case Builtin::BI__builtin_usubll_overflow:
1122 case Builtin::BI__builtin_umul_overflow:
1123 case Builtin::BI__builtin_umull_overflow:
1124 case Builtin::BI__builtin_umulll_overflow:
1125 case Builtin::BI__builtin_sadd_overflow:
1126 case Builtin::BI__builtin_saddl_overflow:
1127 case Builtin::BI__builtin_saddll_overflow:
1128 case Builtin::BI__builtin_ssub_overflow:
1129 case Builtin::BI__builtin_ssubl_overflow:
1130 case Builtin::BI__builtin_ssubll_overflow:
1131 case Builtin::BI__builtin_smul_overflow:
1132 case Builtin::BI__builtin_smull_overflow:
1133 case Builtin::BI__builtin_smulll_overflow: {
1134 // Scalarize our inputs.
1135 mlir::Value x = emitScalarExpr(e->getArg(0));
1136 mlir::Value y = emitScalarExpr(e->getArg(1));
1137
1138 const clang::Expr *resultArg = e->getArg(2);
1139 Address resultPtr = emitPointerWithAlignment(resultArg);
1140
1141 // Decide which of the arithmetic operation we are lowering to:
1142 cir::BinOpOverflowKind arithKind;
1143 switch (builtinID) {
1144 default:
1145 llvm_unreachable("Unknown overflow builtin id.");
1146 case Builtin::BI__builtin_uadd_overflow:
1147 case Builtin::BI__builtin_uaddl_overflow:
1148 case Builtin::BI__builtin_uaddll_overflow:
1149 case Builtin::BI__builtin_sadd_overflow:
1150 case Builtin::BI__builtin_saddl_overflow:
1151 case Builtin::BI__builtin_saddll_overflow:
1152 arithKind = cir::BinOpOverflowKind::Add;
1153 break;
1154 case Builtin::BI__builtin_usub_overflow:
1155 case Builtin::BI__builtin_usubl_overflow:
1156 case Builtin::BI__builtin_usubll_overflow:
1157 case Builtin::BI__builtin_ssub_overflow:
1158 case Builtin::BI__builtin_ssubl_overflow:
1159 case Builtin::BI__builtin_ssubll_overflow:
1160 arithKind = cir::BinOpOverflowKind::Sub;
1161 break;
1162 case Builtin::BI__builtin_umul_overflow:
1163 case Builtin::BI__builtin_umull_overflow:
1164 case Builtin::BI__builtin_umulll_overflow:
1165 case Builtin::BI__builtin_smul_overflow:
1166 case Builtin::BI__builtin_smull_overflow:
1167 case Builtin::BI__builtin_smulll_overflow:
1168 arithKind = cir::BinOpOverflowKind::Mul;
1169 break;
1170 }
1171
1172 clang::QualType resultQTy =
1173 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1174 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1175
1176 mlir::Location loc = getLoc(e->getSourceRange());
1177 cir::BinOpOverflowOp arithOp = cir::BinOpOverflowOp::create(
1178 builder, loc, resultCIRTy, arithKind, x, y);
1179
1180 bool isVolatile =
1181 resultArg->getType()->getPointeeType().isVolatileQualified();
1182 builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
1183 resultPtr, isVolatile);
1184
1185 return RValue::get(arithOp.getOverflow());
1186 }
1187
1188 case Builtin::BIaddressof:
1189 case Builtin::BI__addressof:
1190 case Builtin::BI__builtin_addressof:
1191 case Builtin::BI__builtin_function_start:
1192 return errorBuiltinNYI(*this, e, builtinID);
1193 case Builtin::BI__builtin_operator_new:
1194 return emitBuiltinNewDeleteCall(
1195 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
1196 case Builtin::BI__builtin_operator_delete:
1197 emitBuiltinNewDeleteCall(
1198 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
1199 return RValue::get(nullptr);
1200 case Builtin::BI__builtin_is_aligned:
1201 case Builtin::BI__builtin_align_up:
1202 case Builtin::BI__builtin_align_down:
1203 case Builtin::BI__noop:
1204 case Builtin::BI__builtin_call_with_static_chain:
1205 case Builtin::BI_InterlockedExchange8:
1206 case Builtin::BI_InterlockedExchange16:
1207 case Builtin::BI_InterlockedExchange:
1208 case Builtin::BI_InterlockedExchangePointer:
1209 case Builtin::BI_InterlockedCompareExchangePointer:
1210 case Builtin::BI_InterlockedCompareExchangePointer_nf:
1211 case Builtin::BI_InterlockedCompareExchange8:
1212 case Builtin::BI_InterlockedCompareExchange16:
1213 case Builtin::BI_InterlockedCompareExchange:
1214 case Builtin::BI_InterlockedCompareExchange64:
1215 case Builtin::BI_InterlockedIncrement16:
1216 case Builtin::BI_InterlockedIncrement:
1217 case Builtin::BI_InterlockedDecrement16:
1218 case Builtin::BI_InterlockedDecrement:
1219 case Builtin::BI_InterlockedAnd8:
1220 case Builtin::BI_InterlockedAnd16:
1221 case Builtin::BI_InterlockedAnd:
1222 case Builtin::BI_InterlockedExchangeAdd8:
1223 case Builtin::BI_InterlockedExchangeAdd16:
1224 case Builtin::BI_InterlockedExchangeAdd:
1225 case Builtin::BI_InterlockedExchangeSub8:
1226 case Builtin::BI_InterlockedExchangeSub16:
1227 case Builtin::BI_InterlockedExchangeSub:
1228 case Builtin::BI_InterlockedOr8:
1229 case Builtin::BI_InterlockedOr16:
1230 case Builtin::BI_InterlockedOr:
1231 case Builtin::BI_InterlockedXor8:
1232 case Builtin::BI_InterlockedXor16:
1233 case Builtin::BI_InterlockedXor:
1234 case Builtin::BI_bittest64:
1235 case Builtin::BI_bittest:
1236 case Builtin::BI_bittestandcomplement64:
1237 case Builtin::BI_bittestandcomplement:
1238 case Builtin::BI_bittestandreset64:
1239 case Builtin::BI_bittestandreset:
1240 case Builtin::BI_bittestandset64:
1241 case Builtin::BI_bittestandset:
1242 case Builtin::BI_interlockedbittestandreset:
1243 case Builtin::BI_interlockedbittestandreset64:
1244 case Builtin::BI_interlockedbittestandreset64_acq:
1245 case Builtin::BI_interlockedbittestandreset64_rel:
1246 case Builtin::BI_interlockedbittestandreset64_nf:
1247 case Builtin::BI_interlockedbittestandset64:
1248 case Builtin::BI_interlockedbittestandset64_acq:
1249 case Builtin::BI_interlockedbittestandset64_rel:
1250 case Builtin::BI_interlockedbittestandset64_nf:
1251 case Builtin::BI_interlockedbittestandset:
1252 case Builtin::BI_interlockedbittestandset_acq:
1253 case Builtin::BI_interlockedbittestandset_rel:
1254 case Builtin::BI_interlockedbittestandset_nf:
1255 case Builtin::BI_interlockedbittestandreset_acq:
1256 case Builtin::BI_interlockedbittestandreset_rel:
1257 case Builtin::BI_interlockedbittestandreset_nf:
1258 case Builtin::BI__iso_volatile_load8:
1259 case Builtin::BI__iso_volatile_load16:
1260 case Builtin::BI__iso_volatile_load32:
1261 case Builtin::BI__iso_volatile_load64:
1262 case Builtin::BI__iso_volatile_store8:
1263 case Builtin::BI__iso_volatile_store16:
1264 case Builtin::BI__iso_volatile_store32:
1265 case Builtin::BI__iso_volatile_store64:
1266 case Builtin::BI__builtin_ptrauth_sign_constant:
1267 case Builtin::BI__builtin_ptrauth_auth:
1268 case Builtin::BI__builtin_ptrauth_auth_and_resign:
1269 case Builtin::BI__builtin_ptrauth_blend_discriminator:
1270 case Builtin::BI__builtin_ptrauth_sign_generic_data:
1271 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
1272 case Builtin::BI__builtin_ptrauth_strip:
1273 case Builtin::BI__builtin_get_vtable_pointer:
1274 case Builtin::BI__exception_code:
1275 case Builtin::BI_exception_code:
1276 case Builtin::BI__exception_info:
1277 case Builtin::BI_exception_info:
1278 case Builtin::BI__abnormal_termination:
1279 case Builtin::BI_abnormal_termination:
1280 case Builtin::BI_setjmpex:
1281 case Builtin::BI_setjmp:
1282 case Builtin::BImove:
1283 case Builtin::BImove_if_noexcept:
1284 case Builtin::BIforward:
1285 case Builtin::BIforward_like:
1286 case Builtin::BIas_const:
1287 case Builtin::BI__GetExceptionInfo:
1288 case Builtin::BI__fastfail:
1289 case Builtin::BIread_pipe:
1290 case Builtin::BIwrite_pipe:
1291 case Builtin::BIreserve_read_pipe:
1292 case Builtin::BIreserve_write_pipe:
1293 case Builtin::BIwork_group_reserve_read_pipe:
1294 case Builtin::BIwork_group_reserve_write_pipe:
1295 case Builtin::BIsub_group_reserve_read_pipe:
1296 case Builtin::BIsub_group_reserve_write_pipe:
1297 case Builtin::BIcommit_read_pipe:
1298 case Builtin::BIcommit_write_pipe:
1299 case Builtin::BIwork_group_commit_read_pipe:
1300 case Builtin::BIwork_group_commit_write_pipe:
1301 case Builtin::BIsub_group_commit_read_pipe:
1302 case Builtin::BIsub_group_commit_write_pipe:
1303 case Builtin::BIget_pipe_num_packets:
1304 case Builtin::BIget_pipe_max_packets:
1305 case Builtin::BIto_global:
1306 case Builtin::BIto_local:
1307 case Builtin::BIto_private:
1308 case Builtin::BIenqueue_kernel:
1309 case Builtin::BIget_kernel_work_group_size:
1310 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1311 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1312 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1313 case Builtin::BI__builtin_store_half:
1314 case Builtin::BI__builtin_store_halff:
1315 case Builtin::BI__builtin_load_half:
1316 case Builtin::BI__builtin_load_halff:
1317 return errorBuiltinNYI(*this, e, builtinID);
1318 case Builtin::BI__builtin_printf:
1319 case Builtin::BIprintf:
1320 break;
1321 case Builtin::BI__builtin_canonicalize:
1322 case Builtin::BI__builtin_canonicalizef:
1323 case Builtin::BI__builtin_canonicalizef16:
1324 case Builtin::BI__builtin_canonicalizel:
1325 case Builtin::BI__builtin_thread_pointer:
1326 case Builtin::BI__builtin_os_log_format:
1327 case Builtin::BI__xray_customevent:
1328 case Builtin::BI__xray_typedevent:
1329 case Builtin::BI__builtin_ms_va_start:
1330 case Builtin::BI__builtin_ms_va_end:
1331 case Builtin::BI__builtin_ms_va_copy:
1332 case Builtin::BI__builtin_get_device_side_mangled_name:
1333 return errorBuiltinNYI(*this, e, builtinID);
1334 }
1335
1336 // If this is an alias for a lib function (e.g. __builtin_sin), emit
1337 // the call using the normal call path, but using the unmangled
1338 // version of the function name.
1339 if (getContext().BuiltinInfo.isLibFunction(builtinID))
1340 return emitLibraryCall(*this, fd, e,
1341 cgm.getBuiltinLibFunction(fd, builtinID));
1342
1343 // Some target-specific builtins can have aggregate return values, e.g.
1344 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
1345 // returnValue to be non-null, so that the target-specific emission code can
1346 // always just emit into it.
1347 cir::TypeEvaluationKind evalKind = getEvaluationKind(e->getType());
1348 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
1349 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
1350 return getUndefRValue(e->getType());
1351 }
1352
1353 // Now see if we can emit a target-specific builtin.
1354 // FIXME: This is a temporary mechanism (double-optional semantics) that will
1355 // go away once everything is implemented:
1356 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
1357 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
1358 // but also didn't handle the builtin.
1359 if (std::optional<mlir::Value> rst =
1360 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
1361 mlir::Value v = rst.value();
1362 // CIR dialect operations may have no results, in which case no value is
1363 // returned even though the builtin was emitted successfully.
1364 if (!v)
1365 return RValue::get(nullptr);
1366
1367 switch (evalKind) {
1368 case cir::TEK_Scalar:
1369 if (mlir::isa<cir::VoidType>(v.getType()))
1370 return RValue::get(nullptr);
1371 return RValue::get(v);
1372 case cir::TEK_Aggregate:
1373 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
1374 return getUndefRValue(e->getType());
1375 case cir::TEK_Complex:
1376 llvm_unreachable("No current target builtin returns complex");
1377 }
1378 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
1379 }
1380
1381 cgm.errorNYI(e->getSourceRange(),
1382 std::string("unimplemented builtin call: ") +
1383 getContext().BuiltinInfo.getName(builtinID));
1384 return getUndefRValue(e->getType());
1385}
1386
1387static std::optional<mlir::Value>
1388emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID,
1389 const CallExpr *e, ReturnValueSlot &returnValue,
1390 llvm::Triple::ArchType arch) {
1391 // When compiling in HipStdPar mode we have to be conservative in rejecting
1392 // target specific features in the FE, and defer the possible error to the
1393 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
1394 // referenced by an accelerator executable function, we emit an error.
1395 // Returning nullptr here leads to the builtin being handled in
1396 // EmitStdParUnsupportedBuiltin.
1397 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
1398 arch != cgf->getTarget().getTriple().getArch())
1399 return std::nullopt;
1400
1401 switch (arch) {
1402 case llvm::Triple::arm:
1403 case llvm::Triple::armeb:
1404 case llvm::Triple::thumb:
1405 case llvm::Triple::thumbeb:
1406 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1407 // At this point, we don't even know that the builtin is target-specific.
1408 return std::nullopt;
1409 case llvm::Triple::aarch64:
1410 case llvm::Triple::aarch64_32:
1411 case llvm::Triple::aarch64_be:
1412 return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
1413 case llvm::Triple::bpfeb:
1414 case llvm::Triple::bpfel:
1415 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1416 // At this point, we don't even know that the builtin is target-specific.
1417 return std::nullopt;
1418
1419 case llvm::Triple::x86:
1420 case llvm::Triple::x86_64:
1421 return cgf->emitX86BuiltinExpr(builtinID, e);
1422
1423 case llvm::Triple::ppc:
1424 case llvm::Triple::ppcle:
1425 case llvm::Triple::ppc64:
1426 case llvm::Triple::ppc64le:
1427 case llvm::Triple::r600:
1428 case llvm::Triple::amdgcn:
1429 case llvm::Triple::systemz:
1430 case llvm::Triple::nvptx:
1431 case llvm::Triple::nvptx64:
1432 case llvm::Triple::wasm32:
1433 case llvm::Triple::wasm64:
1434 case llvm::Triple::hexagon:
1435 case llvm::Triple::riscv32:
1436 case llvm::Triple::riscv64:
1437 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1438 // At this point, we don't even know that the builtin is target-specific.
1439 return std::nullopt;
1440 default:
1441 return std::nullopt;
1442 }
1443}
1444
1445std::optional<mlir::Value>
1446CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
1447 ReturnValueSlot &returnValue) {
1448 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
1449 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
1450 return emitTargetArchBuiltinExpr(
1451 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
1452 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
1453 }
1454
1455 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
1456 getTarget().getTriple().getArch());
1457}
1458
1459mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(
1460 const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
1461 mlir::Value arg = {};
1462 if ((iceArguments & (1 << idx)) == 0) {
1463 arg = emitScalarExpr(argExpr);
1464 } else {
1465 // If this is required to be a constant, constant fold it so that we
1466 // know that the generated intrinsic gets a ConstantInt.
1467 const std::optional<llvm::APSInt> result =
1468 argExpr->getIntegerConstantExpr(getContext());
1469 assert(result && "Expected argument to be a constant");
1470 arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
1471 }
1472 return arg;
1473}
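// Example (illustrative): when bit `idx` of `iceArguments` is set, the
// corresponding builtin argument is required to be an integer constant
// expression, so it is folded to a constant here rather than emitted as a
// scalar expression; all other arguments go through emitScalarExpr as usual.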
1474
1475/// Given a builtin id for a function like "__builtin_fabsf", return the
1476/// cir::FuncOp for "fabsf".
1477cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
1478 unsigned builtinID) {
1479 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
1480
1481 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
1482 // to build this up so provide a small stack buffer to handle the vast
1483 // majority of names.
1485
1487 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
1488
1489 GlobalDecl d(fd);
1490 mlir::Type type = convertType(fd->getType());
1491 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
1492}
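// Illustrative example (added for exposition; not part of the upstream file):
// for
//   float y = __builtin_fabsf(x);
// BuiltinInfo.getName() returns "__builtin_fabsf" and substr(10) drops the
// ten-character "__builtin_" prefix, so the function created above is the
// libm symbol "fabsf", typed with the builtin's own function type.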
1493
1494mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *e) {
1495 mlir::Value argValue = evaluateExprAsBool(e);
1496 if (!sanOpts.has(SanitizerKind::Builtin))
1497 return argValue;
1498
1500 cgm.errorNYI(e->getSourceRange(),
1501 "emitCheckedArgForAssume: sanitizers are NYI");
1502 return {};
1503}
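// Illustrative example (added for exposition; not part of the upstream file):
// for
//   __builtin_assume(n > 0);
// the condition n > 0 is emitted via evaluateExprAsBool and handed to the
// assume lowering; only under -fsanitize=builtin would the checked path above
// apply, and that path is still NYI.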
1504
1505void CIRGenFunction::emitVAStart(mlir::Value vaList, mlir::Value count) {
1506 // LLVM codegen casts to i8*; there is no real gain in doing this for
1507 // CIRGen this early, so defer it to LLVM lowering.
1508 cir::VAStartOp::create(builder, vaList.getLoc(), vaList, count);
1509}
1510
1511void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
1512 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
1513}
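// Illustrative example (added for exposition; not part of the upstream file):
// a variadic C function such as
//   int sum(int n, ...) {
//     va_list ap;
//     va_start(ap, n);
//     /* ... */
//     va_end(ap);
//   }
// yields one cir.va_start and one cir.va_end on the address of ap; unlike
// classic LLVM codegen, no cast of the va_list pointer to i8* is emitted here.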
1514
1515// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
1516// default this lowers to llvm.va_arg, which is incomplete and not ABI-compliant
1517// on most targets, so cir.va_arg will need some ABI handling in LoweringPrepare.
1518mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *ve) {
1519 assert(!cir::MissingFeatures::msabi());
1520 assert(!cir::MissingFeatures::vlas());
1521 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
1522 mlir::Type type = convertType(ve->getType());
1523 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
1524 return cir::VAArgOp::create(builder, loc, type, vaList);
1525}
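// Illustrative example (added for exposition; not part of the upstream file):
// continuing the sketch above,
//   int v = va_arg(ap, int);
// becomes a single cir.va_arg that takes the va_list pointer and produces a
// value of the converted type; the per-target ABI expansion is deferred to a
// later lowering stage, as the FIXME above notes.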
1526
1527mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
1528 cir::IntType resType,
1529 mlir::Value emittedE,
1530 bool isDynamic) {
1532
1533 // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
1534 // evaluate e for side effects. In either case, just as in the original LLVM
1535 // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
1536 if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
1537 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
1538 (type & 2) ? 0 : -1);
1539
1540 mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
1541 assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
1542 "Non-pointer passed to __builtin_object_size?");
1543
1545
1546 // Extract the min/max mode from type. CIR only supports type 0
1547 // (max, whole object) and type 2 (min, whole object), not type 1 or 3
1548 // (closest subobject variants).
1549 const bool min = ((type & 2) != 0);
1550 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
1551 auto op =
1552 cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
1553 min, /*nullUnknown=*/true, isDynamic);
1554 return op.getResult();
1555}
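// Illustrative summary (added for exposition; not part of the upstream file)
// of the GCC `type` encoding handled above:
//   bit 0 (types 1 and 3): closest enclosing subobject, not modeled by
//                          cir.objsize;
//   bit 1 (types 2 and 3): report the minimum remaining size and use 0
//                          instead of -1 (i.e. SIZE_MAX) when unknown.
// Hence __builtin_object_size(p, 0) folds to (size_t)-1 when nothing is
// known, whereas __builtin_object_size(p, 2) folds to 0.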
1556
1557mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize(
1558 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
1559 bool isDynamic) {
1560 uint64_t objectSize;
1561 if (!e->tryEvaluateObjectSize(objectSize, getContext(), type))
1562 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
1563 return builder.getConstInt(getLoc(e->getSourceRange()), resType, objectSize);
1564}
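// Illustrative example (added for exposition; not part of the upstream file):
// for
//   char buf[16];
//   size_t s = __builtin_object_size(buf, 0);
// tryEvaluateObjectSize succeeds and the call folds to the constant 16, so no
// cir.objsize operation is emitted; the cir.objsize path above is only used
// when the size cannot be determined statically.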