clang 22.0.0git
CIRGenBuiltin.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
16#include "CIRGenFunction.h"
17#include "CIRGenModule.h"
18#include "CIRGenValue.h"
19#include "mlir/IR/BuiltinAttributes.h"
20#include "mlir/IR/Value.h"
21#include "mlir/Support/LLVM.h"
22#include "clang/AST/Expr.h"
26#include "llvm/Support/ErrorHandling.h"
27
28using namespace clang;
29using namespace clang::CIRGen;
30using namespace llvm;
31
// Emits a call to a library function by wrapping `calleeValue` in a direct
// CIRGenCallee and dispatching through the normal call-emission path
// (cgf.emitCall) with an empty ReturnValueSlot.
// NOTE(review): the opening line of this function's signature (original line
// 32) was lost in extraction; the remaining parameters visible here are the
// CallExpr and the callee operation.
33 const CallExpr *e, mlir::Operation *calleeValue) {
34 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
35 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
36}
37
// Emits a CIR bit-manipulation op (clz/ctz/clrsb/ffs/parity/popcount) for a
// unary builtin call. The op is built over the scalar value of arg 0;
// `poisonZero` is forwarded only for BitClzOp/BitCtzOp, which take an extra
// poison-on-zero operand (selected via `if constexpr`). The op's result is
// int-cast to the call expression's converted type when the widths differ.
// NOTE(review): the signature line (original line 39) was lost in extraction;
// visible parameters are the CallExpr and the optional poisonZero flag.
38template <typename Op>
40 bool poisonZero = false) {
42
43 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
44 CIRGenBuilderTy &builder = cgf.getBuilder();
45
46 Op op;
47 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
48 std::is_same_v<Op, cir::BitCtzOp>)
49 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
50 else
51 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
52
// The builtin may be declared with a result type narrower/wider than the op
// result (e.g. __builtin_ctzs); cast to the expression's type if needed.
53 mlir::Value result = op.getResult();
54 mlir::Type exprTy = cgf.convertType(e->getType());
55 if (exprTy != result.getType())
56 result = builder.createIntCast(result, exprTy);
57
58 return RValue::get(result);
59}
60
// Emits a cir.rotate op for the rotate-left/rotate-right builtins. Arg 0 is
// the value to rotate and arg 1 the rotate amount; `isRotateLeft` selects the
// direction.
61RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
62 mlir::Value input = emitScalarExpr(e->getArg(0));
63 mlir::Value amount = emitScalarExpr(e->getArg(1));
64
65 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
66 // and amount, but cir.rotate requires them to have the same type. Cast amount
67 // to the type of input when necessary.
// NOTE(review): original line 68 is missing from this extraction (likely an
// assert-style missing-feature marker) — confirm against upstream source.
69
70 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
71 amount, isRotateLeft);
72 return RValue::get(r);
73}
74
// Emits a unary floating-point CIR op for a builtin call, in the variant that
// (per its name) would also honor constrained-FP state. The op is created
// with the argument's own type as result type.
// NOTE(review): the signature line (original line 76) and line 80-81 (likely
// the constrained/fast-math missing-feature markers) were lost in extraction.
75template <class Operation>
77 const CallExpr &e) {
78 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
79
82
83 auto call =
84 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
85 return RValue::get(call->getResult(0));
86}
87
// Emits a unary floating-point CIR op (e.g. ACosOp, CosOp) over the scalar
// value of arg 0, with the argument's type as result type.
// NOTE(review): the signature line (original line 89) was lost in extraction.
88template <class Operation>
90 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
91 auto call =
92 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
93 return RValue::get(call->getResult(0));
94}
95
// Emits CIR for a call to the builtin identified by `builtinID`. Strategy, in
// order: (1) constant-fold the whole call when it is a side-effect-free
// prvalue; (2) handle known builtin IDs in the big switch below; (3) fall back
// to a plain library call for lib-function aliases; (4) try a target-specific
// builtin; (5) otherwise report not-yet-implemented.
// NOTE(review): this is a doxygen extraction — the signature opening (original
// lines 95-96, 98) and a number of interior lines (case labels, ternary
// arms, assignments) are missing; numbering gaps below mark the losses.
97 const CallExpr *e,
99 mlir::Location loc = getLoc(e->getSourceRange());
100
101 // See if we can constant fold this builtin. If so, don't emit it at all.
102 // TODO: Extend this handling to all builtin calls that we can constant-fold.
103 Expr::EvalResult result;
104 if (e->isPRValue() && e->EvaluateAsRValue(result, cgm.getASTContext()) &&
105 !result.hasSideEffects()) {
106 if (result.Val.isInt())
107 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
108 if (result.Val.isFloat()) {
109 // Note: we are using result type of CallExpr to determine the type of
110 // the constant. Classic codegen uses the result value to determine the
111 // type. We feel it should be Ok to use expression type because it is
112 // hard to imagine a builtin function evaluates to a value that
113 // over/underflows its own defined type.
114 mlir::Type type = convertType(e->getType());
115 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
116 }
117 }
118
119 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
120
122
123 // If the builtin has been declared explicitly with an assembler label,
124 // disable the specialized emitting below. Ideally we should communicate the
125 // rename in IR, or at least avoid generating the intrinsic calls that are
126 // likely to get lowered to the renamed library functions.
127 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
128
131
132 switch (builtinIDIfNoAsmLabel) {
133 default:
134 break;
135
136 // C stdarg builtins.
137 case Builtin::BI__builtin_stdarg_start:
138 case Builtin::BI__builtin_va_start:
139 case Builtin::BI__va_start: {
// NOTE(review): the else-arm of this ternary (original line 142, presumably
// emitVAListRef(...).getPointer() for the non-__va_start builtins) is missing.
140 mlir::Value vaList = builtinID == Builtin::BI__va_start
141 ? emitScalarExpr(e->getArg(0))
143 mlir::Value count = emitScalarExpr(e->getArg(1));
144 emitVAStart(vaList, count);
145 return {};
146 }
147
148 case Builtin::BI__builtin_va_end:
// NOTE(review): original line 149 (the emitVAEnd(...) call) is missing here.
150 return {};
151
152 case Builtin::BIalloca:
153 case Builtin::BI_alloca:
154 case Builtin::BI__builtin_alloca_uninitialized:
155 case Builtin::BI__builtin_alloca: {
156 // Get alloca size input
157 mlir::Value size = emitScalarExpr(e->getArg(0));
158
159 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
160 const TargetInfo &ti = getContext().getTargetInfo();
161 const CharUnits suitableAlignmentInBytes =
// NOTE(review): original line 162 (toCharUnitsFromBits(ti.getSuitableAlign())
// per the cross-reference index) is missing from this extraction.
163
164 // Emit the alloca op with type `u8 *` to match the semantics of
165 // `llvm.alloca`. We later bitcast the type to `void *` to match the
166 // semantics of C/C++
167 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
168 // pointer of type `void *`. This will require a change to the allocaOp
169 // verifier.
170 mlir::Value allocaAddr = builder.createAlloca(
171 getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
172 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
173
174 // Initialize the allocated buffer if required.
175 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
176 // Initialize the alloca with the given size and alignment according to
177 // the lang opts. Only the trivial non-initialization is supported for
178 // now.
179
180 switch (getLangOpts().getTrivialAutoVarInit()) {
// NOTE(review): the case labels (original lines 181, 184-185) are missing;
// only the Uninitialized arm and the NYI arm bodies survive.
182 // Nothing to initialize.
183 break;
186 cgm.errorNYI("trivial auto var init");
187 break;
188 }
189 }
190
191 // An alloca will always return a pointer to the alloca (stack) address
192 // space. This address space need not be the same as the AST / Language
193 // default (e.g. in C / C++ auto vars are in the generic address space). At
194 // the AST level this is handled within CreateTempAlloca et al., but for the
195 // builtin / dynamic alloca we have to handle it here.
197
198 // Bitcast the alloca to the expected type.
199 return RValue::get(
200 builder.createBitcast(allocaAddr, builder.getVoidPtrTy()));
201 }
202
// Unary FP builtins: each family below dispatches to a CIR op via the
// maybe-constrained helper; the dispatch lines themselves (211-212, 222-223,
// 233-234, 244) are missing from this extraction.
203 case Builtin::BIcos:
204 case Builtin::BIcosf:
205 case Builtin::BIcosl:
206 case Builtin::BI__builtin_cos:
207 case Builtin::BI__builtin_cosf:
208 case Builtin::BI__builtin_cosf16:
209 case Builtin::BI__builtin_cosl:
210 case Builtin::BI__builtin_cosf128:
213
214 case Builtin::BIceil:
215 case Builtin::BIceilf:
216 case Builtin::BIceill:
217 case Builtin::BI__builtin_ceil:
218 case Builtin::BI__builtin_ceilf:
219 case Builtin::BI__builtin_ceilf16:
220 case Builtin::BI__builtin_ceill:
221 case Builtin::BI__builtin_ceilf128:
224
225 case Builtin::BIexp:
226 case Builtin::BIexpf:
227 case Builtin::BIexpl:
228 case Builtin::BI__builtin_exp:
229 case Builtin::BI__builtin_expf:
230 case Builtin::BI__builtin_expf16:
231 case Builtin::BI__builtin_expl:
232 case Builtin::BI__builtin_expf128:
235
236 case Builtin::BIfabs:
237 case Builtin::BIfabsf:
238 case Builtin::BIfabsl:
239 case Builtin::BI__builtin_fabs:
240 case Builtin::BI__builtin_fabsf:
241 case Builtin::BI__builtin_fabsf16:
242 case Builtin::BI__builtin_fabsl:
243 case Builtin::BI__builtin_fabsf128:
245
246 case Builtin::BI__assume:
247 case Builtin::BI__builtin_assume: {
// A side-effecting assume argument must not be evaluated; drop the assume.
248 if (e->getArg(0)->HasSideEffects(getContext()))
249 return RValue::get(nullptr);
250
251 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
252 cir::AssumeOp::create(builder, loc, argValue);
253 return RValue::get(nullptr);
254 }
255
256 case Builtin::BI__builtin_assume_separate_storage: {
257 mlir::Value value0 = emitScalarExpr(e->getArg(0));
258 mlir::Value value1 = emitScalarExpr(e->getArg(1));
259 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
260 return RValue::get(nullptr);
261 }
262
263 case Builtin::BI__builtin_assume_aligned: {
264 const Expr *ptrExpr = e->getArg(0);
265 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
266 mlir::Value offsetValue =
267 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
268
// NOTE(review): original line 270 (presumably
// e->getArg(1)->getIntegerConstantExpr(getContext())) is missing.
269 std::optional<llvm::APSInt> alignment =
271 assert(alignment.has_value() &&
272 "the second argument to __builtin_assume_aligned must be an "
273 "integral constant expression");
274
275 mlir::Value result =
276 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
277 alignment->getSExtValue(), offsetValue);
278 return RValue::get(result);
279 }
280
281 case Builtin::BI__builtin_complex: {
282 mlir::Value real = emitScalarExpr(e->getArg(0));
283 mlir::Value imag = emitScalarExpr(e->getArg(1));
284 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
285 return RValue::getComplex(complex);
286 }
287
288 case Builtin::BI__builtin_creal:
289 case Builtin::BI__builtin_crealf:
290 case Builtin::BI__builtin_creall:
291 case Builtin::BIcreal:
292 case Builtin::BIcrealf:
293 case Builtin::BIcreall: {
294 mlir::Value complex = emitComplexExpr(e->getArg(0));
295 mlir::Value real = builder.createComplexReal(loc, complex);
296 return RValue::get(real);
297 }
298
299 case Builtin::BI__builtin_cimag:
300 case Builtin::BI__builtin_cimagf:
301 case Builtin::BI__builtin_cimagl:
302 case Builtin::BIcimag:
303 case Builtin::BIcimagf:
304 case Builtin::BIcimagl: {
305 mlir::Value complex = emitComplexExpr(e->getArg(0));
306 mlir::Value imag = builder.createComplexImag(loc, complex);
307 return RValue::get(imag);
308 }
309
310 case Builtin::BI__builtin_conj:
311 case Builtin::BI__builtin_conjf:
312 case Builtin::BI__builtin_conjl:
313 case Builtin::BIconj:
314 case Builtin::BIconjf:
315 case Builtin::BIconjl: {
// Complex conjugation is modeled as the unary "Not" op on the complex value.
316 mlir::Value complex = emitComplexExpr(e->getArg(0));
317 mlir::Value conj = builder.createUnaryOp(getLoc(e->getExprLoc()),
318 cir::UnaryOpKind::Not, complex);
319 return RValue::getComplex(conj);
320 }
321
322 case Builtin::BI__builtin_clrsb:
323 case Builtin::BI__builtin_clrsbl:
324 case Builtin::BI__builtin_clrsbll:
325 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
326
327 case Builtin::BI__builtin_ctzs:
328 case Builtin::BI__builtin_ctz:
329 case Builtin::BI__builtin_ctzl:
330 case Builtin::BI__builtin_ctzll:
331 case Builtin::BI__builtin_ctzg:
// poisonZero=true: ctz/clz on zero is undefined for these builtins.
333 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
334
335 case Builtin::BI__builtin_clzs:
336 case Builtin::BI__builtin_clz:
337 case Builtin::BI__builtin_clzl:
338 case Builtin::BI__builtin_clzll:
339 case Builtin::BI__builtin_clzg:
341 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
342
343 case Builtin::BI__builtin_ffs:
344 case Builtin::BI__builtin_ffsl:
345 case Builtin::BI__builtin_ffsll:
346 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
347
348 case Builtin::BI__builtin_parity:
349 case Builtin::BI__builtin_parityl:
350 case Builtin::BI__builtin_parityll:
351 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
352
// MSVC __lzcnt returns the operand width on zero input, so no poison.
353 case Builtin::BI__lzcnt16:
354 case Builtin::BI__lzcnt:
355 case Builtin::BI__lzcnt64:
357 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
358
359 case Builtin::BI__popcnt16:
360 case Builtin::BI__popcnt:
361 case Builtin::BI__popcnt64:
362 case Builtin::BI__builtin_popcount:
363 case Builtin::BI__builtin_popcountl:
364 case Builtin::BI__builtin_popcountll:
365 case Builtin::BI__builtin_popcountg:
// NOTE(review): the dispatch line (original 366, presumably
// emitBuiltinBitOp<cir::BitPopcountOp>) is missing from this extraction.
367
368 case Builtin::BI__builtin_expect:
369 case Builtin::BI__builtin_expect_with_probability: {
370 mlir::Value argValue = emitScalarExpr(e->getArg(0));
371 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
372
// The probability (arg 2) must be a compile-time float; it is converted to
// IEEE double and attached to the ExpectOp as an attribute.
373 mlir::FloatAttr probAttr;
374 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
375 llvm::APFloat probability(0.0);
376 const Expr *probArg = e->getArg(2);
377 [[maybe_unused]] bool evalSucceeded =
378 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
379 assert(evalSucceeded &&
380 "probability should be able to evaluate as float");
381 bool loseInfo = false; // ignored
382 probability.convert(llvm::APFloat::IEEEdouble(),
383 llvm::RoundingMode::Dynamic, &loseInfo);
384 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
385 probability);
386 }
387
388 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
389 argValue, expectedValue, probAttr);
390 return RValue::get(result);
391 }
392
393 case Builtin::BI__builtin_bswap16:
394 case Builtin::BI__builtin_bswap32:
395 case Builtin::BI__builtin_bswap64:
396 case Builtin::BI_byteswap_ushort:
397 case Builtin::BI_byteswap_ulong:
398 case Builtin::BI_byteswap_uint64: {
399 mlir::Value arg = emitScalarExpr(e->getArg(0));
400 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
401 }
402
403 case Builtin::BI__builtin_bitreverse8:
404 case Builtin::BI__builtin_bitreverse16:
405 case Builtin::BI__builtin_bitreverse32:
406 case Builtin::BI__builtin_bitreverse64: {
407 mlir::Value arg = emitScalarExpr(e->getArg(0));
408 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
409 }
410
411 case Builtin::BI__builtin_rotateleft8:
412 case Builtin::BI__builtin_rotateleft16:
413 case Builtin::BI__builtin_rotateleft32:
414 case Builtin::BI__builtin_rotateleft64:
415 return emitRotate(e, /*isRotateLeft=*/true);
416
417 case Builtin::BI__builtin_rotateright8:
418 case Builtin::BI__builtin_rotateright16:
419 case Builtin::BI__builtin_rotateright32:
420 case Builtin::BI__builtin_rotateright64:
421 return emitRotate(e, /*isRotateLeft=*/false);
422
423 case Builtin::BI__builtin_return_address:
424 case Builtin::BI__builtin_frame_address: {
// The frame level (arg 0) is required to be a known constant integer.
425 mlir::Location loc = getLoc(e->getExprLoc());
426 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
427 if (builtinID == Builtin::BI__builtin_return_address) {
428 return RValue::get(cir::ReturnAddrOp::create(
429 builder, loc,
430 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
431 }
432 return RValue::get(cir::FrameAddrOp::create(
433 builder, loc,
434 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
435 }
436
437 case Builtin::BI__builtin_trap:
438 emitTrap(loc, /*createNewBlock=*/true);
439 return RValue::get(nullptr);
440
441 case Builtin::BI__builtin_unreachable:
442 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
443 return RValue::get(nullptr);
444
445 case Builtin::BI__builtin_elementwise_acos:
446 return emitUnaryFPBuiltin<cir::ACosOp>(*this, *e);
447 case Builtin::BI__builtin_elementwise_asin:
448 return emitUnaryFPBuiltin<cir::ASinOp>(*this, *e);
449 case Builtin::BI__builtin_elementwise_atan:
450 return emitUnaryFPBuiltin<cir::ATanOp>(*this, *e);
451 case Builtin::BI__builtin_elementwise_cos:
452 return emitUnaryFPBuiltin<cir::CosOp>(*this, *e);
// Coroutine builtins are not implemented yet; report NYI and return undef.
453 case Builtin::BI__builtin_coro_id:
454 case Builtin::BI__builtin_coro_promise:
455 case Builtin::BI__builtin_coro_resume:
456 case Builtin::BI__builtin_coro_noop:
457 case Builtin::BI__builtin_coro_destroy:
458 case Builtin::BI__builtin_coro_done:
459 case Builtin::BI__builtin_coro_alloc:
460 case Builtin::BI__builtin_coro_begin:
461 case Builtin::BI__builtin_coro_end:
462 case Builtin::BI__builtin_coro_suspend:
463 case Builtin::BI__builtin_coro_align:
464 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
465 return getUndefRValue(e->getType());
466
467 case Builtin::BI__builtin_coro_frame: {
468 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_frame NYI");
470 return getUndefRValue(e->getType());
471 }
// coro_free/coro_size lower to a direct call to a builtin-marked function.
472 case Builtin::BI__builtin_coro_free:
473 case Builtin::BI__builtin_coro_size: {
474 GlobalDecl gd{fd};
475 mlir::Type ty = cgm.getTypes().getFunctionType(
476 cgm.getTypes().arrangeGlobalDeclaration(gd));
477 const auto *nd = cast<NamedDecl>(gd.getDecl());
478 cir::FuncOp fnOp =
479 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
480 fnOp.setBuiltin(true);
// NOTE(review): original line 482 (the ReturnValueSlot argument closing this
// emitCall) is missing from the extraction.
481 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
483 }
484 case Builtin::BI__builtin_dynamic_object_size:
485 case Builtin::BI__builtin_object_size: {
486 unsigned type =
487 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
488 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
489
490 // We pass this builtin onto the optimizer so that it can figure out the
491 // object size in more complex cases.
492 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
493 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
494 /*EmittedE=*/nullptr, isDynamic));
495 }
496
497 case Builtin::BI__builtin_prefetch: {
// Arguments 1 (rw) and 2 (locality) must fold to integer constants; they
// default to read (0) and maximal locality (3) when omitted.
498 auto evaluateOperandAsInt = [&](const Expr *arg) {
500 [[maybe_unused]] bool evalSucceed =
501 arg->EvaluateAsInt(res, cgm.getASTContext());
502 assert(evalSucceed && "expression should be able to evaluate as int");
503 return res.Val.getInt().getZExtValue();
504 };
505
506 bool isWrite = false;
507 if (e->getNumArgs() > 1)
508 isWrite = evaluateOperandAsInt(e->getArg(1));
509
510 int locality = 3;
511 if (e->getNumArgs() > 2)
512 locality = evaluateOperandAsInt(e->getArg(2));
513
514 mlir::Value address = emitScalarExpr(e->getArg(0));
515 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
516 return RValue::get(nullptr);
517 }
518 }
519
520 // If this is an alias for a lib function (e.g. __builtin_sin), emit
521 // the call using the normal call path, but using the unmangled
522 // version of the function name.
523 if (getContext().BuiltinInfo.isLibFunction(builtinID))
524 return emitLibraryCall(*this, fd, e,
525 cgm.getBuiltinLibFunction(fd, builtinID));
526
527 // Some target-specific builtins can have aggregate return values, e.g.
528 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
529 // returnValue to be non-null, so that the target-specific emission code can
530 // always just emit into it.
// NOTE(review): original line 531 (evalKind initialization, presumably via
// getEvaluationKind(e->getType())) is missing from the extraction.
532 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
533 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
534 return getUndefRValue(e->getType());
535 }
536
537 // Now see if we can emit a target-specific builtin.
538 if (mlir::Value v = emitTargetBuiltinExpr(builtinID, e, returnValue)) {
539 switch (evalKind) {
540 case cir::TEK_Scalar:
541 if (mlir::isa<cir::VoidType>(v.getType()))
542 return RValue::get(nullptr);
543 return RValue::get(v);
// NOTE(review): the `case cir::TEK_Aggregate:` label (original line 544) is
// missing from this extraction.
545 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
546 return getUndefRValue(e->getType());
547 case cir::TEK_Complex:
548 llvm_unreachable("No current target builtin returns complex");
549 }
550 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
551 }
552
553 cgm.errorNYI(e->getSourceRange(),
554 std::string("unimplemented builtin call: ") +
555 getContext().BuiltinInfo.getName(builtinID));
556 return getUndefRValue(e->getType());
557}
558
// Dispatches a target-specific builtin to the per-architecture emitter for
// `arch`. Currently only x86/x86_64 have an emitter; all other listed arches
// return a null value (reported as NYI by the caller, emitBuiltinExpr).
// NOTE(review): the signature opening line (original line 559, taking the
// CIRGenFunction* first) was lost in extraction.
560 unsigned builtinID,
561 const CallExpr *e,
562 ReturnValueSlot &returnValue,
563 llvm::Triple::ArchType arch) {
564 // When compiling in HipStdPar mode we have to be conservative in rejecting
565 // target specific features in the FE, and defer the possible error to the
566 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
567 // referenced by an accelerator executable function, we emit an error.
568 // Returning nullptr here leads to the builtin being handled in
569 // EmitStdParUnsupportedBuiltin.
570 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
571 arch != cgf->getTarget().getTriple().getArch())
572 return {};
573
574 switch (arch) {
575 case llvm::Triple::arm:
576 case llvm::Triple::armeb:
577 case llvm::Triple::thumb:
578 case llvm::Triple::thumbeb:
579 case llvm::Triple::aarch64:
580 case llvm::Triple::aarch64_32:
581 case llvm::Triple::aarch64_be:
582 case llvm::Triple::bpfeb:
583 case llvm::Triple::bpfel:
584 // These are actually NYI, but that will be reported by emitBuiltinExpr.
585 // At this point, we don't even know that the builtin is target-specific.
586 return nullptr;
587
588 case llvm::Triple::x86:
589 case llvm::Triple::x86_64:
590 return cgf->emitX86BuiltinExpr(builtinID, e);
591
592 case llvm::Triple::ppc:
593 case llvm::Triple::ppcle:
594 case llvm::Triple::ppc64:
595 case llvm::Triple::ppc64le:
596 case llvm::Triple::r600:
597 case llvm::Triple::amdgcn:
598 case llvm::Triple::systemz:
599 case llvm::Triple::nvptx:
600 case llvm::Triple::nvptx64:
601 case llvm::Triple::wasm32:
602 case llvm::Triple::wasm64:
603 case llvm::Triple::hexagon:
604 case llvm::Triple::riscv32:
605 case llvm::Triple::riscv64:
606 // These are actually NYI, but that will be reported by emitBuiltinExpr.
607 // At this point, we don't even know that the builtin is target-specific.
608 return {};
609 default:
610 return {};
611 }
612}
613
// Routes a target builtin to the per-arch emitter: if the ID belongs to the
// auxiliary target (e.g. host builtins during device compile), translate the
// ID and use the aux target's triple; otherwise use the primary target.
// NOTE(review): the signature lines (original 615-616) and the aux-path
// `return emitTargetArchBuiltinExpr(` opener (line 619) were lost in
// extraction.
614mlir::Value
617 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
618 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
620 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
621 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
622 }
623
624 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
625 getTarget().getTriple().getArch());
626}
627
628/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
629/// for "fabsf".
// NOTE(review): the signature opening (original line 630), the name-buffer
// declaration (line 637), and the condition guarding the substr (line 639,
// presumably a "name starts with __builtin_" check) were lost in extraction.
631 unsigned builtinID) {
632 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
633
634 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
635 // to build this up so provide a small stack buffer to handle the vast
636 // majority of names.
638
// substr(10) strips the 10-character "__builtin_" prefix from the name.
640 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
641
642 GlobalDecl d(fd);
643 mlir::Type type = convertType(fd->getType());
644 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
645}
646
// Evaluates an __builtin_assume argument as a boolean. When the Builtin
// sanitizer is enabled the checked path is not yet implemented (NYI) and a
// null value is returned instead.
// NOTE(review): the signature line (original line 647) was lost in extraction.
648 mlir::Value argValue = evaluateExprAsBool(e);
649 if (!sanOpts.has(SanitizerKind::Builtin))
650 return argValue;
651
653 cgm.errorNYI(e->getSourceRange(),
654 "emitCheckedArgForAssume: sanitizers are NYI");
655 return {};
656}
657
// Emits cir.va_start over the given va_list pointer and argument count.
658void CIRGenFunction::emitVAStart(mlir::Value vaList, mlir::Value count) {
659 // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
660 // early, defer to LLVM lowering.
661 cir::VAStartOp::create(builder, vaList.getLoc(), vaList, count);
662}
663
// Emits cir.va_end over the given va_list pointer.
664void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
665 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
666}
667
668// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
669// default this lowers to llvm.va_arg which is incomplete and not ABI-compliant
670// on most targets so cir.va_arg will need some ABI handling in LoweringPrepare
// Emits cir.va_arg: reads the next argument of the VAArgExpr's type from the
// referenced va_list.
// NOTE(review): the signature lines (original 671-673) were lost in
// extraction.
674 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
675 mlir::Type type = convertType(ve->getType());
676 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
677 return cir::VAArgOp::create(builder, loc, type, vaList);
678}
679
// Emits the size of the object `e` points at as a cir.objsize op (or a
// constant for the cases CIR cannot model). `type` is the standard
// __builtin_object_size type argument (0-3); bit 1 selects min (whole-object
// lower bound) vs max. `emittedE` optionally supplies an already-emitted
// pointer so `e` is not re-evaluated. `isDynamic` distinguishes
// __builtin_dynamic_object_size.
680mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
681 cir::IntType resType,
682 mlir::Value emittedE,
683 bool isDynamic) {
// NOTE(review): original line 684 is missing from this extraction.
685
686 // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
687 // evaluate e for side-effects. In either case, just like original LLVM
688 // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
// Unknown size is 0 for the "min" types (bit 1 set) and -1 (all ones) for
// the "max" types, matching the documented builtin semantics.
689 if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
690 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
691 (type & 2) ? 0 : -1);
692
693 mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
694 assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
695 "Non-pointer passed to __builtin_object_size?");
696
698
699 // Extract the min/max mode from type. CIR only supports type 0
700 // (max, whole object) and type 2 (min, whole object), not type 1 or 3
701 // (closest subobject variants).
702 const bool min = ((type & 2) != 0);
703 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
704 auto op =
705 cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
706 min, /*nullUnknown=*/true, isDynamic);
707 return op.getResult();
708}
709
// Returns the object size as a compile-time constant when the frontend can
// fold it (Expr::tryEvaluateObjectSize); otherwise falls back to emitting a
// runtime cir.objsize via emitBuiltinObjectSize.
// NOTE(review): the signature opening line (original line 710) was lost in
// extraction.
711 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
712 bool isDynamic) {
713 uint64_t objectSize;
714 if (!e->tryEvaluateObjectSize(objectSize, getContext(), type))
715 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
716 return builder.getConstInt(getLoc(e->getSourceRange()), resType, objectSize);
717}
Defines enum values for all the target-independent builtin functions.
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Value emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID, const CallExpr *e, ReturnValueSlot &returnValue, llvm::Triple::ArchType arch)
static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, bool poisonZero=false)
__DEVICE__ int min(int __a, int __b)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
APSInt & getInt()
Definition APValue.h:489
bool isFloat() const
Definition APValue.h:468
bool isInt() const
Definition APValue.h:467
APFloat & getFloat()
Definition APValue.h:503
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:891
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
mlir::Value getPointer() const
Definition Address.h:82
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:90
mlir::Type convertType(clang::QualType t)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
const TargetInfo & getTarget() const
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Value emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitVAStart(mlir::Value vaList, mlir::Value count)
Emits the start of a CIR variable-argument operation (cir.va_start)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
mlir::Value emitX86BuiltinExpr(unsigned builtinID, const CallExpr *e)
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
clang::ASTContext & getContext() const
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
mlir::Type convertType(clang::QualType type)
cir::FuncOp getBuiltinLibFunction(const FunctionDecl *fd, unsigned builtinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
cir::FuncOp getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType, clang::GlobalDecl gd, bool forVTable, bool dontDefer=false, bool isThunk=false, ForDefinition_t isForDefinition=NotForDefinition, mlir::ArrayAttr extraAttrs={})
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:82
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:90
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:254
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
Expr * getCallee()
Definition Expr.h:3024
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3665
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
Represents a function declaration or definition.
Definition Decl.h:2000
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:742
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4891
const Expr * getSubExpr() const
Definition Expr.h:4907
QualType getType() const
Definition Decl.h:723
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
The JSON file list parser is used to communicate input to InstallAPI.
U cast(CodeGen::Address addr)
Definition Address.h:327
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
static bool builtinCheckKind()
static bool addressSpace()
static bool asmLabelAttr()
static bool msvcBuiltins()
static bool coroutineFrame()
static bool cgFPOptionsRAII()
static bool builtinCallF128()
static bool fpConstraints()
static bool countedBySize()
static bool builtinCallMathErrno()
static bool opCallImplicitObjectSizeArgs()
static bool fastMathFlags()
static bool builtinCall()
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool hasSideEffects() const
Return true if the evaluated expression has side effects.
Definition Expr.h:639
#define conj(__x)
Definition tgmath.h:1303