clang 23.0.0git
CIRGenBuiltin.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/Support/ErrorHandling.h"
31
32using namespace clang;
33using namespace clang::CIRGen;
34using namespace llvm;
35
// Decides whether the given builtin should be emitted directly as CIR
// instead of as a library call. Only a small set of FP builtins (logb /
// scalbn variants) qualify, and only when math-errno handling is off.
// NOTE(review): this extraction is missing original lines 40-42, i.e. the
// remainder of the `if` condition after the MathErrno check — confirm the
// full predicate against upstream CIRGenBuiltin.cpp.
36static bool shouldEmitBuiltinAsIR(unsigned builtinID,
37 const Builtin::Context &bi,
38 const CIRGenFunction &cgf) {
39 if (!cgf.cgm.getLangOpts().MathErrno &&
43 switch (builtinID) {
44 default:
45 return false;
46 case Builtin::BIlogbf:
47 case Builtin::BI__builtin_logbf:
48 case Builtin::BIlogb:
49 case Builtin::BI__builtin_logb:
50 case Builtin::BIscalbnf:
51 case Builtin::BI__builtin_scalbnf:
52 case Builtin::BIscalbn:
53 case Builtin::BI__builtin_scalbn:
54 return true;
55 }
56 }
57 return false;
58}
59
// Emits the builtin as an ordinary direct call to the library function,
// using the callee's own function type and no pre-allocated return slot.
// NOTE(review): original line 60 — the first line of the signature
// (presumably `static RValue emitLibraryCall(CIRGenFunction &cgf,
// const FunctionDecl *fd,`) — is missing from this extraction.
61 const CallExpr *e, mlir::Operation *calleeValue) {
62 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
63 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
64}
65
66template <typename Op, typename... Args>
67static mlir::Value createBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
68 mlir::Value arg, Args... args) {
69 CIRGenBuilderTy &builder = cgf.getBuilder();
70 mlir::Location loc = cgf.getLoc(e->getSourceRange());
71 auto op = Op::create(builder, loc, arg, args...);
72 mlir::Value result = op.getResult();
73 mlir::Type resultTy = cgf.convertType(e->getType());
74 if (resultTy != result.getType())
75 result = builder.createIntCast(result, resultTy);
76 return result;
77}
78
// Emits a bit op whose value operand is the call's first argument; any
// extra operands are forwarded to createBuiltinBitOp above.
// NOTE(review): original line 80 — the first line of the signature
// (presumably `static RValue emitBuiltinBitOp(CIRGenFunction &cgf,
// const CallExpr *e,`) — is missing from this extraction.
79template <typename Op, typename... Args>
81 Args... args) {
82 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
83 return RValue::get(createBuiltinBitOp<Op>(cgf, e, arg, args...));
84}
85
86/// Emit a clz/ctz bit op with optional fallback for __builtin_c[lt]zg.
87/// When a fallback is present, the result is the fallback value if the input is
88/// zero, otherwise the bit count.
// NOTE(review): this extraction is missing original lines 90 (first line of
// the signature), 96, and 100 — likely the function name/params and
// blank/comment separators. Confirm against upstream.
89template <typename Op>
91 const CallExpr *e) {
// A second argument means the __builtin_c[lt]zg form with a fallback value.
// With a fallback (or a target where clz/ctz of 0 is undefined) the op is
// emitted with poison-on-zero semantics.
92 bool hasFallback = e->getNumArgs() > 1;
93 bool poisonZero = hasFallback || cgf.getTarget().isCLZForZeroUndef();
94
95 if (!hasFallback) {
97 return emitBuiltinBitOp<Op>(cgf, e, poisonZero);
98 }
99
// Fallback case: select between the bit count and the fallback value
// depending on whether the input is zero.
101 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
102 mlir::Value result = createBuiltinBitOp<Op>(cgf, e, arg, poisonZero);
103
104 CIRGenBuilderTy &builder = cgf.getBuilder();
105 mlir::Location loc = cgf.getLoc(e->getSourceRange());
106 mlir::Value zero = builder.getNullValue(arg.getType(), loc);
107 mlir::Value isZero =
108 builder.createCompare(loc, cir::CmpOpKind::eq, arg, zero);
109 mlir::Value fallbackValue = cgf.emitScalarExpr(e->getArg(1));
110 return RValue::get(builder.createSelect(loc, isZero, fallbackValue, result));
111}
112
113/// Emit the conversions required to turn the given value into an
114/// integer of the given size.
115static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
116 cir::IntType intType) {
117 v = cgf.emitToMemory(v, t);
118
119 if (mlir::isa<cir::PointerType>(v.getType()))
120 return cgf.getBuilder().createPtrToInt(v, intType);
121
122 assert(v.getType() == intType);
123 return v;
124}
125
126static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
127 mlir::Type resultType) {
128 v = cgf.emitFromMemory(v, t);
129
130 if (mlir::isa<cir::PointerType>(resultType))
131 return cgf.getBuilder().createIntToPtr(v, resultType);
132
133 assert(v.getType() == resultType);
134 return v;
135}
136
// Emits a cir.signbit op for `val` and returns its single result.
// NOTE(review): original line 139 is missing from this extraction —
// presumably an assertion on the operand type. Confirm against upstream.
137static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf,
138 mlir::Value val) {
140 cir::SignBitOp returnValue = cgf.getBuilder().createSignBit(loc, val);
141 return returnValue->getResult(0);
142}
143
// Emits the pointer operand of an atomic builtin and warns when its
// alignment is not a multiple of the operation's natural size.
// NOTE(review): this extraction is missing original lines 144 (the
// signature, presumably `static Address checkAtomicAlignment(
// CIRGenFunction &cgf, const CallExpr *e) {`), 150-151 (the non-pointer
// arm of the `bytes` ternary), and 158 (the statement that forces natural
// alignment after the diagnostic). Confirm against upstream.
145 ASTContext &astContext = cgf.getContext();
146 Address ptr = cgf.emitPointerWithAlignment(e->getArg(0));
147 unsigned bytes =
148 mlir::isa<cir::PointerType>(ptr.getElementType())
149 ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
152
153 unsigned align = ptr.getAlignment().getQuantity();
154 if (align % bytes != 0) {
155 DiagnosticsEngine &diags = cgf.cgm.getDiags();
156 diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
157 // Force address to be at least naturally-aligned.
159 }
160 return ptr;
161}
162
163/// Utility to insert an atomic instruction based on Intrinsic::ID
164/// and the expression node.
// Emits a cir.atomic.fetch for a binary __sync/__atomic builtin: checks the
// destination's alignment, emits the value operand as an integer of the
// right width (casting pointer operands via ptrtoint), and returns the old
// value loaded from memory ("fetch first").
// NOTE(review): original lines 176-177 — the body of the second assert,
// presumably checking that arg(1)'s type matches the pointee type — are
// missing from this extraction.
165static mlir::Value makeBinaryAtomicValue(
166 CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
167 mlir::Type *originalArgType = nullptr,
168 mlir::Value *emittedArgValue = nullptr,
169 cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
170
171 QualType type = expr->getType();
172 QualType ptrType = expr->getArg(0)->getType();
173
174 assert(ptrType->isPointerType());
175 assert(
178 expr->getArg(1)->getType()));
179
180 Address destAddr = checkAtomicAlignment(cgf, expr);
181 CIRGenBuilderTy &builder = cgf.getBuilder();
182
183 mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
184 mlir::Type valueType = val.getType();
185 mlir::Value destValue = destAddr.emitRawPointer();
186
187 if (ptrType->getPointeeType()->isPointerType()) {
188 // Pointer to pointer
189 // `cir.atomic.fetch` expects a pointer to an integer type, so we cast
190 // ptr<ptr<T>> to ptr<intPtrSize>
191 cir::IntType ptrSizeInt =
192 builder.getSIntNTy(cgf.getContext().getTypeSize(ptrType));
193 destValue =
194 builder.createBitcast(destValue, builder.getPointerTo(ptrSizeInt));
195 val = emitToInt(cgf, val, type, ptrSizeInt);
196 } else {
197 // Pointer to integer type
198 cir::IntType intType =
200 ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
201 : builder.getSIntNTy(cgf.getContext().getTypeSize(type))
202 val = emitToInt(cgf, val, type, intType);
203 }
204
205 // This output argument is needed for post atomic fetch operations
206 // that calculate the result of the operation as return value of
207 // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
208 // memory location and returns the old value.
209 if (emittedArgValue) {
210 *emittedArgValue = val;
211 assert(originalArgType != nullptr &&
212 "originalArgType must be provided when emittedArgValue is set");
213 *originalArgType = valueType;
214 }
215
216 auto rmwi = cir::AtomicFetchOp::create(
217 builder, cgf.getLoc(expr->getSourceRange()), destValue, val, kind,
218 ordering, cir::SyncScopeKind::System, false, /* is volatile */
219 true); /* fetch first */
220 return rmwi->getResult(0);
221}
222
// Thin wrapper: emits a fetch-first binary atomic op and wraps the old
// value in an RValue.
// NOTE(review): original line 223 — the first line of the signature
// (presumably `static RValue emitBinaryAtomic(CIRGenFunction &cgf,`) —
// is missing from this extraction.
224 cir::AtomicFetchKind atomicOpkind,
225 const CallExpr *e) {
226 return RValue::get(makeBinaryAtomicValue(cgf, atomicOpkind, e));
227}
228
// Emits a <binop>_and_fetch builtin: performs the fetch-first atomic op,
// then re-applies BinOp to the old value to produce the post-op result,
// optionally inverting it (for nand_and_fetch).
// NOTE(review): this extraction is missing original lines 230 (first line
// of the signature) and 238 (presumably
// `CIRGenBuilderTy &builder = cgf.getBuilder();`, which line 239 uses).
229template <typename BinOp>
231 cir::AtomicFetchKind atomicOpkind,
232 const CallExpr *e, bool invert = false) {
233 mlir::Value emittedArgValue;
234 mlir::Type originalArgType;
235 clang::QualType typ = e->getType();
236 mlir::Value result = makeBinaryAtomicValue(
237 cgf, atomicOpkind, e, &originalArgType, &emittedArgValue);
239 result = BinOp::create(builder, result.getLoc(), result, emittedArgValue);
240
241 if (invert)
242 result = builder.createNot(result);
243
// Convert the integer result back to the caller's expected type (possibly
// a pointer) before wrapping it.
244 result = emitFromInt(cgf, result, typ, originalArgType);
245 return RValue::get(result);
246}
247
// Emits a cir.atomic.fence with the given sync scope; the memory order is
// resolved from the builtin's first argument via the shared
// emitAtomicExprWithMemOrder machinery, which invokes the callback once the
// order is known.
// NOTE(review): original line 248 — the first line of the signature
// (presumably `static void emitAtomicFenceOp(CIRGenFunction &cgf,
// const CallExpr *expr,`) — is missing from this extraction.
249 cir::SyncScopeKind syncScope) {
250 CIRGenBuilderTy &builder = cgf.getBuilder();
251 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
252
253 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
254 cir::AtomicFenceOp::create(
255 builder, loc, memOrder,
256 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
257 };
258
259 cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
260 /*isLoad*/ false, /*isFence*/ true,
261 emitAtomicOpCallBackFn);
262}
263
264namespace {
// Width in bits and signedness of an integer type, as computed by the
// integer-width helpers in this file and consumed by the overflow-checked
// arithmetic builtins.
265struct WidthAndSignedness {
266 unsigned width; // bit width (1 for bool, AST-reported width otherwise)
267 bool isSigned; // true when the type is a signed integer type
268};
269} // namespace
270
// Computes the bit width and signedness of an integer AST type: bool is
// width 1, _BitInt uses its declared width, everything else uses the
// target's type info.
// NOTE(review): original line 272 — the function name and first parameter
// (presumably `getIntegerWidthAndSignedness(const clang::ASTContext
// &astContext,`) — is missing from this extraction.
271static WidthAndSignedness
273 const clang::QualType type) {
274 assert(type->isIntegerType() && "Given type is not an integer.");
275 unsigned width = type->isBooleanType() ? 1
276 : type->isBitIntType() ? astContext.getIntWidth(type)
277 : astContext.getTypeInfo(type).Width;
278 bool isSigned = type->isSignedIntegerType();
279 return {width, isSigned};
280}
281
282/// Create a checked overflow arithmetic op and return its result and overflow
283/// flag.
284template <typename OpTy>
285static std::pair<mlir::Value, mlir::Value>
286emitOverflowOp(CIRGenBuilderTy &builder, mlir::Location loc,
287 cir::IntType resultTy, mlir::Value lhs, mlir::Value rhs) {
288 auto op = OpTy::create(builder, loc, resultTy, lhs, rhs);
289 return {op.getResult(), op.getOverflow()};
290}
291
292// Given one or more integer types, this function produces an integer type that
293// encompasses them: any value in one of the given types could be expressed in
294// the encompassing type.
295static struct WidthAndSignedness
296EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
297 assert(types.size() > 0 && "Empty list of types.");
298
299 // If any of the given types is signed, we must return a signed type.
300 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
301
302 // The encompassing type must have a width greater than or equal to the width
303 // of the specified types. Additionally, if the encompassing type is signed,
304 // its width must be strictly greater than the width of any unsigned types
305 // given.
306 unsigned width = 0;
307 for (const auto &type : types)
308 width = std::max(width, type.width + (isSigned && !type.isSigned));
309
310 return {width, isSigned};
311}
312
// Emits a cir.rotate for __builtin_rotateleft/rotateright-style builtins:
// arg0 is the value, arg1 the rotate amount; `isRotateLeft` selects the
// direction.
// NOTE(review): original line 320 is missing from this extraction —
// presumably an assertion/missing-feature marker tied to the TODO below.
313RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
314 mlir::Value input = emitScalarExpr(e->getArg(0));
315 mlir::Value amount = emitScalarExpr(e->getArg(1));
316
317 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
318 // and amount, but cir.rotate requires them to have the same type. Cast amount
319 // to the type of input when necessary.
321
322 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
323 amount, isRotateLeft);
324 return RValue::get(r);
325}
326
// Emits a unary FP op of type `Operation` on the call's first argument,
// with the call's FP options (rounding/exception pragmas) in effect via
// the RAII guard.
// NOTE(review): this extraction is missing original lines 328 (first line
// of the signature) and 333 (likely a constrained-FP missing-feature
// marker). Confirm against upstream.
327template <class Operation>
329 const CallExpr &e) {
330 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
331
332 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, &e);
334
335 auto call =
336 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
337 return RValue::get(call->getResult(0));
338}
339
// Emits a unary FP op of type `Operation` on the call's first argument,
// without touching FP pragma state (contrast with the RAII variant above).
// NOTE(review): original line 341 — the first line of the signature —
// is missing from this extraction.
340template <class Operation>
342 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
343 auto call =
344 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
345 return RValue::get(call->getResult(0));
346}
347
// Emits a unary op whose result type differs from its operand type
// (e.g. FP-to-integer rounding builtins such as lround/lrint).
// NOTE(review): this extraction is missing original lines 349 (first line
// of the signature) and 354 (likely an FP-options RAII guard or a
// missing-feature marker). Confirm against upstream.
348template <typename Op>
350 const CallExpr &e) {
351 mlir::Type resultType = cgf.convertType(e.getType());
352 mlir::Value src = cgf.emitScalarExpr(e.getArg(0));
353
355
356 auto call = Op::create(cgf.getBuilder(), src.getLoc(), resultType, src);
357 return RValue::get(call->getResult(0));
358}
359
// Emits a binary op of type `Op` over the call's first two arguments and
// wraps the result in an RValue.
// NOTE(review): original line 361 — the first line of the signature —
// is missing from this extraction.
360template <typename Op>
362 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
363 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
364
365 mlir::Location loc = cgf.getLoc(e.getExprLoc());
366 mlir::Type ty = cgf.convertType(e.getType());
367 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
368
369 return RValue::get(call->getResult(0));
370}
371
// Emits a binary op of type `Op` over the call's first two arguments and
// returns the raw mlir::Value (callers wrap it as needed).
// NOTE(review): this extraction is missing original lines 373 (first line
// of the signature) and 381 (likely an FP-options RAII guard or a
// missing-feature marker). Confirm against upstream.
372template <typename Op>
374 const CallExpr &e) {
375 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
376 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
377
378 mlir::Location loc = cgf.getLoc(e.getExprLoc());
379 mlir::Type ty = cgf.convertType(e.getType());
380
382
383 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
384 return call->getResult(0);
385}
386
// Reports not-yet-implemented for X86 target builtins (distinguishing
// library-function builtins in the diagnostic text) and returns an undef
// value of the call's result type so emission can continue.
// NOTE(review): original line 387 — the first line of the signature —
// is missing from this extraction.
388 unsigned builtinID) {
389
390 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
391 cgf.cgm.errorNYI(
392 e->getSourceRange(),
393 std::string("unimplemented X86 library function builtin call: ") +
394 cgf.getContext().BuiltinInfo.getName(builtinID));
395 } else {
396 cgf.cgm.errorNYI(e->getSourceRange(),
397 std::string("unimplemented X86 builtin call: ") +
398 cgf.getContext().BuiltinInfo.getName(builtinID));
399 }
400
401 return cgf.getUndefRValue(e->getType());
402}
403
// Emits the alloca family of builtins: allocates `size` bytes on the stack
// at __BIGGEST_ALIGNMENT__, optionally leaves the memory uninitialized, and
// returns the buffer bitcast to `void *`.
// NOTE(review): this extraction is missing original lines 404 (first line
// of the signature), 416 (the initializer of suitableAlignmentInBytes,
// presumably derived from ti.getSuitableAlign()), 436 and 439-440 (the
// TrivialAutoVarInitKind case labels), and 452-454 (the `if` condition
// guarding the address-space-cast NYI). Confirm against upstream.
405 unsigned builtinID) {
406 assert(builtinID == Builtin::BI__builtin_alloca ||
407 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
408 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
409
410 // Get alloca size input
411 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
412
413 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
414 const TargetInfo &ti = cgf.getContext().getTargetInfo();
415 const CharUnits suitableAlignmentInBytes =
417
418 // Emit the alloca op with type `u8 *` to match the semantics of
419 // `llvm.alloca`. We later bitcast the type to `void *` to match the
420 // semantics of C/C++
421 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
422 // pointer of type `void *`. This will require a change to the allocaOp
423 // verifier.
424 CIRGenBuilderTy &builder = cgf.getBuilder();
425 mlir::Value allocaAddr = builder.createAlloca(
426 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
427 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
428
429 // Initialize the allocated buffer if required.
430 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
431 // Initialize the alloca with the given size and alignment according to
432 // the lang opts. Only the trivial non-initialization is supported for
433 // now.
434
435 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
437 // Nothing to initialize.
438 break;
441 cgf.cgm.errorNYI("trivial auto var init");
442 break;
443 }
444 }
445
446 // An alloca will always return a pointer to the alloca (stack) address
447 // space. This address space need not be the same as the AST / Language
448 // default (e.g. in C / C++ auto vars are in the generic address space). At
449 // the AST level this is handled within CreateTempAlloca et al., but for the
450 // builtin / dynamic alloca we have to handle it here.
451
455 cgf.cgm.errorNYI(e->getSourceRange(),
456 "Address Space Cast for builtin alloca");
457 }
458
459 // Bitcast the alloca to the expected type.
460 return RValue::get(builder.createBitcast(
461 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
462}
463
// Decides whether an FP math builtin may be emitted as an intrinsic rather
// than a libm call, taking into account #pragma float_control overrides of
// math-errno, __attribute__((optnone)), and the optimization level.
// NOTE(review): this extraction is missing original lines 464 (first line
// of the signature), 471 (presumably
// `FPOptionsOverride op = e->getFPFeatures();`, which line 472 uses), and
// 481 (the call target whose arguments appear on lines 482-483). Confirm
// against upstream.
465 unsigned builtinID) {
466 std::optional<bool> errnoOverriden;
467 // ErrnoOverriden is true if math-errno is overriden via the
468 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
469 // which implies math-errno.
470 if (e->hasStoredFPFeatures()) {
472 if (op.hasMathErrnoOverride())
473 errnoOverriden = op.getMathErrnoOverride();
474 }
475 // True if 'attribute__((optnone))' is used. This attribute overrides
476 // fast-math which implies math-errno.
477 bool optNone =
478 cgf.curFuncDecl && cgf.curFuncDecl->hasAttr<OptimizeNoneAttr>();
479 bool isOptimizationEnabled = cgf.cgm.getCodeGenOpts().OptimizationLevel != 0;
480 bool generateFPMathIntrinsics =
482 builtinID, cgf.cgm.getTriple(), errnoOverriden,
483 cgf.getLangOpts().MathErrno, optNone, isOptimizationEnabled);
484 return generateFPMathIntrinsics;
485}
486
// Dispatches FP math builtins (acos..ldexp families, plus the elementwise
// variants) to their CIR emitters by builtinID.
// NOTE(review): this extraction is heavily truncated — the signature
// (original lines 487, 489) and many interior lines are missing, including
// most of the `return` expressions that named the per-op emitters (e.g.
// lines 500, 510, 520, 531, 551, 560, 608, 617, 638, 648, 676, 686, 696,
// 706, 715, 725, 737, 747, 757, 767, 798, 808, 828, 836, 844, 852, 860).
// As a result several case groups below appear to fall through or return
// bare `RValue::get(` / `RValue::getIgnored()` where the upstream code
// returns a specific emitter call. Confirm every case against upstream
// CIRGenBuiltin.cpp before relying on this listing.
488 unsigned builtinID) {
490 switch (builtinID) {
491 case Builtin::BIacos:
492 case Builtin::BIacosf:
493 case Builtin::BIacosl:
494 case Builtin::BI__builtin_acos:
495 case Builtin::BI__builtin_acosf:
496 case Builtin::BI__builtin_acosf16:
497 case Builtin::BI__builtin_acosl:
498 case Builtin::BI__builtin_acosf128:
499 case Builtin::BI__builtin_elementwise_acos:
501 case Builtin::BIasin:
502 case Builtin::BIasinf:
503 case Builtin::BIasinl:
504 case Builtin::BI__builtin_asin:
505 case Builtin::BI__builtin_asinf:
506 case Builtin::BI__builtin_asinf16:
507 case Builtin::BI__builtin_asinl:
508 case Builtin::BI__builtin_asinf128:
509 case Builtin::BI__builtin_elementwise_asin:
511 case Builtin::BIatan:
512 case Builtin::BIatanf:
513 case Builtin::BIatanl:
514 case Builtin::BI__builtin_atan:
515 case Builtin::BI__builtin_atanf:
516 case Builtin::BI__builtin_atanf16:
517 case Builtin::BI__builtin_atanl:
518 case Builtin::BI__builtin_atanf128:
519 case Builtin::BI__builtin_elementwise_atan:
521 case Builtin::BIatan2:
522 case Builtin::BIatan2f:
523 case Builtin::BIatan2l:
524 case Builtin::BI__builtin_atan2:
525 case Builtin::BI__builtin_atan2f:
526 case Builtin::BI__builtin_atan2f16:
527 case Builtin::BI__builtin_atan2l:
528 case Builtin::BI__builtin_atan2f128:
529 case Builtin::BI__builtin_elementwise_atan2:
530 return RValue::get(
532 case Builtin::BIceil:
533 case Builtin::BIceilf:
534 case Builtin::BIceill:
535 case Builtin::BI__builtin_ceil:
536 case Builtin::BI__builtin_ceilf:
537 case Builtin::BI__builtin_ceilf16:
538 case Builtin::BI__builtin_ceill:
539 case Builtin::BI__builtin_ceilf128:
541 case Builtin::BI__builtin_elementwise_ceil:
542 return RValue::getIgnored();
543 case Builtin::BIcopysign:
544 case Builtin::BIcopysignf:
545 case Builtin::BIcopysignl:
546 case Builtin::BI__builtin_copysign:
547 case Builtin::BI__builtin_copysignf:
548 case Builtin::BI__builtin_copysignf16:
549 case Builtin::BI__builtin_copysignl:
550 case Builtin::BI__builtin_copysignf128:
552 case Builtin::BIcos:
553 case Builtin::BIcosf:
554 case Builtin::BIcosl:
555 case Builtin::BI__builtin_cos:
556 case Builtin::BI__builtin_cosf:
557 case Builtin::BI__builtin_cosf16:
558 case Builtin::BI__builtin_cosl:
559 case Builtin::BI__builtin_cosf128:
561 case Builtin::BI__builtin_elementwise_cos:
562 case Builtin::BIcosh:
563 case Builtin::BIcoshf:
564 case Builtin::BIcoshl:
565 case Builtin::BI__builtin_cosh:
566 case Builtin::BI__builtin_coshf:
567 case Builtin::BI__builtin_coshf16:
568 case Builtin::BI__builtin_coshl:
569 case Builtin::BI__builtin_coshf128:
570 case Builtin::BI__builtin_elementwise_cosh:
571 return RValue::getIgnored();
572 case Builtin::BIexp:
573 case Builtin::BIexpf:
574 case Builtin::BIexpl:
575 case Builtin::BI__builtin_exp:
576 case Builtin::BI__builtin_expf:
577 case Builtin::BI__builtin_expf16:
578 case Builtin::BI__builtin_expl:
579 case Builtin::BI__builtin_expf128:
581 case Builtin::BI__builtin_elementwise_exp:
582 return RValue::getIgnored();
583 case Builtin::BIexp2:
584 case Builtin::BIexp2f:
585 case Builtin::BIexp2l:
586 case Builtin::BI__builtin_exp2:
587 case Builtin::BI__builtin_exp2f:
588 case Builtin::BI__builtin_exp2f16:
589 case Builtin::BI__builtin_exp2l:
590 case Builtin::BI__builtin_exp2f128:
592 case Builtin::BI__builtin_elementwise_exp2:
593 case Builtin::BI__builtin_exp10:
594 case Builtin::BI__builtin_exp10f:
595 case Builtin::BI__builtin_exp10f16:
596 case Builtin::BI__builtin_exp10l:
597 case Builtin::BI__builtin_exp10f128:
598 case Builtin::BI__builtin_elementwise_exp10:
599 return RValue::getIgnored();
600 case Builtin::BIfabs:
601 case Builtin::BIfabsf:
602 case Builtin::BIfabsl:
603 case Builtin::BI__builtin_fabs:
604 case Builtin::BI__builtin_fabsf:
605 case Builtin::BI__builtin_fabsf16:
606 case Builtin::BI__builtin_fabsl:
607 case Builtin::BI__builtin_fabsf128:
609 case Builtin::BIfloor:
610 case Builtin::BIfloorf:
611 case Builtin::BIfloorl:
612 case Builtin::BI__builtin_floor:
613 case Builtin::BI__builtin_floorf:
614 case Builtin::BI__builtin_floorf16:
615 case Builtin::BI__builtin_floorl:
616 case Builtin::BI__builtin_floorf128:
618 case Builtin::BI__builtin_elementwise_floor:
619 case Builtin::BIfma:
620 case Builtin::BIfmaf:
621 case Builtin::BIfmal:
622 case Builtin::BI__builtin_fma:
623 case Builtin::BI__builtin_fmaf:
624 case Builtin::BI__builtin_fmaf16:
625 case Builtin::BI__builtin_fmal:
626 case Builtin::BI__builtin_fmaf128:
627 case Builtin::BI__builtin_elementwise_fma:
628 return RValue::getIgnored();
629 case Builtin::BIfmax:
630 case Builtin::BIfmaxf:
631 case Builtin::BIfmaxl:
632 case Builtin::BI__builtin_fmax:
633 case Builtin::BI__builtin_fmaxf:
634 case Builtin::BI__builtin_fmaxf16:
635 case Builtin::BI__builtin_fmaxl:
636 case Builtin::BI__builtin_fmaxf128:
637 return RValue::get(
639 case Builtin::BIfmin:
640 case Builtin::BIfminf:
641 case Builtin::BIfminl:
642 case Builtin::BI__builtin_fmin:
643 case Builtin::BI__builtin_fminf:
644 case Builtin::BI__builtin_fminf16:
645 case Builtin::BI__builtin_fminl:
646 case Builtin::BI__builtin_fminf128:
647 return RValue::get(
649 case Builtin::BIfmaximum_num:
650 case Builtin::BIfmaximum_numf:
651 case Builtin::BIfmaximum_numl:
652 case Builtin::BI__builtin_fmaximum_num:
653 case Builtin::BI__builtin_fmaximum_numf:
654 case Builtin::BI__builtin_fmaximum_numf16:
655 case Builtin::BI__builtin_fmaximum_numl:
656 case Builtin::BI__builtin_fmaximum_numf128:
657 case Builtin::BIfminimum_num:
658 case Builtin::BIfminimum_numf:
659 case Builtin::BIfminimum_numl:
660 case Builtin::BI__builtin_fminimum_num:
661 case Builtin::BI__builtin_fminimum_numf:
662 case Builtin::BI__builtin_fminimum_numf16:
663 case Builtin::BI__builtin_fminimum_numl:
664 case Builtin::BI__builtin_fminimum_numf128:
665 return RValue::getIgnored();
666 case Builtin::BIfmod:
667 case Builtin::BIfmodf:
668 case Builtin::BIfmodl:
669 case Builtin::BI__builtin_fmod:
670 case Builtin::BI__builtin_fmodf:
671 case Builtin::BI__builtin_fmodf16:
672 case Builtin::BI__builtin_fmodl:
673 case Builtin::BI__builtin_fmodf128:
674 case Builtin::BI__builtin_elementwise_fmod:
675 return RValue::get(
677 case Builtin::BIlog:
678 case Builtin::BIlogf:
679 case Builtin::BIlogl:
680 case Builtin::BI__builtin_log:
681 case Builtin::BI__builtin_logf:
682 case Builtin::BI__builtin_logf16:
683 case Builtin::BI__builtin_logl:
684 case Builtin::BI__builtin_logf128:
685 case Builtin::BI__builtin_elementwise_log:
687 case Builtin::BIlog10:
688 case Builtin::BIlog10f:
689 case Builtin::BIlog10l:
690 case Builtin::BI__builtin_log10:
691 case Builtin::BI__builtin_log10f:
692 case Builtin::BI__builtin_log10f16:
693 case Builtin::BI__builtin_log10l:
694 case Builtin::BI__builtin_log10f128:
695 case Builtin::BI__builtin_elementwise_log10:
697 case Builtin::BIlog2:
698 case Builtin::BIlog2f:
699 case Builtin::BIlog2l:
700 case Builtin::BI__builtin_log2:
701 case Builtin::BI__builtin_log2f:
702 case Builtin::BI__builtin_log2f16:
703 case Builtin::BI__builtin_log2l:
704 case Builtin::BI__builtin_log2f128:
705 case Builtin::BI__builtin_elementwise_log2:
707 case Builtin::BInearbyint:
708 case Builtin::BInearbyintf:
709 case Builtin::BInearbyintl:
710 case Builtin::BI__builtin_nearbyint:
711 case Builtin::BI__builtin_nearbyintf:
712 case Builtin::BI__builtin_nearbyintl:
713 case Builtin::BI__builtin_nearbyintf128:
714 case Builtin::BI__builtin_elementwise_nearbyint:
716 case Builtin::BIpow:
717 case Builtin::BIpowf:
718 case Builtin::BIpowl:
719 case Builtin::BI__builtin_pow:
720 case Builtin::BI__builtin_powf:
721 case Builtin::BI__builtin_powf16:
722 case Builtin::BI__builtin_powl:
723 case Builtin::BI__builtin_powf128:
724 return RValue::get(
726 case Builtin::BI__builtin_elementwise_pow:
727 return RValue::getIgnored();
728 case Builtin::BIrint:
729 case Builtin::BIrintf:
730 case Builtin::BIrintl:
731 case Builtin::BI__builtin_rint:
732 case Builtin::BI__builtin_rintf:
733 case Builtin::BI__builtin_rintf16:
734 case Builtin::BI__builtin_rintl:
735 case Builtin::BI__builtin_rintf128:
736 case Builtin::BI__builtin_elementwise_rint:
738 case Builtin::BIround:
739 case Builtin::BIroundf:
740 case Builtin::BIroundl:
741 case Builtin::BI__builtin_round:
742 case Builtin::BI__builtin_roundf:
743 case Builtin::BI__builtin_roundf16:
744 case Builtin::BI__builtin_roundl:
745 case Builtin::BI__builtin_roundf128:
746 case Builtin::BI__builtin_elementwise_round:
748 case Builtin::BIroundeven:
749 case Builtin::BIroundevenf:
750 case Builtin::BIroundevenl:
751 case Builtin::BI__builtin_roundeven:
752 case Builtin::BI__builtin_roundevenf:
753 case Builtin::BI__builtin_roundevenf16:
754 case Builtin::BI__builtin_roundevenl:
755 case Builtin::BI__builtin_roundevenf128:
756 case Builtin::BI__builtin_elementwise_roundeven:
758 case Builtin::BIsin:
759 case Builtin::BIsinf:
760 case Builtin::BIsinl:
761 case Builtin::BI__builtin_sin:
762 case Builtin::BI__builtin_sinf:
763 case Builtin::BI__builtin_sinf16:
764 case Builtin::BI__builtin_sinl:
765 case Builtin::BI__builtin_sinf128:
766 case Builtin::BI__builtin_elementwise_sin:
768 case Builtin::BIsinh:
769 case Builtin::BIsinhf:
770 case Builtin::BIsinhl:
771 case Builtin::BI__builtin_sinh:
772 case Builtin::BI__builtin_sinhf:
773 case Builtin::BI__builtin_sinhf16:
774 case Builtin::BI__builtin_sinhl:
775 case Builtin::BI__builtin_sinhf128:
776 case Builtin::BI__builtin_elementwise_sinh:
777 case Builtin::BI__builtin_sincospi:
778 case Builtin::BI__builtin_sincospif:
779 case Builtin::BI__builtin_sincospil:
780 case Builtin::BIsincos:
781 case Builtin::BIsincosf:
782 case Builtin::BIsincosl:
783 case Builtin::BI__builtin_sincos:
784 case Builtin::BI__builtin_sincosf:
785 case Builtin::BI__builtin_sincosf16:
786 case Builtin::BI__builtin_sincosl:
787 case Builtin::BI__builtin_sincosf128:
788 return RValue::getIgnored();
789 case Builtin::BIsqrt:
790 case Builtin::BIsqrtf:
791 case Builtin::BIsqrtl:
792 case Builtin::BI__builtin_sqrt:
793 case Builtin::BI__builtin_sqrtf:
794 case Builtin::BI__builtin_sqrtf16:
795 case Builtin::BI__builtin_sqrtl:
796 case Builtin::BI__builtin_sqrtf128:
797 case Builtin::BI__builtin_elementwise_sqrt:
799 case Builtin::BItan:
800 case Builtin::BItanf:
801 case Builtin::BItanl:
802 case Builtin::BI__builtin_tan:
803 case Builtin::BI__builtin_tanf:
804 case Builtin::BI__builtin_tanf16:
805 case Builtin::BI__builtin_tanl:
806 case Builtin::BI__builtin_tanf128:
807 case Builtin::BI__builtin_elementwise_tan:
809 case Builtin::BItanh:
810 case Builtin::BItanhf:
811 case Builtin::BItanhl:
812 case Builtin::BI__builtin_tanh:
813 case Builtin::BI__builtin_tanhf:
814 case Builtin::BI__builtin_tanhf16:
815 case Builtin::BI__builtin_tanhl:
816 case Builtin::BI__builtin_tanhf128:
817 case Builtin::BI__builtin_elementwise_tanh:
818 return RValue::getIgnored();
819 case Builtin::BItrunc:
820 case Builtin::BItruncf:
821 case Builtin::BItruncl:
822 case Builtin::BI__builtin_trunc:
823 case Builtin::BI__builtin_truncf:
824 case Builtin::BI__builtin_truncf16:
825 case Builtin::BI__builtin_truncl:
826 case Builtin::BI__builtin_truncf128:
827 case Builtin::BI__builtin_elementwise_trunc:
829 case Builtin::BIlround:
830 case Builtin::BIlroundf:
831 case Builtin::BIlroundl:
832 case Builtin::BI__builtin_lround:
833 case Builtin::BI__builtin_lroundf:
834 case Builtin::BI__builtin_lroundl:
835 case Builtin::BI__builtin_lroundf128:
837 case Builtin::BIllround:
838 case Builtin::BIllroundf:
839 case Builtin::BIllroundl:
840 case Builtin::BI__builtin_llround:
841 case Builtin::BI__builtin_llroundf:
842 case Builtin::BI__builtin_llroundl:
843 case Builtin::BI__builtin_llroundf128:
845 case Builtin::BIlrint:
846 case Builtin::BIlrintf:
847 case Builtin::BIlrintl:
848 case Builtin::BI__builtin_lrint:
849 case Builtin::BI__builtin_lrintf:
850 case Builtin::BI__builtin_lrintl:
851 case Builtin::BI__builtin_lrintf128:
853 case Builtin::BIllrint:
854 case Builtin::BIllrintf:
855 case Builtin::BIllrintl:
856 case Builtin::BI__builtin_llrint:
857 case Builtin::BI__builtin_llrintf:
858 case Builtin::BI__builtin_llrintl:
859 case Builtin::BI__builtin_llrintf128:
861 case Builtin::BI__builtin_ldexp:
862 case Builtin::BI__builtin_ldexpf:
863 case Builtin::BI__builtin_ldexpl:
864 case Builtin::BI__builtin_ldexpf16:
865 case Builtin::BI__builtin_ldexpf128:
866 case Builtin::BI__builtin_elementwise_ldexp:
867 default:
868 break;
869 }
870
871 return RValue::getIgnored();
872}
873
874// FIXME: Remove cgf parameter when all descriptor kinds are implemented
// Translates one LLVM IITDescriptor (from the intrinsic type table) into the
// corresponding CIR type, consuming descriptors from `infos` as it recurses
// (e.g. for vector element types). Unknown kinds report NYI and fall back to
// void.
// NOTE(review): original lines 876-877 — the function name and first
// parameters (presumably `decodeFixedType(CIRGenFunction &cgf,
// ArrayRef<IITDescriptor> &infos,`) — are missing from this extraction;
// the recursive call on line 903 shows the parameter order.
875static mlir::Type
878 mlir::MLIRContext *context) {
879 using namespace llvm::Intrinsic;
880
// Pop the next descriptor; `infos` advances so nested calls see the rest.
881 IITDescriptor descriptor = infos.front();
882 infos = infos.slice(1);
883
884 switch (descriptor.Kind) {
885 case IITDescriptor::Void:
886 return cir::VoidType::get(context);
887 case IITDescriptor::Half:
888 return cir::FP16Type::get(context);
889 case IITDescriptor::BFloat:
890 return cir::BF16Type::get(context);
891 case IITDescriptor::Float:
892 return cir::SingleType::get(context);
893 case IITDescriptor::Double:
894 return cir::DoubleType::get(context);
895 case IITDescriptor::Quad:
896 return cir::FP128Type::get(context);
897 // If the intrinsic expects unsigned integers, the signedness is corrected in
898 // correctIntegerSignedness()
899 case IITDescriptor::Integer:
900 return cir::IntType::get(context, descriptor.IntegerWidth,
901 /*isSigned=*/true);
902 case IITDescriptor::Vector: {
903 mlir::Type elementType = decodeFixedType(cgf, infos, context);
904 unsigned numElements = descriptor.VectorWidth.getFixedValue();
905 return cir::VectorType::get(elementType, numElements);
906 }
907 case IITDescriptor::Pointer: {
908 mlir::Builder builder(context);
909 auto addrSpace = cir::TargetAddressSpaceAttr::get(
910 context, descriptor.PointerAddressSpace);
911 return cir::PointerType::get(cir::VoidType::get(context), addrSpace);
912 }
913 default:
914 cgf.cgm.errorNYI("Unimplemented intrinsic type descriptor");
915 return cir::VoidType::get(context);
916 }
917}
918
919/// Helper function to correct integer signedness for intrinsic arguments and
920/// return type. IIT always returns signed integers, but the actual intrinsic
921/// may expect unsigned integers based on the AST FunctionDecl parameter types.
922static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType,
923 mlir::MLIRContext *context) {
924 auto intTy = dyn_cast<cir::IntType>(iitType);
925 if (!intTy)
926 return iitType;
927
928 if (astType->isUnsignedIntegerType())
929 return cir::IntType::get(context, intTy.getWidth(), /*isSigned=*/false);
930
931 return iitType;
932}
933
// Adjusts a pointer argument to the pointer type an intrinsic expects:
// address-space mismatches get an addrspace cast (preserving the original
// pointee type), everything else a plain bitcast. Callers guarantee the
// types differ (asserted).
// NOTE(review): original line 942 is missing from this extraction —
// presumably the first line of an errorNYI/assert call whose message
// argument is the string on line 943.
934static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy,
935 CIRGenBuilderTy &builder) {
936 auto ptrType = mlir::cast<cir::PointerType>(argValue.getType());
937
938 auto expectedPtrType = mlir::cast<cir::PointerType>(expectedTy);
939 assert(ptrType != expectedPtrType && "types should not match");
940
941 if (ptrType.getAddrSpace() != expectedPtrType.getAddrSpace()) {
943 "address space handling not yet implemented");
944 auto newPtrType = cir::PointerType::get(ptrType.getPointee(),
945 expectedPtrType.getAddrSpace());
946 return builder.createAddrSpaceCast(argValue, newPtrType);
947 }
948
949 return builder.createBitcast(argValue, expectedTy);
950}
951
// Builds the CIR function type for an LLVM intrinsic by decoding its IIT
// descriptor table: first entry is the result, remaining entries are
// arguments, with a trailing VarArg marker handled specially. A void result
// maps to CIR's "no return type" convention.
// NOTE(review): this extraction is missing original lines 957 and 963 —
// presumably the declarations of `table` (a SmallVector<IITDescriptor>
// filled on line 958) and `argTypes` (the vector pushed into on line 971).
952static cir::FuncType getIntrinsicType(CIRGenFunction &cgf,
953 mlir::MLIRContext *context,
954 llvm::Intrinsic::ID id) {
955 using namespace llvm::Intrinsic;
956
958 getIntrinsicInfoTableEntries(id, table);
959
960 ArrayRef<IITDescriptor> tableRef = table;
961 mlir::Type resultTy = decodeFixedType(cgf, tableRef, context);
962
964 bool isVarArg = false;
965 while (!tableRef.empty()) {
966 IITDescriptor::IITDescriptorKind kind = tableRef.front().Kind;
967 if (kind == IITDescriptor::VarArg) {
968 isVarArg = true;
969 break; // VarArg is last
970 }
971 argTypes.push_back(decodeFixedType(cgf, tableRef, context));
972 }
973
974 // CIR convention: no explicit void return type
975 if (isa<cir::VoidType>(resultTy))
976 return cir::FuncType::get(context, argTypes, /*optionalReturnType=*/nullptr,
977 isVarArg);
978
979 return cir::FuncType::get(context, argTypes, resultTy, isVarArg);
980}
981
983 const CallExpr *e,
985 mlir::Location loc = getLoc(e->getSourceRange());
986
987 // See if we can constant fold this builtin. If so, don't emit it at all.
988 // TODO: Extend this handling to all builtin calls that we can constant-fold.
989 // Do not constant-fold immediate (target-specific) builtins; their ASTs can
990 // trigger the constant evaluator in cases it cannot safely handle.
991 // Skip EvaluateAsRValue for those.
992 Expr::EvalResult result;
993 if (e->isPRValue() && !getContext().BuiltinInfo.isImmediate(builtinID) &&
994 e->EvaluateAsRValue(result, cgm.getASTContext()) &&
995 !result.hasSideEffects()) {
996 if (result.Val.isInt()) {
997 QualType type = e->getType();
998 if (type->isBooleanType())
999 return RValue::get(
1000 builder.getBool(result.Val.getInt().getBoolValue(), loc));
1001 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
1002 }
1003 if (result.Val.isFloat()) {
1004 // Note: we are using result type of CallExpr to determine the type of
1005 // the constant. Classic codegen uses the result value to determine the
1006 // type. We feel it should be Ok to use expression type because it is
1007 // hard to imagine a builtin function evaluates to a value that
1008 // over/underflows its own defined type.
1009 mlir::Type type = convertType(e->getType());
1010 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
1011 }
1012 }
1013
1014 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
1015
1017
1018 // If the builtin has been declared explicitly with an assembler label,
1019 // disable the specialized emitting below. Ideally we should communicate the
1020 // rename in IR, or at least avoid generating the intrinsic calls that are
1021 // likely to get lowered to the renamed library functions.
1022 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
1023
1024 bool generateFPMathIntrinsics =
1025 shouldCIREmitFPMathIntrinsic(*this, e, builtinID);
1026
1027 if (generateFPMathIntrinsics) {
1028 // Try to match the builtinID with a floating point math builtin.
1029 RValue rv = tryEmitFPMathIntrinsic(*this, e, builtinIDIfNoAsmLabel);
1030
1031 // Return the result directly if a math intrinsic was generated.
1032 if (!rv.isIgnored()) {
1033 return rv;
1034 }
1035 }
1036
1038
1039 switch (builtinIDIfNoAsmLabel) {
1040 default:
1041 break;
1042
1043 // C stdarg builtins.
1044 case Builtin::BI__builtin_stdarg_start:
1045 case Builtin::BI__builtin_va_start:
1046 case Builtin::BI__va_start: {
1047 mlir::Value vaList = builtinID == Builtin::BI__va_start
1048 ? emitScalarExpr(e->getArg(0))
1049 : emitVAListRef(e->getArg(0)).getPointer();
1050 emitVAStart(vaList);
1051 return {};
1052 }
1053
1054 case Builtin::BI__builtin_va_end:
1056 return {};
1057 case Builtin::BI__builtin_va_copy: {
1058 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
1059 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
1060 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
1061 return {};
1062 }
1063
1064 case Builtin::BIabs:
1065 case Builtin::BIlabs:
1066 case Builtin::BIllabs:
1067 case Builtin::BI__builtin_abs:
1068 case Builtin::BI__builtin_labs:
1069 case Builtin::BI__builtin_llabs: {
1070 bool sanitizeOverflow = sanOpts.has(SanitizerKind::SignedIntegerOverflow);
1071 mlir::Value arg = emitScalarExpr(e->getArg(0));
1072 mlir::Value result;
1073 switch (getLangOpts().getSignedOverflowBehavior()) {
1075 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1076 /*minIsPoison=*/false);
1077 break;
1079 if (!sanitizeOverflow) {
1080 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1081 /*minIsPoison=*/true);
1082 break;
1083 }
1084 [[fallthrough]];
1086 cgm.errorNYI(e->getSourceRange(), "abs with overflow handling");
1087 return RValue::get(nullptr);
1088 }
1089 return RValue::get(result);
1090 }
1091
1092 case Builtin::BI__assume:
1093 case Builtin::BI__builtin_assume: {
1094 if (e->getArg(0)->HasSideEffects(getContext()))
1095 return RValue::get(nullptr);
1096
1097 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
1098 cir::AssumeOp::create(builder, loc, argValue);
1099 return RValue::get(nullptr);
1100 }
1101
1102 case Builtin::BI__builtin_assume_separate_storage: {
1103 mlir::Value value0 = emitScalarExpr(e->getArg(0));
1104 mlir::Value value1 = emitScalarExpr(e->getArg(1));
1105 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
1106 return RValue::get(nullptr);
1107 }
1108
1109 case Builtin::BI__builtin_assume_aligned: {
1110 const Expr *ptrExpr = e->getArg(0);
1111 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
1112 mlir::Value offsetValue =
1113 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
1114
1115 std::optional<llvm::APSInt> alignment =
1117 assert(alignment.has_value() &&
1118 "the second argument to __builtin_assume_aligned must be an "
1119 "integral constant expression");
1120
1121 mlir::Value result =
1122 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
1123 alignment->getSExtValue(), offsetValue);
1124 return RValue::get(result);
1125 }
1126
1127 case Builtin::BI__builtin_complex: {
1128 mlir::Value real = emitScalarExpr(e->getArg(0));
1129 mlir::Value imag = emitScalarExpr(e->getArg(1));
1130 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
1131 return RValue::getComplex(complex);
1132 }
1133
1134 case Builtin::BI__builtin_creal:
1135 case Builtin::BI__builtin_crealf:
1136 case Builtin::BI__builtin_creall:
1137 case Builtin::BIcreal:
1138 case Builtin::BIcrealf:
1139 case Builtin::BIcreall: {
1140 mlir::Value complex = emitComplexExpr(e->getArg(0));
1141 mlir::Value real = builder.createComplexReal(loc, complex);
1142 return RValue::get(real);
1143 }
1144
1145 case Builtin::BI__builtin_cimag:
1146 case Builtin::BI__builtin_cimagf:
1147 case Builtin::BI__builtin_cimagl:
1148 case Builtin::BIcimag:
1149 case Builtin::BIcimagf:
1150 case Builtin::BIcimagl: {
1151 mlir::Value complex = emitComplexExpr(e->getArg(0));
1152 mlir::Value imag = builder.createComplexImag(loc, complex);
1153 return RValue::get(imag);
1154 }
1155
1156 case Builtin::BI__builtin_conj:
1157 case Builtin::BI__builtin_conjf:
1158 case Builtin::BI__builtin_conjl:
1159 case Builtin::BIconj:
1160 case Builtin::BIconjf:
1161 case Builtin::BIconjl: {
1162 mlir::Value complex = emitComplexExpr(e->getArg(0));
1163 mlir::Value conj = builder.createNot(complex);
1164 return RValue::getComplex(conj);
1165 }
1166
1167 case Builtin::BI__builtin_clrsb:
1168 case Builtin::BI__builtin_clrsbl:
1169 case Builtin::BI__builtin_clrsbll:
1170 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
1171
1172 case Builtin::BI__builtin_ctzs:
1173 case Builtin::BI__builtin_ctz:
1174 case Builtin::BI__builtin_ctzl:
1175 case Builtin::BI__builtin_ctzll:
1177 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e,
1178 getTarget().isCLZForZeroUndef());
1179 case Builtin::BI__builtin_ctzg:
1181
1182 case Builtin::BI__builtin_clzs:
1183 case Builtin::BI__builtin_clz:
1184 case Builtin::BI__builtin_clzl:
1185 case Builtin::BI__builtin_clzll:
1187 return emitBuiltinBitOp<cir::BitClzOp>(*this, e,
1188 getTarget().isCLZForZeroUndef());
1189 case Builtin::BI__builtin_clzg:
1191
1192 case Builtin::BI__builtin_elementwise_ctzg:
1193 cgm.errorNYI(e->getSourceRange(), "__builtin_elementwise_ctzg");
1194 return RValue::get(nullptr);
1195 case Builtin::BI__builtin_elementwise_clzg:
1196 cgm.errorNYI(e->getSourceRange(), "__builtin_elementwise_clzg");
1197 return RValue::get(nullptr);
1198
1199 case Builtin::BI__builtin_ffs:
1200 case Builtin::BI__builtin_ffsl:
1201 case Builtin::BI__builtin_ffsll:
1202 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
1203
1204 case Builtin::BI__builtin_parity:
1205 case Builtin::BI__builtin_parityl:
1206 case Builtin::BI__builtin_parityll:
1207 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
1208
1209 case Builtin::BI__lzcnt16:
1210 case Builtin::BI__lzcnt:
1211 case Builtin::BI__lzcnt64:
1212 return emitBuiltinBitOp<cir::BitClzOp>(*this, e);
1213
1214 case Builtin::BI__popcnt16:
1215 case Builtin::BI__popcnt:
1216 case Builtin::BI__popcnt64:
1217 case Builtin::BI__builtin_popcount:
1218 case Builtin::BI__builtin_popcountl:
1219 case Builtin::BI__builtin_popcountll:
1220 case Builtin::BI__builtin_popcountg:
1221 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
1222
1223 // Always return the argument of __builtin_unpredictable. LLVM does not
1224 // have an intrinsic corresponding to this builtin. Metadata for this
1225 // builtin should be added directly to instructions such as branches or
1226 // switches that use it.
1227 case Builtin::BI__builtin_unpredictable: {
1228 return RValue::get(emitScalarExpr(e->getArg(0)));
1229 }
1230
1231 case Builtin::BI__builtin_expect:
1232 case Builtin::BI__builtin_expect_with_probability: {
1233 mlir::Value argValue = emitScalarExpr(e->getArg(0));
1234 if (cgm.getCodeGenOpts().OptimizationLevel == 0)
1235 return RValue::get(argValue);
1236
1237 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
1238
1239 mlir::FloatAttr probAttr;
1240 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
1241 llvm::APFloat probability(0.0);
1242 const Expr *probArg = e->getArg(2);
1243 [[maybe_unused]] bool evalSucceeded =
1244 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
1245 assert(evalSucceeded &&
1246 "probability should be able to evaluate as float");
1247 bool loseInfo = false; // ignored
1248 probability.convert(llvm::APFloat::IEEEdouble(),
1249 llvm::RoundingMode::Dynamic, &loseInfo);
1250 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
1251 probability);
1252 }
1253
1254 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
1255 argValue, expectedValue, probAttr);
1256 return RValue::get(result);
1257 }
1258
1259 case Builtin::BI__builtin_bswap16:
1260 case Builtin::BI__builtin_bswap32:
1261 case Builtin::BI__builtin_bswap64:
1262 case Builtin::BI_byteswap_ushort:
1263 case Builtin::BI_byteswap_ulong:
1264 case Builtin::BI_byteswap_uint64: {
1265 mlir::Value arg = emitScalarExpr(e->getArg(0));
1266 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
1267 }
1268
1269 case Builtin::BI__builtin_bitreverse8:
1270 case Builtin::BI__builtin_bitreverse16:
1271 case Builtin::BI__builtin_bitreverse32:
1272 case Builtin::BI__builtin_bitreverse64: {
1273 mlir::Value arg = emitScalarExpr(e->getArg(0));
1274 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
1275 }
1276
1277 case Builtin::BI__builtin_rotateleft8:
1278 case Builtin::BI__builtin_rotateleft16:
1279 case Builtin::BI__builtin_rotateleft32:
1280 case Builtin::BI__builtin_rotateleft64:
1281 return emitRotate(e, /*isRotateLeft=*/true);
1282
1283 case Builtin::BI__builtin_rotateright8:
1284 case Builtin::BI__builtin_rotateright16:
1285 case Builtin::BI__builtin_rotateright32:
1286 case Builtin::BI__builtin_rotateright64:
1287 return emitRotate(e, /*isRotateLeft=*/false);
1288
1289 case Builtin::BI__builtin_coro_id:
1290 case Builtin::BI__builtin_coro_promise:
1291 case Builtin::BI__builtin_coro_resume:
1292 case Builtin::BI__builtin_coro_noop:
1293 case Builtin::BI__builtin_coro_destroy:
1294 case Builtin::BI__builtin_coro_done:
1295 case Builtin::BI__builtin_coro_alloc:
1296 case Builtin::BI__builtin_coro_begin:
1297 case Builtin::BI__builtin_coro_end:
1298 case Builtin::BI__builtin_coro_suspend:
1299 case Builtin::BI__builtin_coro_align:
1300 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
1301 return getUndefRValue(e->getType());
1302
1303 case Builtin::BI__builtin_coro_frame: {
1304 return emitCoroutineFrame();
1305 }
1306 case Builtin::BI__builtin_coro_free:
1307 return RValue::get(emitCoroFreeBuiltin(e).getResult());
1308 case Builtin::BI__builtin_coro_size: {
1309 GlobalDecl gd{fd};
1310 mlir::Type ty = cgm.getTypes().getFunctionType(
1311 cgm.getTypes().arrangeGlobalDeclaration(gd));
1312 const auto *nd = cast<NamedDecl>(gd.getDecl());
1313 cir::FuncOp fnOp =
1314 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
1315 fnOp.setBuiltin(true);
1316 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
1317 returnValue);
1318 }
1319
1320 case Builtin::BI__builtin_constant_p: {
1321 mlir::Type resultType = convertType(e->getType());
1322
1323 const Expr *arg = e->getArg(0);
1324 QualType argType = arg->getType();
1325 // FIXME: The allowance for Obj-C pointers and block pointers is historical
1326 // and likely a mistake.
1327 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
1328 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
1329 // Per the GCC documentation, only numeric constants are recognized after
1330 // inlining.
1331 return RValue::get(
1332 builder.getConstInt(getLoc(e->getSourceRange()),
1333 mlir::cast<cir::IntType>(resultType), 0));
1334 }
1335
1336 if (arg->HasSideEffects(getContext())) {
1337 // The argument is unevaluated, so be conservative if it might have
1338 // side-effects.
1339 return RValue::get(
1340 builder.getConstInt(getLoc(e->getSourceRange()),
1341 mlir::cast<cir::IntType>(resultType), 0));
1342 }
1343
1344 mlir::Value argValue = emitScalarExpr(arg);
1345 if (argType->isObjCObjectPointerType()) {
1346 cgm.errorNYI(e->getSourceRange(),
1347 "__builtin_constant_p: Obj-C object pointer");
1348 return {};
1349 }
1350 argValue = builder.createBitcast(argValue, convertType(argType));
1351
1352 mlir::Value result = cir::IsConstantOp::create(
1353 builder, getLoc(e->getSourceRange()), argValue);
1354 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
1355 result = builder.createBoolToInt(result, resultType);
1356 return RValue::get(result);
1357 }
1358 case Builtin::BI__builtin_dynamic_object_size:
1359 case Builtin::BI__builtin_object_size: {
1360 unsigned type =
1361 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
1362 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
1363
1364 // We pass this builtin onto the optimizer so that it can figure out the
1365 // object size in more complex cases.
1366 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
1367 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
1368 /*EmittedE=*/nullptr, isDynamic));
1369 }
1370
1371 case Builtin::BI__builtin_prefetch: {
1372 auto evaluateOperandAsInt = [&](const Expr *arg) {
1373 Expr::EvalResult res;
1374 [[maybe_unused]] bool evalSucceed =
1375 arg->EvaluateAsInt(res, cgm.getASTContext());
1376 assert(evalSucceed && "expression should be able to evaluate as int");
1377 return res.Val.getInt().getZExtValue();
1378 };
1379
1380 bool isWrite = false;
1381 if (e->getNumArgs() > 1)
1382 isWrite = evaluateOperandAsInt(e->getArg(1));
1383
1384 int locality = 3;
1385 if (e->getNumArgs() > 2)
1386 locality = evaluateOperandAsInt(e->getArg(2));
1387
1388 mlir::Value address = emitScalarExpr(e->getArg(0));
1389 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
1390 return RValue::get(nullptr);
1391 }
1392 case Builtin::BI__builtin_readcyclecounter:
1393 case Builtin::BI__builtin_readsteadycounter:
1394 return errorBuiltinNYI(*this, e, builtinID);
1395 case Builtin::BI__builtin___clear_cache: {
1396 mlir::Value begin =
1397 builder.createPtrBitcast(emitScalarExpr(e->getArg(0)), cgm.voidTy);
1398 mlir::Value end =
1399 builder.createPtrBitcast(emitScalarExpr(e->getArg(1)), cgm.voidTy);
1400 cir::ClearCacheOp::create(builder, getLoc(e->getSourceRange()), begin, end);
1401 return RValue::get(nullptr);
1402 }
1403 case Builtin::BI__builtin_trap:
1404 emitTrap(loc, /*createNewBlock=*/true);
1405 return RValue::getIgnored();
1406 case Builtin::BI__builtin_verbose_trap:
1408 emitTrap(loc, /*createNewBlock=*/true);
1409 return RValue::getIgnored();
1410 case Builtin::BI__debugbreak:
1411 return errorBuiltinNYI(*this, e, builtinID);
1412 case Builtin::BI__builtin_unreachable:
1413 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
1414 return RValue::getIgnored();
1415 case Builtin::BI__builtin_powi:
1416 case Builtin::BI__builtin_powif:
1417 case Builtin::BI__builtin_powil: {
1418 mlir::Value src0 = emitScalarExpr(e->getArg(0));
1419 mlir::Value src1 = emitScalarExpr(e->getArg(1));
1420 return RValue::get(builder.emitIntrinsicCallOp(
1421 getLoc(e->getExprLoc()), "powi", src0.getType(),
1422 mlir::ValueRange{src0, src1}));
1423 }
1424 case Builtin::BI__builtin_frexpl:
1425 case Builtin::BI__builtin_frexp:
1426 case Builtin::BI__builtin_frexpf:
1427 case Builtin::BI__builtin_frexpf128:
1428 case Builtin::BI__builtin_frexpf16: {
1429 mlir::Value val = emitScalarExpr(e->getArg(0));
1430 mlir::Value ptr = emitScalarExpr(e->getArg(1));
1431 mlir::Type fpTy = val.getType();
1432 QualType intQualTy = e->getArg(1)->getType()->getPointeeType();
1433 mlir::Type intTy = convertType(intQualTy);
1434 mlir::Location callLoc = getLoc(e->getExprLoc());
1435 auto frexpOp = cir::FrexpOp::create(builder, callLoc, fpTy, intTy, val);
1436 LValue lv = makeNaturalAlignAddrLValue(ptr, intQualTy);
1437 emitStoreOfScalar(frexpOp.getExp(), lv, /*isInit=*/false);
1438 return RValue::get(frexpOp.getResult());
1439 }
1440 case Builtin::BImodf:
1441 case Builtin::BImodff:
1442 case Builtin::BImodfl:
1443 case Builtin::BI__builtin_modf:
1444 case Builtin::BI__builtin_modff:
1445 case Builtin::BI__builtin_modfl: {
1446 mlir::Value val = emitScalarExpr(e->getArg(0));
1447 mlir::Value ptr = emitScalarExpr(e->getArg(1));
1448 mlir::Type fpTy = val.getType();
1449 mlir::Location callLoc = getLoc(e->getExprLoc());
1450 auto modfOp = cir::ModfOp::create(builder, callLoc, fpTy, fpTy, val);
1451 QualType destPtrTy = e->getArg(1)->getType()->getPointeeType();
1452 LValue lv = makeNaturalAlignAddrLValue(ptr, destPtrTy);
1453 emitStoreOfScalar(modfOp.getIntegral(), lv, /*isInit=*/false);
1454 return RValue::get(modfOp.getFractional());
1455 }
1456 case Builtin::BI__builtin_isgreater:
1457 case Builtin::BI__builtin_isgreaterequal:
1458 case Builtin::BI__builtin_isless:
1459 case Builtin::BI__builtin_islessequal:
1460 case Builtin::BI__builtin_islessgreater:
1461 case Builtin::BI__builtin_isunordered: {
1462 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1463 mlir::Value lhs = emitScalarExpr(e->getArg(0));
1464 mlir::Value rhs = emitScalarExpr(e->getArg(1));
1465 mlir::Location loc = getLoc(e->getBeginLoc());
1466 mlir::Type intTy = convertType(e->getType());
1467
1468 mlir::Value cmpResult;
1469 switch (builtinID) {
1470 case Builtin::BI__builtin_isgreater:
1471 cmpResult = builder.createCompare(loc, cir::CmpOpKind::gt, lhs, rhs);
1472 break;
1473 case Builtin::BI__builtin_isgreaterequal:
1474 cmpResult = builder.createCompare(loc, cir::CmpOpKind::ge, lhs, rhs);
1475 break;
1476 case Builtin::BI__builtin_isless:
1477 cmpResult = builder.createCompare(loc, cir::CmpOpKind::lt, lhs, rhs);
1478 break;
1479 case Builtin::BI__builtin_islessequal:
1480 cmpResult = builder.createCompare(loc, cir::CmpOpKind::le, lhs, rhs);
1481 break;
1482 case Builtin::BI__builtin_islessgreater:
1483 cmpResult = builder.createCompare(loc, cir::CmpOpKind::one, lhs, rhs);
1484 break;
1485 case Builtin::BI__builtin_isunordered:
1486 cmpResult = builder.createCompare(loc, cir::CmpOpKind::uno, lhs, rhs);
1487 break;
1488 default:
1489 llvm_unreachable("Unknown ordered comparison");
1490 }
1491 return RValue::get(builder.createBoolToInt(cmpResult, intTy));
1492 }
1493 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
1494 //
1495 // The `__builtin_isfpclass()` builtin is a generalization of functions
1496 // isnan, isinf, isfinite and some others defined by the C standard. It tests
1497 // if the floating-point value, specified by the first argument, falls into
1498 // any of data classes, specified by the second argument.
1499 case Builtin::BI__builtin_isnan: {
1500 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1501 mlir::Value v = emitScalarExpr(e->getArg(0));
1503 mlir::Location loc = getLoc(e->getBeginLoc());
1504 return RValue::get(builder.createBoolToInt(
1505 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
1506 convertType(e->getType())));
1507 }
1508
1509 case Builtin::BI__builtin_issignaling: {
1510 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1511 mlir::Value v = emitScalarExpr(e->getArg(0));
1512 mlir::Location loc = getLoc(e->getBeginLoc());
1513 return RValue::get(builder.createBoolToInt(
1514 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
1515 convertType(e->getType())));
1516 }
1517
1518 case Builtin::BI__builtin_isinf: {
1519 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1520 mlir::Value v = emitScalarExpr(e->getArg(0));
1522 mlir::Location loc = getLoc(e->getBeginLoc());
1523 return RValue::get(builder.createBoolToInt(
1524 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
1525 convertType(e->getType())));
1526 }
1527 case Builtin::BIfinite:
1528 case Builtin::BI__finite:
1529 case Builtin::BIfinitef:
1530 case Builtin::BI__finitef:
1531 case Builtin::BIfinitel:
1532 case Builtin::BI__finitel:
1533 case Builtin::BI__builtin_isfinite: {
1534 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1535 mlir::Value v = emitScalarExpr(e->getArg(0));
1537 mlir::Location loc = getLoc(e->getBeginLoc());
1538 return RValue::get(builder.createBoolToInt(
1539 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
1540 convertType(e->getType())));
1541 }
1542
1543 case Builtin::BI__builtin_isnormal: {
1544 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1545 mlir::Value v = emitScalarExpr(e->getArg(0));
1546 mlir::Location loc = getLoc(e->getBeginLoc());
1547 return RValue::get(builder.createBoolToInt(
1548 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
1549 convertType(e->getType())));
1550 }
1551
1552 case Builtin::BI__builtin_issubnormal: {
1553 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1554 mlir::Value v = emitScalarExpr(e->getArg(0));
1555 mlir::Location loc = getLoc(e->getBeginLoc());
1556 return RValue::get(builder.createBoolToInt(
1557 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
1558 convertType(e->getType())));
1559 }
1560
1561 case Builtin::BI__builtin_iszero: {
1562 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1563 mlir::Value v = emitScalarExpr(e->getArg(0));
1564 mlir::Location loc = getLoc(e->getBeginLoc());
1565 return RValue::get(builder.createBoolToInt(
1566 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
1567 convertType(e->getType())));
1568 }
1569 case Builtin::BI__builtin_isfpclass: {
1570 Expr::EvalResult result;
1571 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
1572 break;
1573
1574 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1575 mlir::Value v = emitScalarExpr(e->getArg(0));
1576 uint64_t test = result.Val.getInt().getLimitedValue();
1577 mlir::Location loc = getLoc(e->getBeginLoc());
1578 //
1579 return RValue::get(builder.createBoolToInt(
1580 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
1581 convertType(e->getType())));
1582 }
1583 case Builtin::BI__builtin_nondeterministic_value:
1584 return errorBuiltinNYI(*this, e, builtinID);
1585 case Builtin::BI__builtin_elementwise_abs: {
1586 mlir::Type cirTy = convertType(e->getArg(0)->getType());
1587 bool isIntTy = cir::isIntOrVectorOfIntType(cirTy);
1588 if (!isIntTy)
1589 return emitUnaryFPBuiltin<cir::FAbsOp>(*this, *e);
1590 mlir::Value arg = emitScalarExpr(e->getArg(0));
1591 mlir::Value result = cir::AbsOp::create(builder, getLoc(e->getExprLoc()),
1592 arg.getType(), arg, false);
1593 return RValue::get(result);
1594 }
1595 case Builtin::BI__builtin_elementwise_acos:
1597 case Builtin::BI__builtin_elementwise_asin:
1599 case Builtin::BI__builtin_elementwise_atan:
1601 case Builtin::BI__builtin_elementwise_atan2:
1602 return RValue::get(
1604 case Builtin::BI__builtin_elementwise_exp:
1606 case Builtin::BI__builtin_elementwise_exp2:
1608 case Builtin::BI__builtin_elementwise_log:
1610 case Builtin::BI__builtin_elementwise_log2:
1612 case Builtin::BI__builtin_elementwise_log10:
1614 case Builtin::BI__builtin_elementwise_cos:
1616 case Builtin::BI__builtin_elementwise_floor:
1618 case Builtin::BI__builtin_elementwise_round:
1620 case Builtin::BI__builtin_elementwise_rint:
1622 case Builtin::BI__builtin_elementwise_nearbyint:
1624 case Builtin::BI__builtin_elementwise_sin:
1626 case Builtin::BI__builtin_elementwise_sqrt:
1628 case Builtin::BI__builtin_elementwise_tan:
1630 case Builtin::BI__builtin_elementwise_trunc:
1632 case Builtin::BI__builtin_elementwise_fmod:
1633 return RValue::get(
1635 case Builtin::BI__builtin_elementwise_ceil:
1636 case Builtin::BI__builtin_elementwise_exp10:
1637 case Builtin::BI__builtin_elementwise_ldexp:
1638 case Builtin::BI__builtin_elementwise_pow:
1639 case Builtin::BI__builtin_elementwise_bitreverse:
1640 case Builtin::BI__builtin_elementwise_cosh:
1641 case Builtin::BI__builtin_elementwise_popcount:
1642 case Builtin::BI__builtin_elementwise_roundeven:
1643 case Builtin::BI__builtin_elementwise_sinh:
1644 case Builtin::BI__builtin_elementwise_tanh:
1645 case Builtin::BI__builtin_elementwise_canonicalize:
1646 case Builtin::BI__builtin_elementwise_copysign:
1647 case Builtin::BI__builtin_elementwise_fma:
1648 return errorBuiltinNYI(*this, e, builtinID);
1649 case Builtin::BI__builtin_elementwise_fshl: {
1650 mlir::Location loc = getLoc(e->getExprLoc());
1651 mlir::Value a = emitScalarExpr(e->getArg(0));
1652 mlir::Value b = emitScalarExpr(e->getArg(1));
1653 mlir::Value c = emitScalarExpr(e->getArg(2));
1654 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshl", a.getType(),
1655 mlir::ValueRange{a, b, c}));
1656 }
1657 case Builtin::BI__builtin_elementwise_fshr: {
1658 mlir::Location loc = getLoc(e->getExprLoc());
1659 mlir::Value a = emitScalarExpr(e->getArg(0));
1660 mlir::Value b = emitScalarExpr(e->getArg(1));
1661 mlir::Value c = emitScalarExpr(e->getArg(2));
1662 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshr", a.getType(),
1663 mlir::ValueRange{a, b, c}));
1664 }
1665 case Builtin::BI__builtin_elementwise_add_sat:
1666 case Builtin::BI__builtin_elementwise_sub_sat:
1667 case Builtin::BI__builtin_elementwise_max:
1668 case Builtin::BI__builtin_elementwise_min:
1669 case Builtin::BI__builtin_elementwise_maxnum:
1670 case Builtin::BI__builtin_elementwise_minnum:
1671 case Builtin::BI__builtin_elementwise_maximum:
1672 case Builtin::BI__builtin_elementwise_minimum:
1673 case Builtin::BI__builtin_elementwise_maximumnum:
1674 case Builtin::BI__builtin_elementwise_minimumnum:
1675 case Builtin::BI__builtin_reduce_max:
1676 case Builtin::BI__builtin_reduce_min:
1677 case Builtin::BI__builtin_reduce_add:
1678 case Builtin::BI__builtin_reduce_mul:
1679 case Builtin::BI__builtin_reduce_xor:
1680 case Builtin::BI__builtin_reduce_or:
1681 case Builtin::BI__builtin_reduce_and:
1682 case Builtin::BI__builtin_reduce_assoc_fadd:
1683 case Builtin::BI__builtin_reduce_in_order_fadd:
1684 case Builtin::BI__builtin_reduce_maximum:
1685 case Builtin::BI__builtin_reduce_minimum:
1686 case Builtin::BI__builtin_matrix_transpose:
1687 case Builtin::BI__builtin_matrix_column_major_load:
1688 case Builtin::BI__builtin_matrix_column_major_store:
1689 case Builtin::BI__builtin_masked_load:
1690 case Builtin::BI__builtin_masked_expand_load:
1691 case Builtin::BI__builtin_masked_gather:
1692 case Builtin::BI__builtin_masked_store:
1693 case Builtin::BI__builtin_masked_compress_store:
1694 case Builtin::BI__builtin_masked_scatter:
1695 return errorBuiltinNYI(*this, e, builtinID);
1696 case Builtin::BI__builtin_isinf_sign: {
1697 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1698 mlir::Location loc = getLoc(e->getBeginLoc());
1699 mlir::Value arg = emitScalarExpr(e->getArg(0));
1700 mlir::Value isInf =
1701 builder.createIsFPClass(loc, arg, cir::FPClassTest::Infinity);
1702 mlir::Value isNeg = emitSignBit(loc, *this, arg);
1703 mlir::Type intTy = convertType(e->getType());
1704 cir::ConstantOp zero = builder.getNullValue(intTy, loc);
1705 cir::ConstantOp one = builder.getConstant(loc, cir::IntAttr::get(intTy, 1));
1706 cir::ConstantOp negativeOne =
1707 builder.getConstant(loc, cir::IntAttr::get(intTy, -1));
1708 mlir::Value signResult = builder.createSelect(loc, isNeg, negativeOne, one);
1709 mlir::Value result = builder.createSelect(loc, isInf, signResult, zero);
1710 return RValue::get(result);
1711 }
1712 case Builtin::BI__builtin_flt_rounds: {
1713 mlir::Location loc = getLoc(e->getExprLoc());
1714 mlir::Type resultType = convertType(e->getType());
1715 mlir::Value result =
1716 builder.emitIntrinsicCallOp(loc, "get.rounding", resultType);
1717 if (result.getType() != resultType)
1718 result =
1719 builder.createCast(loc, cir::CastKind::integral, result, resultType);
1720 return RValue::get(result);
1721 }
1722 case Builtin::BI__builtin_set_flt_rounds: {
1723 mlir::Location loc = getLoc(e->getExprLoc());
1724 mlir::Value v = emitScalarExpr(e->getArg(0));
1725 builder.emitIntrinsicCallOp(loc, "set.rounding", builder.getVoidTy(),
1726 mlir::ValueRange{v});
1727 return RValue::get(nullptr);
1728 }
1729 case Builtin::BI__builtin_fpclassify: {
1730 CIRGenFunction::CIRGenFPOptionsRAII fPOptsRAII(*this, e);
1731 mlir::Location loc = getLoc(e->getBeginLoc());
1732 mlir::Value value = emitScalarExpr(e->getArg(5));
1733 mlir::Type resultTy = convertType(e->getType());
1734 // if isZero then
1735 // result = FP_ZERO
1736 // elseif isNan then
1737 // result = FP_NAN
1738 // elseif isInfinity then
1739 // result = FP_INFINITE
1740 // elseif isNormal then
1741 // result = FP_NORMAL
1742 // else
1743 // result = FP_SUBNORMAL
1744 auto isZero =
1745 cir::IsFPClassOp::create(builder, loc, value, cir::FPClassTest::Zero);
1746 mlir::Value result =
1747 cir::TernaryOp::create(
1748 builder, loc, isZero,
1749 /*thenBuilder=*/
1750 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1751 mlir::Value zeroLiteral = emitScalarExpr(e->getArg(4));
1752 cir::YieldOp::create(opBuilder, location, zeroLiteral);
1753 },
1754 /*elseBuilder=*/
1755 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1756 auto isNan = cir::IsFPClassOp::create(opBuilder, location, value,
1757 cir::FPClassTest::Nan);
1758 mlir::Value nanResult =
1759 cir::TernaryOp::create(
1760 opBuilder, location, isNan,
1761 /*thenBuilder=*/
1762 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1763 mlir::Value nanLiteral = emitScalarExpr(e->getArg(0));
1764 cir::YieldOp::create(opBuilder, location, nanLiteral);
1765 },
1766 /*elseBuilder=*/
1767 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1768 auto isInfinity = cir::IsFPClassOp::create(
1769 opBuilder, location, value,
1770 cir::FPClassTest::Infinity);
1771 mlir::Value infResult =
1772 cir::TernaryOp::create(
1773 opBuilder, location, isInfinity,
1774 /*thenBuilder=*/
1775 [&](mlir::OpBuilder &opBuilder,
1776 mlir::Location location) {
1777 mlir::Value infinityLiteral =
1778 emitScalarExpr(e->getArg(1));
1779 cir::YieldOp::create(opBuilder, location,
1780 infinityLiteral);
1781 },
1782 /*elseBuilder=*/
1783 [&](mlir::OpBuilder &opBuilder,
1784 mlir::Location location) {
1785 auto isNormal = cir::IsFPClassOp::create(
1786 opBuilder, location, value,
1787 cir::FPClassTest::Normal);
1788 mlir::Value fpNormal =
1789 emitScalarExpr(e->getArg(2));
1790 mlir::Value fpSubnormal =
1791 emitScalarExpr(e->getArg(3));
1792 mlir::Value returnValue =
1793 cir::SelectOp::create(
1794 opBuilder, location, resultTy,
1795 isNormal, fpNormal, fpSubnormal);
1796 cir::YieldOp::create(opBuilder, location,
1797 returnValue);
1798 })
1799 .getResult();
1800 cir::YieldOp::create(opBuilder, location, infResult);
1801 })
1802 .getResult();
1803 cir::YieldOp::create(opBuilder, location, nanResult);
1804 })
1805 .getResult();
1806 return RValue::get(result);
1807 }
1808 case Builtin::BIalloca:
1809 case Builtin::BI_alloca:
1810 case Builtin::BI__builtin_alloca_uninitialized:
1811 case Builtin::BI__builtin_alloca:
1812 return emitBuiltinAlloca(*this, e, builtinID);
1813 case Builtin::BI__builtin_alloca_with_align_uninitialized:
1814 case Builtin::BI__builtin_alloca_with_align:
1815 case Builtin::BI__builtin_infer_alloc_token:
1816 return errorBuiltinNYI(*this, e, builtinID);
1817 case Builtin::BIbzero:
1818 case Builtin::BI__builtin_bzero: {
1819 mlir::Location loc = getLoc(e->getSourceRange());
1820 Address destPtr = emitPointerWithAlignment(e->getArg(0));
1821 Address destPtrCast = destPtr.withElementType(builder, cgm.voidTy);
1822 mlir::Value size = emitScalarExpr(e->getArg(1));
1823 mlir::Value zero = builder.getNullValue(builder.getUInt8Ty(), loc);
1825 builder.createMemSet(loc, destPtrCast, zero, size);
1827 return RValue::getIgnored();
1828 }
1829 case Builtin::BIbcopy:
1830 case Builtin::BI__builtin_bcopy: {
1833 mlir::Value sizeVal = emitScalarExpr(e->getArg(2));
1835 e->getArg(0)->getExprLoc(), fd, 0);
1837 e->getArg(1)->getExprLoc(), fd, 0);
1838 builder.createMemMove(getLoc(e->getSourceRange()), dest.getPointer(),
1839 src.getPointer(), sizeVal);
1840 return RValue::get(nullptr);
1841 }
1842 case Builtin::BI__builtin_char_memchr:
1843 case Builtin::BI__builtin_memchr: {
1844 Address srcPtr = emitPointerWithAlignment(e->getArg(0));
1845 mlir::Value src =
1846 builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy());
1847 mlir::Value pattern = emitScalarExpr(e->getArg(1));
1848 mlir::Value len = emitScalarExpr(e->getArg(2));
1849 mlir::Value res = cir::MemChrOp::create(builder, getLoc(e->getExprLoc()),
1850 src, pattern, len);
1851 return RValue::get(res);
1852 }
1853 case Builtin::BImemcpy:
1854 case Builtin::BI__builtin_memcpy:
1855 case Builtin::BImempcpy:
1856 case Builtin::BI__builtin_mempcpy:
1857 case Builtin::BI__builtin_memcpy_inline:
1858 case Builtin::BI__builtin___memcpy_chk:
1859 case Builtin::BI__builtin_objc_memmove_collectable:
1860 case Builtin::BI__builtin___memmove_chk:
1861 case Builtin::BI__builtin_trivially_relocate:
1862 case Builtin::BImemmove:
1863 case Builtin::BI__builtin_memmove:
1864 case Builtin::BImemset:
1865 case Builtin::BI__builtin_memset:
1866 case Builtin::BI__builtin_memset_inline:
1867 case Builtin::BI__builtin___memset_chk:
1868 case Builtin::BI__builtin_wmemchr:
1869 case Builtin::BI__builtin_wmemcmp:
1870 break; // Handled as library calls below.
1871 case Builtin::BI__builtin_dwarf_cfa:
1872 return errorBuiltinNYI(*this, e, builtinID);
1873 case Builtin::BI__builtin_return_address: {
1874 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1875 return RValue::get(cir::ReturnAddrOp::create(
1876 builder, getLoc(e->getExprLoc()),
1877 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
1878 }
1879 case Builtin::BI_ReturnAddress: {
1880 return RValue::get(cir::ReturnAddrOp::create(
1881 builder, getLoc(e->getExprLoc()),
1882 builder.getConstInt(loc, builder.getUInt32Ty(), 0)));
1883 }
1884 case Builtin::BI__builtin_frame_address: {
1885 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1886 mlir::Location loc = getLoc(e->getExprLoc());
1887 mlir::Value addr = cir::FrameAddrOp::create(
1888 builder, loc, allocaInt8PtrTy,
1889 builder.getConstAPInt(loc, builder.getUInt32Ty(), level));
1890 return RValue::get(
1891 builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy));
1892 }
1893 case Builtin::BI__builtin_extract_return_addr:
1894 case Builtin::BI__builtin_frob_return_addr:
1895 case Builtin::BI__builtin_dwarf_sp_column:
1896 case Builtin::BI__builtin_init_dwarf_reg_size_table:
1897 case Builtin::BI__builtin_eh_return:
1898 case Builtin::BI__builtin_unwind_init:
1899 case Builtin::BI__builtin_extend_pointer:
1900 return errorBuiltinNYI(*this, e, builtinID);
1901 case Builtin::BI__builtin_setjmp: {
1903 mlir::Location loc = getLoc(e->getExprLoc());
1904
1905 cir::PointerType voidPtrTy = builder.getVoidPtrTy();
1906 cir::PointerType ppTy = builder.getPointerTo(voidPtrTy);
1907 Address castBuf = buf.withElementType(builder, voidPtrTy);
1908
1910 if (getTarget().getTriple().isSystemZ()) {
1911 cgm.errorNYI(e->getExprLoc(), "setjmp on SystemZ");
1912 return {};
1913 }
1914
1915 mlir::Value frameAddress =
1916 cir::FrameAddrOp::create(builder, loc, voidPtrTy,
1917 mlir::ValueRange{builder.getUInt32(0, loc)})
1918 .getResult();
1919
1920 builder.createStore(loc, frameAddress, castBuf);
1921
1922 mlir::Value stacksave =
1923 cir::StackSaveOp::create(builder, loc, voidPtrTy).getResult();
1924 cir::PtrStrideOp stackSaveSlot = cir::PtrStrideOp::create(
1925 builder, loc, ppTy, castBuf.getPointer(), builder.getSInt32(2, loc));
1926 llvm::TypeSize voidPtrTySize =
1927 cgm.getDataLayout().getTypeAllocSize(voidPtrTy);
1928 CharUnits slotAlign = castBuf.getAlignment().alignmentAtOffset(
1929 CharUnits().fromQuantity(2 * voidPtrTySize));
1930 Address slotAddr = Address(stackSaveSlot, voidPtrTy, slotAlign);
1931 builder.createStore(loc, stacksave, slotAddr);
1932 auto op = cir::EhSetjmpOp::create(builder, loc, castBuf.getPointer());
1933 return RValue::get(op);
1934 }
1935 case Builtin::BI__builtin_longjmp: {
1936 mlir::Value buf = emitScalarExpr(e->getArg(0));
1937 mlir::Location loc = getLoc(e->getExprLoc());
1938
1939 cir::EhLongjmpOp::create(builder, loc, buf);
1940 cir::UnreachableOp::create(builder, loc);
1941 return RValue::get(nullptr);
1942 }
1943 case Builtin::BI__builtin_launder:
1944 case Builtin::BI__sync_fetch_and_add:
1945 case Builtin::BI__sync_fetch_and_sub:
1946 case Builtin::BI__sync_fetch_and_or:
1947 case Builtin::BI__sync_fetch_and_and:
1948 case Builtin::BI__sync_fetch_and_xor:
1949 case Builtin::BI__sync_fetch_and_nand:
1950 case Builtin::BI__sync_add_and_fetch:
1951 case Builtin::BI__sync_sub_and_fetch:
1952 case Builtin::BI__sync_and_and_fetch:
1953 case Builtin::BI__sync_or_and_fetch:
1954 case Builtin::BI__sync_xor_and_fetch:
1955 case Builtin::BI__sync_nand_and_fetch:
1956 case Builtin::BI__sync_val_compare_and_swap:
1957 case Builtin::BI__sync_bool_compare_and_swap:
1958 case Builtin::BI__sync_lock_test_and_set:
1959 case Builtin::BI__sync_lock_release:
1960 case Builtin::BI__sync_swap:
1961 return errorBuiltinNYI(*this, e, builtinID);
1962 case Builtin::BI__sync_fetch_and_add_1:
1963 case Builtin::BI__sync_fetch_and_add_2:
1964 case Builtin::BI__sync_fetch_and_add_4:
1965 case Builtin::BI__sync_fetch_and_add_8:
1966 case Builtin::BI__sync_fetch_and_add_16:
1967 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Add, e);
1968 case Builtin::BI__sync_fetch_and_sub_1:
1969 case Builtin::BI__sync_fetch_and_sub_2:
1970 case Builtin::BI__sync_fetch_and_sub_4:
1971 case Builtin::BI__sync_fetch_and_sub_8:
1972 case Builtin::BI__sync_fetch_and_sub_16:
1973 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Sub, e);
1974 case Builtin::BI__sync_fetch_and_or_1:
1975 case Builtin::BI__sync_fetch_and_or_2:
1976 case Builtin::BI__sync_fetch_and_or_4:
1977 case Builtin::BI__sync_fetch_and_or_8:
1978 case Builtin::BI__sync_fetch_and_or_16:
1979 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Or, e);
1980 case Builtin::BI__sync_fetch_and_and_1:
1981 case Builtin::BI__sync_fetch_and_and_2:
1982 case Builtin::BI__sync_fetch_and_and_4:
1983 case Builtin::BI__sync_fetch_and_and_8:
1984 case Builtin::BI__sync_fetch_and_and_16:
1985 return emitBinaryAtomic(*this, cir::AtomicFetchKind::And, e);
1986 case Builtin::BI__sync_fetch_and_xor_1:
1987 case Builtin::BI__sync_fetch_and_xor_2:
1988 case Builtin::BI__sync_fetch_and_xor_4:
1989 case Builtin::BI__sync_fetch_and_xor_8:
1990 case Builtin::BI__sync_fetch_and_xor_16:
1991 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Xor, e);
1992 case Builtin::BI__sync_fetch_and_nand_1:
1993 case Builtin::BI__sync_fetch_and_nand_2:
1994 case Builtin::BI__sync_fetch_and_nand_4:
1995 case Builtin::BI__sync_fetch_and_nand_8:
1996 case Builtin::BI__sync_fetch_and_nand_16:
1997 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Nand, e);
1998 case Builtin::BI__sync_fetch_and_min:
1999 case Builtin::BI__sync_fetch_and_max:
2000 case Builtin::BI__sync_fetch_and_umin:
2001 case Builtin::BI__sync_fetch_and_umax:
2002 return errorBuiltinNYI(*this, e, builtinID);
2003 return getUndefRValue(e->getType());
2004 case Builtin::BI__sync_add_and_fetch_1:
2005 case Builtin::BI__sync_add_and_fetch_2:
2006 case Builtin::BI__sync_add_and_fetch_4:
2007 case Builtin::BI__sync_add_and_fetch_8:
2008 case Builtin::BI__sync_add_and_fetch_16:
2009 return emitBinaryAtomicPost<cir::AddOp>(*this, cir::AtomicFetchKind::Add,
2010 e);
2011 case Builtin::BI__sync_sub_and_fetch_1:
2012 case Builtin::BI__sync_sub_and_fetch_2:
2013 case Builtin::BI__sync_sub_and_fetch_4:
2014 case Builtin::BI__sync_sub_and_fetch_8:
2015 case Builtin::BI__sync_sub_and_fetch_16:
2016 return emitBinaryAtomicPost<cir::SubOp>(*this, cir::AtomicFetchKind::Sub,
2017 e);
2018 case Builtin::BI__sync_and_and_fetch_1:
2019 case Builtin::BI__sync_and_and_fetch_2:
2020 case Builtin::BI__sync_and_and_fetch_4:
2021 case Builtin::BI__sync_and_and_fetch_8:
2022 case Builtin::BI__sync_and_and_fetch_16:
2023 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::And,
2024 e);
2025 case Builtin::BI__sync_or_and_fetch_1:
2026 case Builtin::BI__sync_or_and_fetch_2:
2027 case Builtin::BI__sync_or_and_fetch_4:
2028 case Builtin::BI__sync_or_and_fetch_8:
2029 case Builtin::BI__sync_or_and_fetch_16:
2030 return emitBinaryAtomicPost<cir::OrOp>(*this, cir::AtomicFetchKind::Or, e);
2031 case Builtin::BI__sync_xor_and_fetch_1:
2032 case Builtin::BI__sync_xor_and_fetch_2:
2033 case Builtin::BI__sync_xor_and_fetch_4:
2034 case Builtin::BI__sync_xor_and_fetch_8:
2035 case Builtin::BI__sync_xor_and_fetch_16:
2036 return emitBinaryAtomicPost<cir::XorOp>(*this, cir::AtomicFetchKind::Xor,
2037 e);
2038 case Builtin::BI__sync_nand_and_fetch_1:
2039 case Builtin::BI__sync_nand_and_fetch_2:
2040 case Builtin::BI__sync_nand_and_fetch_4:
2041 case Builtin::BI__sync_nand_and_fetch_8:
2042 case Builtin::BI__sync_nand_and_fetch_16:
2043 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::Nand,
2044 e, /*invert=*/true);
2045 case Builtin::BI__sync_val_compare_and_swap_1:
2046 case Builtin::BI__sync_val_compare_and_swap_2:
2047 case Builtin::BI__sync_val_compare_and_swap_4:
2048 case Builtin::BI__sync_val_compare_and_swap_8:
2049 case Builtin::BI__sync_val_compare_and_swap_16:
2050 case Builtin::BI__sync_bool_compare_and_swap_1:
2051 case Builtin::BI__sync_bool_compare_and_swap_2:
2052 case Builtin::BI__sync_bool_compare_and_swap_4:
2053 case Builtin::BI__sync_bool_compare_and_swap_8:
2054 case Builtin::BI__sync_bool_compare_and_swap_16:
2055 case Builtin::BI__sync_swap_1:
2056 case Builtin::BI__sync_swap_2:
2057 case Builtin::BI__sync_swap_4:
2058 case Builtin::BI__sync_swap_8:
2059 case Builtin::BI__sync_swap_16:
2060 case Builtin::BI__sync_lock_test_and_set_1:
2061 case Builtin::BI__sync_lock_test_and_set_2:
2062 case Builtin::BI__sync_lock_test_and_set_4:
2063 case Builtin::BI__sync_lock_test_and_set_8:
2064 case Builtin::BI__sync_lock_test_and_set_16:
2065 case Builtin::BI__sync_lock_release_1:
2066 case Builtin::BI__sync_lock_release_2:
2067 case Builtin::BI__sync_lock_release_4:
2068 case Builtin::BI__sync_lock_release_8:
2069 case Builtin::BI__sync_lock_release_16:
2070 case Builtin::BI__sync_synchronize:
2071 case Builtin::BI__builtin_nontemporal_load:
2072 case Builtin::BI__builtin_nontemporal_store:
2073 case Builtin::BI__c11_atomic_is_lock_free:
2074 case Builtin::BI__atomic_is_lock_free:
2075 case Builtin::BI__atomic_test_and_set:
2076 case Builtin::BI__atomic_clear:
2077 return errorBuiltinNYI(*this, e, builtinID);
2078 case Builtin::BI__atomic_thread_fence:
2079 case Builtin::BI__c11_atomic_thread_fence: {
2080 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
2081 return RValue::get(nullptr);
2082 }
2083 case Builtin::BI__atomic_signal_fence:
2084 case Builtin::BI__c11_atomic_signal_fence: {
2085 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
2086 return RValue::get(nullptr);
2087 }
2088 case Builtin::BI__scoped_atomic_thread_fence:
2089 return errorBuiltinNYI(*this, e, builtinID);
2090 case Builtin::BI__builtin_signbit:
2091 case Builtin::BI__builtin_signbitf:
2092 case Builtin::BI__builtin_signbitl: {
2093 CIRGenFunction::CIRGenFPOptionsRAII fPOptsRAII(*this, e);
2094 mlir::Location loc = getLoc(e->getBeginLoc());
2095 mlir::Value value = emitScalarExpr(e->getArg(0));
2096 mlir::Operation *signBitOp = cir::SignBitOp::create(builder, loc, value);
2097 mlir::Value result = builder.createBoolToInt(signBitOp->getResult(0),
2098 convertType(e->getType()));
2099 return RValue::get(result);
2100 }
2101 case Builtin::BI__warn_memset_zero_len:
2102 case Builtin::BI__annotation:
2103 case Builtin::BI__builtin_annotation:
2104 case Builtin::BI__builtin_addcb:
2105 case Builtin::BI__builtin_addcs:
2106 case Builtin::BI__builtin_addc:
2107 case Builtin::BI__builtin_addcl:
2108 case Builtin::BI__builtin_addcll:
2109 case Builtin::BI__builtin_subcb:
2110 case Builtin::BI__builtin_subcs:
2111 case Builtin::BI__builtin_subc:
2112 case Builtin::BI__builtin_subcl:
2113 case Builtin::BI__builtin_subcll:
2114 return errorBuiltinNYI(*this, e, builtinID);
2115
2116 case Builtin::BI__builtin_add_overflow:
2117 case Builtin::BI__builtin_sub_overflow:
2118 case Builtin::BI__builtin_mul_overflow: {
2119 const clang::Expr *leftArg = e->getArg(0);
2120 const clang::Expr *rightArg = e->getArg(1);
2121 const clang::Expr *resultArg = e->getArg(2);
2122
2123 clang::QualType resultQTy =
2124 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
2125
2126 WidthAndSignedness leftInfo =
2127 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
2128 WidthAndSignedness rightInfo =
2129 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
2130 WidthAndSignedness resultInfo =
2131 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
2132
2133 // Note we compute the encompassing type with the consideration to the
2134 // result type, so later in LLVM lowering we don't get redundant integral
2135 // extension casts.
2136 WidthAndSignedness encompassingInfo =
2137 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
2138
2139 auto encompassingCIRTy = cir::IntType::get(
2140 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
2141 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
2142
2143 mlir::Value x = emitScalarExpr(leftArg);
2144 mlir::Value y = emitScalarExpr(rightArg);
2145 Address resultPtr = emitPointerWithAlignment(resultArg);
2146
2147 // Extend each operand to the encompassing type, if necessary.
2148 if (x.getType() != encompassingCIRTy)
2149 x = builder.createCast(cir::CastKind::integral, x, encompassingCIRTy);
2150 if (y.getType() != encompassingCIRTy)
2151 y = builder.createCast(cir::CastKind::integral, y, encompassingCIRTy);
2152
2153 // Perform the operation on the extended values.
2154 mlir::Location loc = getLoc(e->getSourceRange());
2155 mlir::Value result, overflow;
2156 switch (builtinID) {
2157 default:
2158 llvm_unreachable("Unknown overflow builtin id.");
2159 case Builtin::BI__builtin_add_overflow:
2160 std::tie(result, overflow) =
2161 emitOverflowOp<cir::AddOverflowOp>(builder, loc, resultCIRTy, x, y);
2162 break;
2163 case Builtin::BI__builtin_sub_overflow:
2164 std::tie(result, overflow) =
2165 emitOverflowOp<cir::SubOverflowOp>(builder, loc, resultCIRTy, x, y);
2166 break;
2167 case Builtin::BI__builtin_mul_overflow:
2168 std::tie(result, overflow) =
2169 emitOverflowOp<cir::MulOverflowOp>(builder, loc, resultCIRTy, x, y);
2170 break;
2171 }
2172
2173 // Here is a slight difference from the original clang CodeGen:
2174 // - In the original clang CodeGen, the checked arithmetic result is
2175 // first computed as a value of the encompassing type, and then it is
2176 // truncated to the actual result type with a second overflow checking.
2177 // - In CIRGen, the checked arithmetic operation directly produce the
2178 // checked arithmetic result in its expected type.
2179 //
2180 // So we don't need a truncation and a second overflow checking here.
2181
2182 // Finally, store the result using the pointer.
2183 bool isVolatile =
2184 resultArg->getType()->getPointeeType().isVolatileQualified();
2185 builder.createStore(loc, result, resultPtr, isVolatile);
2186
2187 return RValue::get(overflow);
2188 }
2189
2190 case Builtin::BI__builtin_uadd_overflow:
2191 case Builtin::BI__builtin_uaddl_overflow:
2192 case Builtin::BI__builtin_uaddll_overflow:
2193 case Builtin::BI__builtin_usub_overflow:
2194 case Builtin::BI__builtin_usubl_overflow:
2195 case Builtin::BI__builtin_usubll_overflow:
2196 case Builtin::BI__builtin_umul_overflow:
2197 case Builtin::BI__builtin_umull_overflow:
2198 case Builtin::BI__builtin_umulll_overflow:
2199 case Builtin::BI__builtin_sadd_overflow:
2200 case Builtin::BI__builtin_saddl_overflow:
2201 case Builtin::BI__builtin_saddll_overflow:
2202 case Builtin::BI__builtin_ssub_overflow:
2203 case Builtin::BI__builtin_ssubl_overflow:
2204 case Builtin::BI__builtin_ssubll_overflow:
2205 case Builtin::BI__builtin_smul_overflow:
2206 case Builtin::BI__builtin_smull_overflow:
2207 case Builtin::BI__builtin_smulll_overflow: {
2208 // Scalarize our inputs.
2209 mlir::Value x = emitScalarExpr(e->getArg(0));
2210 mlir::Value y = emitScalarExpr(e->getArg(1));
2211
2212 const clang::Expr *resultArg = e->getArg(2);
2213 Address resultPtr = emitPointerWithAlignment(resultArg);
2214
2215 clang::QualType resultQTy =
2216 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
2217 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
2218
2219 // Create the appropriate overflow-checked arithmetic operation.
2220 mlir::Location loc = getLoc(e->getSourceRange());
2221 mlir::Value result, overflow;
2222 switch (builtinID) {
2223 default:
2224 llvm_unreachable("Unknown overflow builtin id.");
2225 case Builtin::BI__builtin_uadd_overflow:
2226 case Builtin::BI__builtin_uaddl_overflow:
2227 case Builtin::BI__builtin_uaddll_overflow:
2228 case Builtin::BI__builtin_sadd_overflow:
2229 case Builtin::BI__builtin_saddl_overflow:
2230 case Builtin::BI__builtin_saddll_overflow:
2231 std::tie(result, overflow) =
2232 emitOverflowOp<cir::AddOverflowOp>(builder, loc, resultCIRTy, x, y);
2233 break;
2234 case Builtin::BI__builtin_usub_overflow:
2235 case Builtin::BI__builtin_usubl_overflow:
2236 case Builtin::BI__builtin_usubll_overflow:
2237 case Builtin::BI__builtin_ssub_overflow:
2238 case Builtin::BI__builtin_ssubl_overflow:
2239 case Builtin::BI__builtin_ssubll_overflow:
2240 std::tie(result, overflow) =
2241 emitOverflowOp<cir::SubOverflowOp>(builder, loc, resultCIRTy, x, y);
2242 break;
2243 case Builtin::BI__builtin_umul_overflow:
2244 case Builtin::BI__builtin_umull_overflow:
2245 case Builtin::BI__builtin_umulll_overflow:
2246 case Builtin::BI__builtin_smul_overflow:
2247 case Builtin::BI__builtin_smull_overflow:
2248 case Builtin::BI__builtin_smulll_overflow:
2249 std::tie(result, overflow) =
2250 emitOverflowOp<cir::MulOverflowOp>(builder, loc, resultCIRTy, x, y);
2251 break;
2252 }
2253
2254 bool isVolatile =
2255 resultArg->getType()->getPointeeType().isVolatileQualified();
2256 builder.createStore(loc, emitToMemory(result, resultQTy), resultPtr,
2257 isVolatile);
2258
2259 return RValue::get(overflow);
2260 }
2261
2262 case Builtin::BIaddressof:
2263 case Builtin::BI__addressof:
2264 case Builtin::BI__builtin_addressof:
2265 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2266 case Builtin::BI__builtin_function_start:
2267 return errorBuiltinNYI(*this, e, builtinID);
2268 case Builtin::BI__builtin_operator_new:
2270 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
2271 case Builtin::BI__builtin_operator_delete:
2273 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
2274 return RValue::get(nullptr);
2275 case Builtin::BI__builtin_is_aligned:
2276 case Builtin::BI__builtin_align_up:
2277 case Builtin::BI__builtin_align_down:
2278 case Builtin::BI__noop:
2279 case Builtin::BI__builtin_call_with_static_chain:
2280 case Builtin::BI_InterlockedExchange8:
2281 case Builtin::BI_InterlockedExchange16:
2282 case Builtin::BI_InterlockedExchange:
2283 case Builtin::BI_InterlockedExchangePointer:
2284 case Builtin::BI_InterlockedCompareExchangePointer:
2285 case Builtin::BI_InterlockedCompareExchangePointer_nf:
2286 case Builtin::BI_InterlockedCompareExchange8:
2287 case Builtin::BI_InterlockedCompareExchange16:
2288 case Builtin::BI_InterlockedCompareExchange:
2289 case Builtin::BI_InterlockedCompareExchange64:
2290 case Builtin::BI_InterlockedIncrement16:
2291 case Builtin::BI_InterlockedIncrement:
2292 case Builtin::BI_InterlockedDecrement16:
2293 case Builtin::BI_InterlockedDecrement:
2294 case Builtin::BI_InterlockedAnd8:
2295 case Builtin::BI_InterlockedAnd16:
2296 case Builtin::BI_InterlockedAnd:
2297 case Builtin::BI_InterlockedExchangeAdd8:
2298 case Builtin::BI_InterlockedExchangeAdd16:
2299 case Builtin::BI_InterlockedExchangeAdd:
2300 case Builtin::BI_InterlockedExchangeSub8:
2301 case Builtin::BI_InterlockedExchangeSub16:
2302 case Builtin::BI_InterlockedExchangeSub:
2303 case Builtin::BI_InterlockedOr8:
2304 case Builtin::BI_InterlockedOr16:
2305 case Builtin::BI_InterlockedOr:
2306 case Builtin::BI_InterlockedXor8:
2307 case Builtin::BI_InterlockedXor16:
2308 case Builtin::BI_InterlockedXor:
2309 case Builtin::BI_bittest64:
2310 case Builtin::BI_bittest:
2311 case Builtin::BI_bittestandcomplement64:
2312 case Builtin::BI_bittestandcomplement:
2313 case Builtin::BI_bittestandreset64:
2314 case Builtin::BI_bittestandreset:
2315 case Builtin::BI_bittestandset64:
2316 case Builtin::BI_bittestandset:
2317 case Builtin::BI_interlockedbittestandreset:
2318 case Builtin::BI_interlockedbittestandreset64:
2319 case Builtin::BI_interlockedbittestandreset64_acq:
2320 case Builtin::BI_interlockedbittestandreset64_rel:
2321 case Builtin::BI_interlockedbittestandreset64_nf:
2322 case Builtin::BI_interlockedbittestandset64:
2323 case Builtin::BI_interlockedbittestandset64_acq:
2324 case Builtin::BI_interlockedbittestandset64_rel:
2325 case Builtin::BI_interlockedbittestandset64_nf:
2326 case Builtin::BI_interlockedbittestandset:
2327 case Builtin::BI_interlockedbittestandset_acq:
2328 case Builtin::BI_interlockedbittestandset_rel:
2329 case Builtin::BI_interlockedbittestandset_nf:
2330 case Builtin::BI_interlockedbittestandreset_acq:
2331 case Builtin::BI_interlockedbittestandreset_rel:
2332 case Builtin::BI_interlockedbittestandreset_nf:
2333 case Builtin::BI__iso_volatile_load8:
2334 case Builtin::BI__iso_volatile_load16:
2335 case Builtin::BI__iso_volatile_load32:
2336 case Builtin::BI__iso_volatile_load64:
2337 case Builtin::BI__iso_volatile_store8:
2338 case Builtin::BI__iso_volatile_store16:
2339 case Builtin::BI__iso_volatile_store32:
2340 case Builtin::BI__iso_volatile_store64:
2341 case Builtin::BI__builtin_ptrauth_sign_constant:
2342 case Builtin::BI__builtin_ptrauth_auth:
2343 case Builtin::BI__builtin_ptrauth_auth_and_resign:
2344 case Builtin::BI__builtin_ptrauth_blend_discriminator:
2345 case Builtin::BI__builtin_ptrauth_sign_generic_data:
2346 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
2347 case Builtin::BI__builtin_ptrauth_strip:
2348 case Builtin::BI__builtin_get_vtable_pointer:
2349 case Builtin::BI__exception_code:
2350 case Builtin::BI_exception_code:
2351 case Builtin::BI__exception_info:
2352 case Builtin::BI_exception_info:
2353 case Builtin::BI__abnormal_termination:
2354 case Builtin::BI_abnormal_termination:
2355 return errorBuiltinNYI(*this, e, builtinID);
2356 case Builtin::BI_setjmpex:
2357 case Builtin::BI_setjmp:
2358 if (getTarget().getTriple().isOSMSVCRT()) {
2359 cgm.errorNYI(e->getSourceRange(), "setjmp/setjmpex on MSVCRT");
2360 return getUndefRValue(e->getType());
2361 }
2362 // Else break and this will be handled as a library call.
2363 break;
2364 case Builtin::BImove:
2365 case Builtin::BImove_if_noexcept:
2366 case Builtin::BIforward:
2367 case Builtin::BIforward_like:
2368 case Builtin::BIas_const:
2369 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2370 case Builtin::BI__GetExceptionInfo:
2371 case Builtin::BI__fastfail:
2372 case Builtin::BIread_pipe:
2373 case Builtin::BIwrite_pipe:
2374 case Builtin::BIreserve_read_pipe:
2375 case Builtin::BIreserve_write_pipe:
2376 case Builtin::BIwork_group_reserve_read_pipe:
2377 case Builtin::BIwork_group_reserve_write_pipe:
2378 case Builtin::BIsub_group_reserve_read_pipe:
2379 case Builtin::BIsub_group_reserve_write_pipe:
2380 case Builtin::BIcommit_read_pipe:
2381 case Builtin::BIcommit_write_pipe:
2382 case Builtin::BIwork_group_commit_read_pipe:
2383 case Builtin::BIwork_group_commit_write_pipe:
2384 case Builtin::BIsub_group_commit_read_pipe:
2385 case Builtin::BIsub_group_commit_write_pipe:
2386 case Builtin::BIget_pipe_num_packets:
2387 case Builtin::BIget_pipe_max_packets:
2388 case Builtin::BIto_global:
2389 case Builtin::BIto_local:
2390 case Builtin::BIto_private:
2391 case Builtin::BIenqueue_kernel:
2392 case Builtin::BIget_kernel_work_group_size:
2393 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2394 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2395 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2396 case Builtin::BI__builtin_store_half:
2397 case Builtin::BI__builtin_store_halff:
2398 case Builtin::BI__builtin_load_half:
2399 case Builtin::BI__builtin_load_halff:
2400 return errorBuiltinNYI(*this, e, builtinID);
2401 case Builtin::BI__builtin_printf:
2402 case Builtin::BIprintf:
2403 break;
2404 case Builtin::BI__builtin_canonicalize:
2405 case Builtin::BI__builtin_canonicalizef:
2406 case Builtin::BI__builtin_canonicalizef16:
2407 case Builtin::BI__builtin_canonicalizel:
2408 case Builtin::BI__builtin_thread_pointer:
2409 case Builtin::BI__builtin_os_log_format:
2410 case Builtin::BI__xray_customevent:
2411 case Builtin::BI__xray_typedevent:
2412 case Builtin::BI__builtin_ms_va_start:
2413 case Builtin::BI__builtin_ms_va_end:
2414 case Builtin::BI__builtin_ms_va_copy:
2415 case Builtin::BI__builtin_get_device_side_mangled_name:
2416 return errorBuiltinNYI(*this, e, builtinID);
2417 }
2418
2419 // If this is an alias for a lib function (e.g. __builtin_sin), emit
2420 // the call using the normal call path, but using the unmangled
2421 // version of the function name.
2422 if (!shouldEmitBuiltinAsIR(builtinID, getContext().BuiltinInfo, *this) &&
2423 getContext().BuiltinInfo.isLibFunction(builtinID))
2424 return emitLibraryCall(*this, fd, e,
2425 cgm.getBuiltinLibFunction(fd, builtinID));
2426
2427 // If this is a predefined lib function (e.g. malloc), emit the call
2428 // using exactly the normal call path.
2429 if (getContext().BuiltinInfo.isPredefinedLibFunction(builtinID))
2430 return emitLibraryCall(*this, fd, e,
2431 emitScalarExpr(e->getCallee()).getDefiningOp());
2432
2433 // See if we have a target specific intrinsic.
2434 std::string name = getContext().BuiltinInfo.getName(builtinID);
2435 Intrinsic::ID intrinsicID = Intrinsic::not_intrinsic;
2436 StringRef prefix =
2437 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
2438 if (!prefix.empty()) {
2439 intrinsicID = Intrinsic::getIntrinsicForClangBuiltin(prefix, name);
2440 // NOTE we don't need to perform a compatibility flag check here since the
2441 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
2442 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
2443 if (intrinsicID == Intrinsic::not_intrinsic)
2444 intrinsicID = Intrinsic::getIntrinsicForMSBuiltin(prefix, name);
2445 }
2446
2447 if (intrinsicID != Intrinsic::not_intrinsic) {
2448 unsigned iceArguments = 0;
2450 getContext().GetBuiltinType(builtinID, error, &iceArguments);
2451 assert(error == ASTContext::GE_None && "Should not codegen an error");
2452
2453 StringRef name = Intrinsic::getName(intrinsicID);
2454 // cir::LLVMIntrinsicCallOp expects intrinsic name to not have prefix
2455 // "llvm." For example, `llvm.nvvm.barrier0` should be passed as
2456 // `nvvm.barrier0`.
2457 assert(name.starts_with("llvm.") && "expected llvm. prefix");
2458 name = name.drop_front(/*strlen("llvm.")=*/5);
2459
2460 cir::FuncType intrinsicType =
2461 getIntrinsicType(*this, &getMLIRContext(), intrinsicID);
2462
2464 const FunctionDecl *fd = e->getDirectCallee();
2465 for (unsigned i = 0; i < e->getNumArgs(); i++) {
2466 mlir::Value argValue =
2467 emitScalarOrConstFoldImmArg(iceArguments, i, e->getArg(i));
2468 // If the intrinsic arg type is different from the builtin arg type
2469 // we need to do a bit cast.
2470 mlir::Type argType = argValue.getType();
2471 mlir::Type expectedTy = intrinsicType.getInput(i);
2472
2473 // Correct integer signedness based on AST parameter type
2474 mlir::Type correctedExpectedTy = expectedTy;
2475 if (fd && i < fd->getNumParams()) {
2476 correctedExpectedTy = correctIntegerSignedness(
2477 expectedTy, fd->getParamDecl(i)->getType(), &getMLIRContext());
2478 }
2479
2480 if (mlir::isa<cir::PointerType>(expectedTy)) {
2481 bool argIsPointer = mlir::isa<cir::PointerType>(argType);
2482 bool argIsVectorOfPointer = false;
2483 if (auto vecTy = dyn_cast<mlir::VectorType>(argType))
2484 argIsVectorOfPointer =
2485 mlir::isa<cir::PointerType>(vecTy.getElementType());
2486
2487 if (!argIsPointer && !argIsVectorOfPointer) {
2488 cgm.errorNYI(
2489 e->getSourceRange(),
2490 "intrinsic expects a pointer type (NYI for non-pointer)");
2491 return getUndefRValue(e->getType());
2492 }
2493
2494 // Pointer handling (address-space cast / bitcast fallback).
2495 if (argType != expectedTy)
2496 argValue = getCorrectedPtr(argValue, expectedTy, builder);
2497 } else {
2498 // Non-pointer expected type: if needed, bitcast to the corrected
2499 // expected type to match signedness/representation.
2500 if (argType != correctedExpectedTy)
2501 argValue = builder.createBitcast(argValue, correctedExpectedTy);
2502 }
2503
2504 args.push_back(argValue);
2505 }
2506
2507 // Correct return type signedness based on AST return type before creating
2508 // the call, avoiding unnecessary casts in the IR.
2509 mlir::Type correctedReturnType = intrinsicType.getReturnType();
2510 if (fd) {
2511 correctedReturnType =
2512 correctIntegerSignedness(intrinsicType.getReturnType(),
2513 fd->getReturnType(), &getMLIRContext());
2514 }
2515
2516 cir::LLVMIntrinsicCallOp intrinsicCall = cir::LLVMIntrinsicCallOp::create(
2517 builder, getLoc(e->getExprLoc()), builder.getStringAttr(name),
2518 correctedReturnType, args);
2519
2520 mlir::Value intrinsicRes = intrinsicCall.getResult();
2521
2522 if (isa<cir::VoidType>(correctedReturnType))
2523 return RValue::get(nullptr);
2524
2525 return RValue::get(intrinsicRes);
2526 }
2527
2528 // Some target-specific builtins can have aggregate return values, e.g.
2529 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
2530 // returnValue to be non-null, so that the target-specific emission code can
2531 // always just emit into it.
2533 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
2534 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2535 return getUndefRValue(e->getType());
2536 }
2537
2538 // Now see if we can emit a target-specific builtin.
2539 // FIXME: This is a temporary mechanism (double-optional semantics) that will
2540 // go away once everything is implemented:
2541 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
2542 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
2543 // but also didn't handle the builtin.
2544 if (std::optional<mlir::Value> rst =
2545 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
2546 mlir::Value v = rst.value();
2547 // CIR dialect operations may have no results, no values will be returned
2548 // even if it executes successfully.
2549 if (!v)
2550 return RValue::get(nullptr);
2551
2552 switch (evalKind) {
2553 case cir::TEK_Scalar:
2554 if (mlir::isa<cir::VoidType>(v.getType()))
2555 return RValue::get(nullptr);
2556 return RValue::get(v);
2557 case cir::TEK_Aggregate:
2558 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2559 return getUndefRValue(e->getType());
2560 case cir::TEK_Complex:
2561 llvm_unreachable("No current target builtin returns complex");
2562 }
2563 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
2564 }
2565
2566 cgm.errorNYI(e->getSourceRange(),
2567 std::string("unimplemented builtin call: ") +
2568 getContext().BuiltinInfo.getName(builtinID));
2569 return getUndefRValue(e->getType());
2570}
2571
2572static std::optional<mlir::Value>
2574 const CallExpr *e, ReturnValueSlot &returnValue,
2575 llvm::Triple::ArchType arch) {
2576 // When compiling in HipStdPar mode we have to be conservative in rejecting
2577 // target specific features in the FE, and defer the possible error to the
2578 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2579 // referenced by an accelerator executable function, we emit an error.
2580 // Returning nullptr here leads to the builtin being handled in
2581 // EmitStdParUnsupportedBuiltin.
2582 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
2583 arch != cgf->getTarget().getTriple().getArch())
2584 return std::nullopt;
2585
2586 switch (arch) {
2587 case llvm::Triple::arm:
2588 case llvm::Triple::armeb:
2589 case llvm::Triple::thumb:
2590 case llvm::Triple::thumbeb:
2591 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2592 // At this point, we don't even know that the builtin is target-specific.
2593 return std::nullopt;
2594 case llvm::Triple::aarch64:
2595 case llvm::Triple::aarch64_32:
2596 case llvm::Triple::aarch64_be:
2597 return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
2598 case llvm::Triple::bpfeb:
2599 case llvm::Triple::bpfel:
2600 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2601 // At this point, we don't even know that the builtin is target-specific.
2602 return std::nullopt;
2603
2604 case llvm::Triple::x86:
2605 case llvm::Triple::x86_64:
2606 return cgf->emitX86BuiltinExpr(builtinID, e);
2607
2608 case llvm::Triple::ppc:
2609 case llvm::Triple::ppcle:
2610 case llvm::Triple::ppc64:
2611 case llvm::Triple::ppc64le:
2612 case llvm::Triple::r600:
2613 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2614 // At this point, we don't even know that the builtin is target-specific.
2615 return std::nullopt;
2616 case llvm::Triple::amdgcn:
2617 return cgf->emitAMDGPUBuiltinExpr(builtinID, e);
2618 case llvm::Triple::systemz:
2619 return std::nullopt;
2620 case llvm::Triple::nvptx:
2621 case llvm::Triple::nvptx64:
2622 return cgf->emitNVPTXBuiltinExpr(builtinID, e);
2623 case llvm::Triple::wasm32:
2624 case llvm::Triple::wasm64:
2625 case llvm::Triple::hexagon:
2626 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2627 // At this point, we don't even know that the builtin is target-specific.
2628 return std::nullopt;
2629 case llvm::Triple::riscv32:
2630 case llvm::Triple::riscv64:
2631 return cgf->emitRISCVBuiltinExpr(builtinID, e);
2632 default:
2633 return std::nullopt;
2634 }
2635}
2636
2637std::optional<mlir::Value>
2640 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
2641 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
2643 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
2644 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
2645 }
2646
2647 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
2648 getTarget().getTriple().getArch());
2649}
2650
2652 const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
2653 mlir::Value arg = {};
2654 if ((iceArguments & (1 << idx)) == 0) {
2655 arg = emitScalarExpr(argExpr);
2656 } else {
2657 // If this is required to be a constant, constant fold it so that we
2658 // know that the generated intrinsic gets a ConstantInt.
2659 const std::optional<llvm::APSInt> result =
2661 assert(result && "Expected argument to be a constant");
2662 arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
2663 }
2664 return arg;
2665}
2666
2667/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
2668/// for "fabsf".
2670 unsigned builtinID) {
2671 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
2672
2673 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
2674 // to build this up so provide a small stack buffer to handle the vast
2675 // majority of names.
2677
2679 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
2680
2681 GlobalDecl d(fd);
2682 mlir::Type type = convertType(fd->getType());
2683 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
2684}
2685
2687 mlir::Value argValue = evaluateExprAsBool(e);
2688 if (!sanOpts.has(SanitizerKind::Builtin))
2689 return argValue;
2690
2692 cgm.errorNYI(e->getSourceRange(),
2693 "emitCheckedArgForAssume: sanitizers are NYI");
2694 return {};
2695}
2696
2697void CIRGenFunction::emitVAStart(mlir::Value vaList) {
2698 // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
2699 // early, defer to LLVM lowering.
2700 cir::VAStartOp::create(builder, vaList.getLoc(), vaList);
2701}
2702
2703void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
2704 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
2705}
2706
2707// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
2708// default this lowers to llvm.va_arg which is incomplete and not ABI-compliant
2709// on most targets so cir.va_arg will need some ABI handling in LoweringPrepare
2711 assert(!cir::MissingFeatures::msabi());
2712 assert(!cir::MissingFeatures::vlas());
2713 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
2714 mlir::Type type = convertType(ve->getType());
2715 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
2716 return cir::VAArgOp::create(builder, loc, type, vaList);
2717}
2718
/// Emit the computation for __builtin_object_size /
/// __builtin_dynamic_object_size by lowering to a cir.objsize op (or a
/// constant when the query cannot be answered at runtime).
///
/// \param e the pointer expression whose object size is queried.
/// \param type the builtin's flag argument (0-3): bit 1 selects min vs. max
///        semantics, bit 0 selects closest-subobject vs. whole-object.
/// \param resType integer type of the produced result.
/// \param emittedE if non-null, an already-emitted value for \p e so the
///        expression is not evaluated a second time.
/// \param isDynamic true for the __builtin_dynamic_object_size flavor.
mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
                                                  cir::IntType resType,
                                                  mlir::Value emittedE,
                                                  bool isDynamic) {
  // If this is a pass_object_size parameter, load the implicit size arg.
  //
  // BOS type compatibility: a pass_object_size annotation with one type can
  // satisfy a __builtin_object_size query with a different type when the
  // annotated type is a safe approximation. Type 0 (max, whole object) is
  // an overestimate for type 1 (max, closest surrounding subobject), and
  // type 3 (min, closest surrounding subobject) is an underestimate for
  // type 2 (min, whole object).
  enum BOSType {
    MaxWholeObject = 0,
    MaxSubobject = 1,
    MinWholeObject = 2,
    MinSubobject = 3,
  };
  if (auto *dre = dyn_cast<DeclRefExpr>(e->IgnoreParenImpCasts())) {
    auto *param = dyn_cast<ParmVarDecl>(dre->getDecl());
    auto *objSizeAttr = dre->getDecl()->getAttr<PassObjectSizeAttr>();
    if (param && objSizeAttr) {
      auto from = objSizeAttr->getType();
      // Exact match, or a safe over-/under-estimate as described above.
      bool compatible = from == static_cast<int>(type) ||
                        (from == MaxWholeObject && type == MaxSubobject) ||
                        (from == MinSubobject && type == MinWholeObject);
      if (compatible) {
        // The implicit size argument was recorded for this parameter when the
        // function prologue was emitted; it must have a local slot by now.
        const ImplicitParamDecl *sizeDecl = sizeArguments.lookup(param);
        assert(sizeDecl && "expected pass_object_size implicit param");

        DeclMapTy::iterator declIter = localDeclMap.find(sizeDecl);
        assert(declIter != localDeclMap.end());
        Address addr = declIter->second;

        return emitLoadOfScalar(addr, /*volatile=*/false,
                                getContext().getSizeType(), e->getBeginLoc(),
        // NOTE(review): the final argument(s) of this emitLoadOfScalar call
        // are missing from this dump (likely an AlignmentSource/LValueBaseInfo
        // argument) — restore from the original source.
      }
    }
  }

  // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate e for side-effects. In either case, just like original LLVM
  // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
  if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
    return builder.getConstInt(getLoc(e->getSourceRange()), resType,
                               (type & 2) ? 0 : -1);

  mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
  assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
         "Non-pointer passed to __builtin_object_size?");

  // NOTE(review): a line appears to be missing from this dump here —
  // confirm against the original source.

  // Extract the min/max mode from type. CIR only supports type 0
  // (max, whole object) and type 2 (min, whole object), not type 1 or 3
  // (closest subobject variants).
  const bool min = ((type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  auto op =
      cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
                             min, /*nullUnknown=*/true, isDynamic);
  return op.getResult();
}
2783
2785 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
2786 bool isDynamic) {
2787 if (std::optional<uint64_t> objectSize =
2789 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
2790 *objectSize);
2791 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
2792}
static StringRef bytes(const std::vector< T, Allocator > &v)
Defines enum values for all the target-independent builtin functions.
static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf, mlir::Value val)
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Value createBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, mlir::Value arg, Args... args)
static mlir::Type decodeFixedType(CIRGenFunction &cgf, ArrayRef< llvm::Intrinsic::IITDescriptor > &infos, mlir::MLIRContext *context)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e, bool invert=false)
static std::optional< mlir::Value > emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID, const CallExpr *e, ReturnValueSlot &returnValue, llvm::Triple::ArchType arch)
static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryAtomic(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e)
static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t, cir::IntType intType)
Emit the conversions required to turn the given value into an integer of the given size.
static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy, CIRGenBuilderTy &builder)
static std::pair< mlir::Value, mlir::Value > emitOverflowOp(CIRGenBuilderTy &builder, mlir::Location loc, cir::IntType resultTy, mlir::Value lhs, mlir::Value rhs)
Create a checked overflow arithmetic op and return its result and overflow flag.
static bool shouldEmitBuiltinAsIR(unsigned builtinID, const Builtin::Context &bi, const CIRGenFunction &cgf)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr, cir::SyncScopeKind syncScope)
static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, Args... args)
static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e)
static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue emitBuiltinBitOpWithFallback(CIRGenFunction &cgf, const CallExpr *e)
Emit a clz/ctz bit op with optional fallback for __builtin_c[lt]zg.
static cir::FuncType getIntrinsicType(CIRGenFunction &cgf, mlir::MLIRContext *context, llvm::Intrinsic::ID id)
static mlir::Value makeBinaryAtomicValue(CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, mlir::Type *originalArgType=nullptr, mlir::Value *emittedArgValue=nullptr, cir::MemOrder ordering=cir::MemOrder::SequentiallyConsistent)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType, mlir::MLIRContext *context)
Helper function to correct integer signedness for intrinsic arguments and return type.
static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t, mlir::Type resultType)
static StringRef getTriple(const Command &Job)
A collection of selectors, each with an associated kind and an ordered collection of arguments. A selector has a kind.
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ int min(int __a, int __b)
__device__ __2f16 b
__device__ __2f16 float c
cir::ConstantOp getNullValue(mlir::Type ty, mlir::Location loc)
cir::SignBitOp createSignBit(mlir::Location loc, mlir::Value val)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy)
cir::PointerType getPointerTo(mlir::Type ty)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::CmpOp createCompare(mlir::Location loc, cir::CmpOpKind kind, mlir::Value lhs, mlir::Value rhs)
mlir::Value createNot(mlir::Location loc, mlir::Value value)
mlir::Value createSelect(mlir::Location loc, mlir::Value condition, mlir::Value trueValue, mlir::Value falseValue)
cir::PointerType getVoidPtrTy(clang::LangAS langAS=clang::LangAS::Default)
mlir::Value createAddrSpaceCast(mlir::Location loc, mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
APSInt & getInt()
Definition APValue.h:508
bool isFloat() const
Definition APValue.h:486
bool isInt() const
Definition APValue.h:485
APFloat & getFloat()
Definition APValue.h:522
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
unsigned getIntWidth(QualType T) const
CanQualType VoidPtrTy
Builtin::Context & BuiltinInfo
Definition ASTContext.h:807
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:924
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isLibFunction(unsigned ID) const
Return true if this is a builtin for a libc/libm function, with a "__builtin_" prefix (e....
Definition Builtins.h:309
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
cir::IntType getSIntNTy(int n)
cir::PointerType getUInt8PtrTy()
cir::IntType getUIntNTy(int n)
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
mlir::Type convertType(clang::QualType t)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
cir::CallOp emitCoroFreeBuiltin(const CallExpr *e)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
std::optional< mlir::Value > emitRISCVBuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
llvm::SmallDenseMap< const ParmVarDecl *, const ImplicitParamDecl * > sizeArguments
If a ParmVarDecl had the pass_object_size attribute, this will contain a mapping from said ParmVarDec...
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
mlir::MLIRContext & getMLIRContext()
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
std::optional< mlir::Value > emitNVPTXBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an NVPTX builtin function.
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
clang::ASTContext & getContext() const
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
clang::ASTContext & getASTContext() const
mlir::Type convertType(clang::QualType type)
clang::DiagnosticsEngine & getDiags() const
cir::FuncOp getBuiltinLibFunction(const FunctionDecl *fd, unsigned builtinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
const llvm::Triple & getTriple() const
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
const clang::LangOptions & getLangOpts() const
cir::FuncOp getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType, clang::GlobalDecl gd, bool forVTable, bool dontDefer=false, bool isThunk=false, ForDefinition_t isForDefinition=NotForDefinition, mlir::NamedAttrList extraAttrs={})
const TargetCIRGenInfo & getTargetCIRGenInfo()
mlir::Value getPointer() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
bool isIgnored() const
Definition CIRGenValue.h:52
static RValue getIgnored()
Definition CIRGenValue.h:78
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
virtual bool supportsLibCall() const
Returns true if the target supports math library calls.
Definition TargetInfo.h:54
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:585
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:233
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a function declaration or definition.
Definition Decl.h:2018
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2815
QualType getReturnType() const
Definition Decl.h:2863
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
@ FPE_Ignore
Assume that floating-point exceptions are masked.
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8520
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
virtual bool isCLZForZeroUndef() const
The __builtin_clz* and __builtin_ctz* built-in functions are specified to have undefined results for ...
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:748
bool isBlockPointerType() const
Definition TypeBase.h:8693
bool isPointerType() const
Definition TypeBase.h:8673
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9333
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9161
bool isObjCObjectPointerType() const
Definition TypeBase.h:8852
bool isFloatingType() const
Definition Type.cpp:2389
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2332
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
const Expr * getSubExpr() const
Definition Expr.h:4976
QualType getType() const
Definition Decl.h:723
bool isMatchingAddressSpace(mlir::ptr::MemorySpaceAttrInterface cirAS, clang::LangAS as)
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
static bool builtinCheckKind()
static bool addressSpace()
static bool asmLabelAttr()
static bool msvcBuiltins()
static bool builtinCallF128()
static bool isPPC_FP128Ty()
static bool emitCheckedInBoundsGEP()
static bool fpConstraints()
static bool countedBySize()
static bool fastMathFlags()
static bool builtinBitCountExpr()
static bool builtinCall()
static bool generateDebugInfo()
cir::PointerType allocaInt8PtrTy
void* in alloca address space
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
cir::PointerType voidPtrTy
void* in address space 0
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool hasSideEffects() const
Return true if the evaluated expression has side effects.
Definition Expr.h:642
#define conj(__x)
Definition tgmath.h:1303