clang 23.0.0git
CIRGenBuiltin.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/Support/ErrorHandling.h"
31
32using namespace clang;
33using namespace clang::CIRGen;
34using namespace llvm;
35
// Emits an ordinary call for a builtin that is lowered as a plain library
// call: wraps the callee operation as a direct callee for `fd` and emits the
// call with no pre-allocated return slot.
// NOTE(review): the first line of this definition (return type, name, and the
// leading `cgf`/`fd` parameters — original line 36) was dropped by the
// extraction; recover it from the original file.
37 const CallExpr *e, mlir::Operation *calleeValue) {
38 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
39 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
40}
41
// Emits a one-argument CIR bit-manipulation operation `Op` for a builtin call
// and casts the result to the C expression type when the widths differ.
// NOTE(review): original lines 43 (the function header naming this template)
// and 45 are missing from this excerpt; recover them from the original file.
42template <typename Op>
44 bool poisonZero = false) {
46
47 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
48 CIRGenBuilderTy &builder = cgf.getBuilder();
49
// BitClzOp/BitCtzOp take an extra flag making a zero input poison; all other
// bit ops are built from the argument alone.
50 Op op;
51 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
52 std::is_same_v<Op, cir::BitCtzOp>)
53 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
54 else
55 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
56
// The op's result width may differ from the builtin's result type; insert an
// integral cast in that case.
57 mlir::Value result = op.getResult();
58 mlir::Type exprTy = cgf.convertType(e->getType());
59 if (exprTy != result.getType())
60 result = builder.createIntCast(result, exprTy);
61
62 return RValue::get(result);
63}
64
65/// Emit the conversions required to turn the given value into an
66/// integer of the given size.
67static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
68 cir::IntType intType) {
69 v = cgf.emitToMemory(v, t);
70
71 if (mlir::isa<cir::PointerType>(v.getType()))
72 return cgf.getBuilder().createPtrToInt(v, intType);
73
74 assert(v.getType() == intType);
75 return v;
76}
77
78static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
79 mlir::Type resultType) {
80 v = cgf.emitFromMemory(v, t);
81
82 if (mlir::isa<cir::PointerType>(resultType))
83 return cgf.getBuilder().createIntToPtr(v, resultType);
84
85 assert(v.getType() == resultType);
86 return v;
87}
88
// Emits a cir.signbit operation for `val` and returns its single result.
// NOTE(review): original line 91 is missing from this excerpt (numbering
// jumps 90 -> 92); recover it from the original file.
89static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf,
90 mlir::Value val) {
92 cir::SignBitOp returnValue = cgf.getBuilder().createSignBit(loc, val);
93 return returnValue->getResult(0);
94}
95
// Checks the alignment of the pointer operand of a sync/atomic builtin and
// reports warn_sync_op_misaligned when the address is not a multiple of the
// accessed width; returns the (possibly adjusted) address.
// NOTE(review): the function header (original line 96) and lines 98, 102-103
// and 110 are missing from this excerpt — the numbering gaps mark where they
// were; recover them from the original file. In particular the else-branch of
// the `bytes` conditional (lines 102-103) is absent here.
97 ASTContext &astContext = cgf.getContext();
99 unsigned bytes =
100 mlir::isa<cir::PointerType>(ptr.getElementType())
101 ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
104
// Warn when the pointer's alignment is not a multiple of the access width.
105 unsigned align = ptr.getAlignment().getQuantity();
106 if (align % bytes != 0) {
107 DiagnosticsEngine &diags = cgf.cgm.getDiags();
108 diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
109 // Force address to be at least naturally-aligned.
111 }
112 return ptr;
113}
114
115/// Utility to insert an atomic instruction based on Intrinsic::ID
116/// and the expression node.
// NOTE(review): original lines 128-129 (the remainder of the second assert)
// and 151 (the condition of the signed/unsigned ternary below) are missing
// from this excerpt; recover them from the original file.
117static mlir::Value makeBinaryAtomicValue(
118 CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
119 mlir::Type *originalArgType = nullptr,
120 mlir::Value *emittedArgValue = nullptr,
121 cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
122
123 QualType type = expr->getType();
124 QualType ptrType = expr->getArg(0)->getType();
125
126 assert(ptrType->isPointerType());
127 assert(
130 expr->getArg(1)->getType()));
131
// Validate/diagnose the destination's alignment, then emit the value operand.
132 Address destAddr = checkAtomicAlignment(cgf, expr);
133 CIRGenBuilderTy &builder = cgf.getBuilder();
134
135 mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
136 mlir::Type valueType = val.getType();
137 mlir::Value destValue = destAddr.emitRawPointer();
138
139 if (ptrType->getPointeeType()->isPointerType()) {
140 // Pointer to pointer
141 // `cir.atomic.fetch` expects a pointer to an integer type, so we cast
142 // ptr<ptr<T>> to ptr<intPtrSize>
143 cir::IntType ptrSizeInt =
144 builder.getSIntNTy(cgf.getContext().getTypeSize(ptrType));
145 destValue =
146 builder.createBitcast(destValue, builder.getPointerTo(ptrSizeInt));
147 val = emitToInt(cgf, val, type, ptrSizeInt);
148 } else {
149 // Pointer to integer type
150 cir::IntType intType =
152 ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
153 : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
154 val = emitToInt(cgf, val, type, intType);
155 }
156
157 // This output argument is needed for post atomic fetch operations
158 // that calculate the result of the operation as return value of
159 // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
160 // memory location and returns the old value.
161 if (emittedArgValue) {
162 *emittedArgValue = val;
163 assert(originalArgType != nullptr &&
164 "originalArgType must be provided when emittedArgValue is set");
165 *originalArgType = valueType;
166 }
167
// Emit the fetch-first atomic RMW; the result is the old memory value.
168 auto rmwi = cir::AtomicFetchOp::create(
169 builder, cgf.getLoc(expr->getSourceRange()), destValue, val, kind,
170 ordering, cir::SyncScopeKind::System, false, /* is volatile */
171 true); /* fetch first */
172 return rmwi->getResult(0);
173}
174
// Convenience wrapper: emits a binary atomic fetch operation and wraps the
// fetched old value as an RValue.
// NOTE(review): the function header (original line 175, presumably naming
// this helper and its CIRGenFunction parameter) is missing from this excerpt.
176 cir::AtomicFetchKind atomicOpkind,
177 const CallExpr *e) {
178 return RValue::get(makeBinaryAtomicValue(cgf, atomicOpkind, e));
179}
180
// Emits a <binop>_and_fetch style builtin: performs the atomic fetch-op and
// then re-applies BinOp to the fetched old value to produce the post-op
// result. `invert` additionally bitwise-negates the result.
// NOTE(review): original lines 182 (function header) and 190 (which appears
// to declare the `builder` used below) are missing from this excerpt;
// recover them from the original file.
181template <typename BinOp>
183 cir::AtomicFetchKind atomicOpkind,
184 const CallExpr *e, bool invert = false) {
185 mlir::Value emittedArgValue;
186 mlir::Type originalArgType;
187 clang::QualType typ = e->getType();
188 mlir::Value result = makeBinaryAtomicValue(
189 cgf, atomicOpkind, e, &originalArgType, &emittedArgValue);
// Recompute the post-operation value from the fetched old value.
191 result = BinOp::create(builder, result.getLoc(), result, emittedArgValue);
192
193 if (invert)
194 result = builder.createNot(result);
195
// Convert back to the builtin's result type (possibly a pointer).
196 result = emitFromInt(cgf, result, typ, originalArgType);
197 return RValue::get(result);
198}
199
// Emits an atomic fence for a fence-style builtin, dispatching on the
// (possibly runtime) memory-order argument of the call.
// NOTE(review): the function header (original line 200) is missing from this
// excerpt; recover it from the original file.
201 cir::SyncScopeKind syncScope) {
202 CIRGenBuilderTy &builder = cgf.getBuilder();
203 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
204
// Invoked once the memory order is known; creates the fence op with the
// requested synchronization scope.
205 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
206 cir::AtomicFenceOp::create(
207 builder, loc, memOrder,
208 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
209 };
210
211 cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
212 /*isLoad*/ false, /*isFence*/ true,
213 emitAtomicOpCallBackFn);
214}
215
namespace {
/// Pairs the bit-width of an integer type with its signedness; used by the
/// integer/overflow builtin helpers below to describe operand types and to
/// compute an encompassing arithmetic type.
struct WidthAndSignedness {
  unsigned width;
  bool isSigned;
};
} // namespace
222
// Computes the width (in bits) and signedness of the given integer type:
// bool is 1 bit, _BitInt uses its declared width, everything else uses the
// target TypeInfo width.
// NOTE(review): original line 224 (the function name and its ASTContext
// parameter) is missing from this excerpt; recover it from the original file.
223static WidthAndSignedness
225 const clang::QualType type) {
226 assert(type->isIntegerType() && "Given type is not an integer.");
227 unsigned width = type->isBooleanType() ? 1
228 : type->isBitIntType() ? astContext.getIntWidth(type)
229 : astContext.getTypeInfo(type).Width;
230 bool isSigned = type->isSignedIntegerType();
231 return {width, isSigned};
232}
233
234/// Create a checked overflow arithmetic op and return its result and overflow
235/// flag.
236template <typename OpTy>
237static std::pair<mlir::Value, mlir::Value>
238emitOverflowOp(CIRGenBuilderTy &builder, mlir::Location loc,
239 cir::IntType resultTy, mlir::Value lhs, mlir::Value rhs) {
240 auto op = OpTy::create(builder, loc, resultTy, lhs, rhs);
241 return {op.getResult(), op.getOverflow()};
242}
243
244// Given one or more integer types, this function produces an integer type that
245// encompasses them: any value in one of the given types could be expressed in
246// the encompassing type.
247static struct WidthAndSignedness
248EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
249 assert(types.size() > 0 && "Empty list of types.");
250
251 // If any of the given types is signed, we must return a signed type.
252 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
253
254 // The encompassing type must have a width greater than or equal to the width
255 // of the specified types. Additionally, if the encompassing type is signed,
256 // its width must be strictly greater than the width of any unsigned types
257 // given.
258 unsigned width = 0;
259 for (const auto &type : types)
260 width = std::max(width, type.width + (isSigned && !type.isSigned));
261
262 return {width, isSigned};
263}
264
// Lowers a rotate-left/rotate-right builtin to a single cir.rotate op.
// NOTE(review): original line 272 is missing from this excerpt (numbering
// jumps 271 -> 273); recover it from the original file.
265RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
266 mlir::Value input = emitScalarExpr(e->getArg(0));
267 mlir::Value amount = emitScalarExpr(e->getArg(1));
268
269 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
270 // and amount, but cir.rotate requires them to have the same type. Cast amount
271 // to the type of input when necessary.
273
274 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
275 amount, isRotateLeft);
276 return RValue::get(r);
277}
278
// Emits a one-argument floating-point builtin as CIR operation `Operation`,
// installing the call's floating-point options for the emission via the
// CIRGenFPOptionsRAII scope.
// NOTE(review): original lines 280 (function header) and 285 are missing
// from this excerpt; recover them from the original file.
279template <class Operation>
281 const CallExpr &e) {
282 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
283
284 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, &e);
286
287 auto call =
288 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
289 return RValue::get(call->getResult(0));
290}
291
// Emits a one-argument builtin as CIR operation `Operation`, with the result
// type taken from the argument; no floating-point option scope is installed.
// NOTE(review): original line 293 (the function header) is missing from this
// excerpt; recover it from the original file.
292template <class Operation>
294 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
295 auto call =
296 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
297 return RValue::get(call->getResult(0));
298}
299
// Emits a one-argument builtin as CIR operation `Op`, using the call
// expression's converted type as the result type (it may differ from the
// operand type).
// NOTE(review): original lines 301 (function header) and 306 are missing
// from this excerpt; recover them from the original file.
300template <typename Op>
302 const CallExpr &e) {
303 mlir::Type resultType = cgf.convertType(e.getType());
304 mlir::Value src = cgf.emitScalarExpr(e.getArg(0));
305
307
308 auto call = Op::create(cgf.getBuilder(), src.getLoc(), resultType, src);
309 return RValue::get(call->getResult(0));
310}
311
// Emits a two-argument builtin as CIR operation `Op` and wraps the result as
// an RValue.
// NOTE(review): original line 313 (the function header) is missing from this
// excerpt; recover it from the original file.
312template <typename Op>
314 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
315 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
316
317 mlir::Location loc = cgf.getLoc(e.getExprLoc());
318 mlir::Type ty = cgf.convertType(e.getType());
319 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
320
321 return RValue::get(call->getResult(0));
322}
323
// Emits a two-argument builtin as CIR operation `Op` and returns the raw
// mlir::Value result (unlike the RValue-returning variant above).
// NOTE(review): original lines 325 (function header) and 333 are missing
// from this excerpt; recover them from the original file.
324template <typename Op>
326 const CallExpr &e) {
327 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
328 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
329
330 mlir::Location loc = cgf.getLoc(e.getExprLoc());
331 mlir::Type ty = cgf.convertType(e.getType());
332
334
335 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
336 return call->getResult(0);
337}
338
// X86 builtins are not yet implemented in CIR: reports a not-yet-implemented
// diagnostic (distinguishing library-function builtins) and returns an undef
// value of the call's type so codegen can continue.
// NOTE(review): the function header (original line 339) is missing from this
// excerpt; recover it from the original file.
340 unsigned builtinID) {
341
342 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
343 cgf.cgm.errorNYI(
344 e->getSourceRange(),
345 std::string("unimplemented X86 library function builtin call: ") +
346 cgf.getContext().BuiltinInfo.getName(builtinID));
347 } else {
348 cgf.cgm.errorNYI(e->getSourceRange(),
349 std::string("unimplemented X86 builtin call: ") +
350 cgf.getContext().BuiltinInfo.getName(builtinID));
351 }
352
353 return cgf.getUndefRValue(e->getType());
354}
355
// Emits the alloca family of builtins: allocates `size` bytes on the stack
// with __BIGGEST_ALIGNMENT__, optionally applies trivial auto-var init, and
// returns the buffer bitcast to `void *`.
// NOTE(review): several original lines are missing from this excerpt — 356
// (function header), 368 (the initializer of `suitableAlignmentInBytes`),
// 388 and 391-392 (case labels of the TrivialAutoVarInit switch), and
// 404-406 (the condition guarding the address-space-cast errorNYI). Recover
// them from the original file.
357 unsigned builtinID) {
358 assert(builtinID == Builtin::BI__builtin_alloca ||
359 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
360 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
361
362 // Get alloca size input
363 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
364
365 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
366 const TargetInfo &ti = cgf.getContext().getTargetInfo();
367 const CharUnits suitableAlignmentInBytes =
369
370 // Emit the alloca op with type `u8 *` to match the semantics of
371 // `llvm.alloca`. We later bitcast the type to `void *` to match the
372 // semantics of C/C++
373 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
374 // pointer of type `void *`. This will require a change to the allocaOp
375 // verifier.
376 CIRGenBuilderTy &builder = cgf.getBuilder();
377 mlir::Value allocaAddr = builder.createAlloca(
378 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
379 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
380
381 // Initialize the allocated buffer if required.
382 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
383 // Initialize the alloca with the given size and alignment according to
384 // the lang opts. Only the trivial non-initialization is supported for
385 // now.
386
387 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
389 // Nothing to initialize.
390 break;
393 cgf.cgm.errorNYI("trivial auto var init");
394 break;
395 }
396 }
397
398 // An alloca will always return a pointer to the alloca (stack) address
399 // space. This address space need not be the same as the AST / Language
400 // default (e.g. in C / C++ auto vars are in the generic address space). At
401 // the AST level this is handled within CreateTempAlloca et al., but for the
402 // builtin / dynamic alloca we have to handle it here.
403
407 cgf.cgm.errorNYI(e->getSourceRange(),
408 "Address Space Cast for builtin alloca");
409 }
410
411 // Bitcast the alloca to the expected type.
412 return RValue::get(builder.createBitcast(
413 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
414}
415
// Decides whether CIR should emit a floating-point math intrinsic for this
// builtin, taking into account #pragma float_control errno overrides,
// __attribute__((optnone)), -fmath-errno and the optimization level.
// NOTE(review): original lines 416 (function header), 423 (the declaration
// of `op`, presumably the call's stored FP features) and 433 (the name of
// the decision helper being called) are missing from this excerpt; recover
// them from the original file.
417 unsigned builtinID) {
418 std::optional<bool> errnoOverriden;
419 // ErrnoOverriden is true if math-errno is overriden via the
420 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
421 // which implies math-errno.
422 if (e->hasStoredFPFeatures()) {
424 if (op.hasMathErrnoOverride())
425 errnoOverriden = op.getMathErrnoOverride();
426 }
427 // True if 'attribute__((optnone))' is used. This attribute overrides
428 // fast-math which implies math-errno.
429 bool optNone =
430 cgf.curFuncDecl && cgf.curFuncDecl->hasAttr<OptimizeNoneAttr>();
431 bool isOptimizationEnabled = cgf.cgm.getCodeGenOpts().OptimizationLevel != 0;
432 bool generateFPMathIntrinsics =
434 builtinID, cgf.cgm.getTriple(), errnoOverriden,
435 cgf.getLangOpts().MathErrno, optNone, isOptimizationEnabled);
436 return generateFPMathIntrinsics;
437}
438
// Attempts to lower a floating-point math builtin to a CIR intrinsic op.
// Returns RValue::getIgnored() when the builtin is not (yet) handled here, so
// the caller falls back to the generic builtin emission path.
// NOTE(review): original line 439 (function header) and 441 are missing from
// this excerpt. Throughout the switch, the extraction also dropped the lines
// at the numbering gaps after each case-label group (452, 462, 472, 483,
// 492, 503, 512, 532, 543, 560, 569, 590, 600, 628, 638, 648, 658, 667,
// 677, 690?, etc.) — these appear to have been the per-group emission/return
// lines (several visible ones begin with "return RValue::get(" and are cut
// mid-expression). Recover the missing lines from the original file before
// relying on the per-case behavior shown here.
440 unsigned builtinID) {
442 switch (builtinID) {
443 case Builtin::BIacos:
444 case Builtin::BIacosf:
445 case Builtin::BIacosl:
446 case Builtin::BI__builtin_acos:
447 case Builtin::BI__builtin_acosf:
448 case Builtin::BI__builtin_acosf16:
449 case Builtin::BI__builtin_acosl:
450 case Builtin::BI__builtin_acosf128:
451 case Builtin::BI__builtin_elementwise_acos:
453 case Builtin::BIasin:
454 case Builtin::BIasinf:
455 case Builtin::BIasinl:
456 case Builtin::BI__builtin_asin:
457 case Builtin::BI__builtin_asinf:
458 case Builtin::BI__builtin_asinf16:
459 case Builtin::BI__builtin_asinl:
460 case Builtin::BI__builtin_asinf128:
461 case Builtin::BI__builtin_elementwise_asin:
463 case Builtin::BIatan:
464 case Builtin::BIatanf:
465 case Builtin::BIatanl:
466 case Builtin::BI__builtin_atan:
467 case Builtin::BI__builtin_atanf:
468 case Builtin::BI__builtin_atanf16:
469 case Builtin::BI__builtin_atanl:
470 case Builtin::BI__builtin_atanf128:
471 case Builtin::BI__builtin_elementwise_atan:
473 case Builtin::BIatan2:
474 case Builtin::BIatan2f:
475 case Builtin::BIatan2l:
476 case Builtin::BI__builtin_atan2:
477 case Builtin::BI__builtin_atan2f:
478 case Builtin::BI__builtin_atan2f16:
479 case Builtin::BI__builtin_atan2l:
480 case Builtin::BI__builtin_atan2f128:
481 case Builtin::BI__builtin_elementwise_atan2:
482 return RValue::get(
484 case Builtin::BIceil:
485 case Builtin::BIceilf:
486 case Builtin::BIceill:
487 case Builtin::BI__builtin_ceil:
488 case Builtin::BI__builtin_ceilf:
489 case Builtin::BI__builtin_ceilf16:
490 case Builtin::BI__builtin_ceill:
491 case Builtin::BI__builtin_ceilf128:
493 case Builtin::BI__builtin_elementwise_ceil:
494 return RValue::getIgnored();
495 case Builtin::BIcopysign:
496 case Builtin::BIcopysignf:
497 case Builtin::BIcopysignl:
498 case Builtin::BI__builtin_copysign:
499 case Builtin::BI__builtin_copysignf:
500 case Builtin::BI__builtin_copysignf16:
501 case Builtin::BI__builtin_copysignl:
502 case Builtin::BI__builtin_copysignf128:
504 case Builtin::BIcos:
505 case Builtin::BIcosf:
506 case Builtin::BIcosl:
507 case Builtin::BI__builtin_cos:
508 case Builtin::BI__builtin_cosf:
509 case Builtin::BI__builtin_cosf16:
510 case Builtin::BI__builtin_cosl:
511 case Builtin::BI__builtin_cosf128:
513 case Builtin::BI__builtin_elementwise_cos:
514 case Builtin::BIcosh:
515 case Builtin::BIcoshf:
516 case Builtin::BIcoshl:
517 case Builtin::BI__builtin_cosh:
518 case Builtin::BI__builtin_coshf:
519 case Builtin::BI__builtin_coshf16:
520 case Builtin::BI__builtin_coshl:
521 case Builtin::BI__builtin_coshf128:
522 case Builtin::BI__builtin_elementwise_cosh:
523 return RValue::getIgnored();
524 case Builtin::BIexp:
525 case Builtin::BIexpf:
526 case Builtin::BIexpl:
527 case Builtin::BI__builtin_exp:
528 case Builtin::BI__builtin_expf:
529 case Builtin::BI__builtin_expf16:
530 case Builtin::BI__builtin_expl:
531 case Builtin::BI__builtin_expf128:
533 case Builtin::BI__builtin_elementwise_exp:
534 return RValue::getIgnored();
535 case Builtin::BIexp2:
536 case Builtin::BIexp2f:
537 case Builtin::BIexp2l:
538 case Builtin::BI__builtin_exp2:
539 case Builtin::BI__builtin_exp2f:
540 case Builtin::BI__builtin_exp2f16:
541 case Builtin::BI__builtin_exp2l:
542 case Builtin::BI__builtin_exp2f128:
544 case Builtin::BI__builtin_elementwise_exp2:
545 case Builtin::BI__builtin_exp10:
546 case Builtin::BI__builtin_exp10f:
547 case Builtin::BI__builtin_exp10f16:
548 case Builtin::BI__builtin_exp10l:
549 case Builtin::BI__builtin_exp10f128:
550 case Builtin::BI__builtin_elementwise_exp10:
551 return RValue::getIgnored();
552 case Builtin::BIfabs:
553 case Builtin::BIfabsf:
554 case Builtin::BIfabsl:
555 case Builtin::BI__builtin_fabs:
556 case Builtin::BI__builtin_fabsf:
557 case Builtin::BI__builtin_fabsf16:
558 case Builtin::BI__builtin_fabsl:
559 case Builtin::BI__builtin_fabsf128:
561 case Builtin::BIfloor:
562 case Builtin::BIfloorf:
563 case Builtin::BIfloorl:
564 case Builtin::BI__builtin_floor:
565 case Builtin::BI__builtin_floorf:
566 case Builtin::BI__builtin_floorf16:
567 case Builtin::BI__builtin_floorl:
568 case Builtin::BI__builtin_floorf128:
570 case Builtin::BI__builtin_elementwise_floor:
571 case Builtin::BIfma:
572 case Builtin::BIfmaf:
573 case Builtin::BIfmal:
574 case Builtin::BI__builtin_fma:
575 case Builtin::BI__builtin_fmaf:
576 case Builtin::BI__builtin_fmaf16:
577 case Builtin::BI__builtin_fmal:
578 case Builtin::BI__builtin_fmaf128:
579 case Builtin::BI__builtin_elementwise_fma:
580 return RValue::getIgnored();
581 case Builtin::BIfmax:
582 case Builtin::BIfmaxf:
583 case Builtin::BIfmaxl:
584 case Builtin::BI__builtin_fmax:
585 case Builtin::BI__builtin_fmaxf:
586 case Builtin::BI__builtin_fmaxf16:
587 case Builtin::BI__builtin_fmaxl:
588 case Builtin::BI__builtin_fmaxf128:
589 return RValue::get(
591 case Builtin::BIfmin:
592 case Builtin::BIfminf:
593 case Builtin::BIfminl:
594 case Builtin::BI__builtin_fmin:
595 case Builtin::BI__builtin_fminf:
596 case Builtin::BI__builtin_fminf16:
597 case Builtin::BI__builtin_fminl:
598 case Builtin::BI__builtin_fminf128:
599 return RValue::get(
601 case Builtin::BIfmaximum_num:
602 case Builtin::BIfmaximum_numf:
603 case Builtin::BIfmaximum_numl:
604 case Builtin::BI__builtin_fmaximum_num:
605 case Builtin::BI__builtin_fmaximum_numf:
606 case Builtin::BI__builtin_fmaximum_numf16:
607 case Builtin::BI__builtin_fmaximum_numl:
608 case Builtin::BI__builtin_fmaximum_numf128:
609 case Builtin::BIfminimum_num:
610 case Builtin::BIfminimum_numf:
611 case Builtin::BIfminimum_numl:
612 case Builtin::BI__builtin_fminimum_num:
613 case Builtin::BI__builtin_fminimum_numf:
614 case Builtin::BI__builtin_fminimum_numf16:
615 case Builtin::BI__builtin_fminimum_numl:
616 case Builtin::BI__builtin_fminimum_numf128:
617 return RValue::getIgnored();
618 case Builtin::BIfmod:
619 case Builtin::BIfmodf:
620 case Builtin::BIfmodl:
621 case Builtin::BI__builtin_fmod:
622 case Builtin::BI__builtin_fmodf:
623 case Builtin::BI__builtin_fmodf16:
624 case Builtin::BI__builtin_fmodl:
625 case Builtin::BI__builtin_fmodf128:
626 case Builtin::BI__builtin_elementwise_fmod:
627 return RValue::get(
629 case Builtin::BIlog:
630 case Builtin::BIlogf:
631 case Builtin::BIlogl:
632 case Builtin::BI__builtin_log:
633 case Builtin::BI__builtin_logf:
634 case Builtin::BI__builtin_logf16:
635 case Builtin::BI__builtin_logl:
636 case Builtin::BI__builtin_logf128:
637 case Builtin::BI__builtin_elementwise_log:
639 case Builtin::BIlog10:
640 case Builtin::BIlog10f:
641 case Builtin::BIlog10l:
642 case Builtin::BI__builtin_log10:
643 case Builtin::BI__builtin_log10f:
644 case Builtin::BI__builtin_log10f16:
645 case Builtin::BI__builtin_log10l:
646 case Builtin::BI__builtin_log10f128:
647 case Builtin::BI__builtin_elementwise_log10:
649 case Builtin::BIlog2:
650 case Builtin::BIlog2f:
651 case Builtin::BIlog2l:
652 case Builtin::BI__builtin_log2:
653 case Builtin::BI__builtin_log2f:
654 case Builtin::BI__builtin_log2f16:
655 case Builtin::BI__builtin_log2l:
656 case Builtin::BI__builtin_log2f128:
657 case Builtin::BI__builtin_elementwise_log2:
659 case Builtin::BInearbyint:
660 case Builtin::BInearbyintf:
661 case Builtin::BInearbyintl:
662 case Builtin::BI__builtin_nearbyint:
663 case Builtin::BI__builtin_nearbyintf:
664 case Builtin::BI__builtin_nearbyintl:
665 case Builtin::BI__builtin_nearbyintf128:
666 case Builtin::BI__builtin_elementwise_nearbyint:
668 case Builtin::BIpow:
669 case Builtin::BIpowf:
670 case Builtin::BIpowl:
671 case Builtin::BI__builtin_pow:
672 case Builtin::BI__builtin_powf:
673 case Builtin::BI__builtin_powf16:
674 case Builtin::BI__builtin_powl:
675 case Builtin::BI__builtin_powf128:
676 return RValue::get(
678 case Builtin::BI__builtin_elementwise_pow:
679 return RValue::getIgnored();
680 case Builtin::BIrint:
681 case Builtin::BIrintf:
682 case Builtin::BIrintl:
683 case Builtin::BI__builtin_rint:
684 case Builtin::BI__builtin_rintf:
685 case Builtin::BI__builtin_rintf16:
686 case Builtin::BI__builtin_rintl:
687 case Builtin::BI__builtin_rintf128:
688 case Builtin::BI__builtin_elementwise_rint:
690 case Builtin::BIround:
691 case Builtin::BIroundf:
692 case Builtin::BIroundl:
693 case Builtin::BI__builtin_round:
694 case Builtin::BI__builtin_roundf:
695 case Builtin::BI__builtin_roundf16:
696 case Builtin::BI__builtin_roundl:
697 case Builtin::BI__builtin_roundf128:
698 case Builtin::BI__builtin_elementwise_round:
700 case Builtin::BIroundeven:
701 case Builtin::BIroundevenf:
702 case Builtin::BIroundevenl:
703 case Builtin::BI__builtin_roundeven:
704 case Builtin::BI__builtin_roundevenf:
705 case Builtin::BI__builtin_roundevenf16:
706 case Builtin::BI__builtin_roundevenl:
707 case Builtin::BI__builtin_roundevenf128:
708 case Builtin::BI__builtin_elementwise_roundeven:
710 case Builtin::BIsin:
711 case Builtin::BIsinf:
712 case Builtin::BIsinl:
713 case Builtin::BI__builtin_sin:
714 case Builtin::BI__builtin_sinf:
715 case Builtin::BI__builtin_sinf16:
716 case Builtin::BI__builtin_sinl:
717 case Builtin::BI__builtin_sinf128:
718 case Builtin::BI__builtin_elementwise_sin:
720 case Builtin::BIsinh:
721 case Builtin::BIsinhf:
722 case Builtin::BIsinhl:
723 case Builtin::BI__builtin_sinh:
724 case Builtin::BI__builtin_sinhf:
725 case Builtin::BI__builtin_sinhf16:
726 case Builtin::BI__builtin_sinhl:
727 case Builtin::BI__builtin_sinhf128:
728 case Builtin::BI__builtin_elementwise_sinh:
729 case Builtin::BI__builtin_sincospi:
730 case Builtin::BI__builtin_sincospif:
731 case Builtin::BI__builtin_sincospil:
732 case Builtin::BIsincos:
733 case Builtin::BIsincosf:
734 case Builtin::BIsincosl:
735 case Builtin::BI__builtin_sincos:
736 case Builtin::BI__builtin_sincosf:
737 case Builtin::BI__builtin_sincosf16:
738 case Builtin::BI__builtin_sincosl:
739 case Builtin::BI__builtin_sincosf128:
740 return RValue::getIgnored();
741 case Builtin::BIsqrt:
742 case Builtin::BIsqrtf:
743 case Builtin::BIsqrtl:
744 case Builtin::BI__builtin_sqrt:
745 case Builtin::BI__builtin_sqrtf:
746 case Builtin::BI__builtin_sqrtf16:
747 case Builtin::BI__builtin_sqrtl:
748 case Builtin::BI__builtin_sqrtf128:
749 case Builtin::BI__builtin_elementwise_sqrt:
751 case Builtin::BItan:
752 case Builtin::BItanf:
753 case Builtin::BItanl:
754 case Builtin::BI__builtin_tan:
755 case Builtin::BI__builtin_tanf:
756 case Builtin::BI__builtin_tanf16:
757 case Builtin::BI__builtin_tanl:
758 case Builtin::BI__builtin_tanf128:
759 case Builtin::BI__builtin_elementwise_tan:
761 case Builtin::BItanh:
762 case Builtin::BItanhf:
763 case Builtin::BItanhl:
764 case Builtin::BI__builtin_tanh:
765 case Builtin::BI__builtin_tanhf:
766 case Builtin::BI__builtin_tanhf16:
767 case Builtin::BI__builtin_tanhl:
768 case Builtin::BI__builtin_tanhf128:
769 case Builtin::BI__builtin_elementwise_tanh:
770 return RValue::getIgnored();
771 case Builtin::BItrunc:
772 case Builtin::BItruncf:
773 case Builtin::BItruncl:
774 case Builtin::BI__builtin_trunc:
775 case Builtin::BI__builtin_truncf:
776 case Builtin::BI__builtin_truncf16:
777 case Builtin::BI__builtin_truncl:
778 case Builtin::BI__builtin_truncf128:
779 case Builtin::BI__builtin_elementwise_trunc:
781 case Builtin::BIlround:
782 case Builtin::BIlroundf:
783 case Builtin::BIlroundl:
784 case Builtin::BI__builtin_lround:
785 case Builtin::BI__builtin_lroundf:
786 case Builtin::BI__builtin_lroundl:
787 case Builtin::BI__builtin_lroundf128:
789 case Builtin::BIllround:
790 case Builtin::BIllroundf:
791 case Builtin::BIllroundl:
792 case Builtin::BI__builtin_llround:
793 case Builtin::BI__builtin_llroundf:
794 case Builtin::BI__builtin_llroundl:
795 case Builtin::BI__builtin_llroundf128:
797 case Builtin::BIlrint:
798 case Builtin::BIlrintf:
799 case Builtin::BIlrintl:
800 case Builtin::BI__builtin_lrint:
801 case Builtin::BI__builtin_lrintf:
802 case Builtin::BI__builtin_lrintl:
803 case Builtin::BI__builtin_lrintf128:
805 case Builtin::BIllrint:
806 case Builtin::BIllrintf:
807 case Builtin::BIllrintl:
808 case Builtin::BI__builtin_llrint:
809 case Builtin::BI__builtin_llrintf:
810 case Builtin::BI__builtin_llrintl:
811 case Builtin::BI__builtin_llrintf128:
813 case Builtin::BI__builtin_ldexp:
814 case Builtin::BI__builtin_ldexpf:
815 case Builtin::BI__builtin_ldexpl:
816 case Builtin::BI__builtin_ldexpf16:
817 case Builtin::BI__builtin_ldexpf128:
818 case Builtin::BI__builtin_elementwise_ldexp:
819 default:
820 break;
821 }
822
// Not handled above: signal "ignored" so the caller emits the builtin via
// the generic path.
823 return RValue::getIgnored();
824}
825
// Decodes the next llvm::Intrinsic IITDescriptor from `infos` into a CIR
// type, consuming it (and, for vectors, recursively the element descriptor).
// Unimplemented descriptor kinds report NYI and fall back to void.
// NOTE(review): original lines 828-829 (the function name and its
// CIRGenFunction/MutableArrayRef parameters) are missing from this excerpt;
// recover them from the original file.
826// FIXME: Remove cgf parameter when all descriptor kinds are implemented
827static mlir::Type
830 mlir::MLIRContext *context) {
831 using namespace llvm::Intrinsic;
832
// Consume the front descriptor before dispatching on its kind.
833 IITDescriptor descriptor = infos.front();
834 infos = infos.slice(1);
835
836 switch (descriptor.Kind) {
837 case IITDescriptor::Void:
838 return cir::VoidType::get(context);
839 // If the intrinsic expects unsigned integers, the signedness is corrected in
840 // correctIntegerSignedness()
841 case IITDescriptor::Integer:
842 return cir::IntType::get(context, descriptor.Integer_Width,
843 /*isSigned=*/true);
844 case IITDescriptor::Vector: {
845 mlir::Type elementType = decodeFixedType(cgf, infos, context);
846 unsigned numElements = descriptor.Vector_Width.getFixedValue();
847 return cir::VectorType::get(elementType, numElements);
848 }
849 case IITDescriptor::Pointer: {
850 mlir::Builder builder(context);
851 auto addrSpace = cir::TargetAddressSpaceAttr::get(
852 context, descriptor.Pointer_AddressSpace);
853 return cir::PointerType::get(cir::VoidType::get(context), addrSpace);
854 }
855 default:
856 cgf.cgm.errorNYI("Unimplemented intrinsic type descriptor");
857 return cir::VoidType::get(context);
858 }
859}
860
861/// Helper function to correct integer signedness for intrinsic arguments and
862/// return type. IIT always returns signed integers, but the actual intrinsic
863/// may expect unsigned integers based on the AST FunctionDecl parameter types.
864static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType,
865 mlir::MLIRContext *context) {
866 auto intTy = dyn_cast<cir::IntType>(iitType);
867 if (!intTy)
868 return iitType;
869
870 if (astType->isUnsignedIntegerType())
871 return cir::IntType::get(context, intTy.getWidth(), /*isSigned=*/false);
872
873 return iitType;
874}
875
// Adjusts a pointer-typed intrinsic argument to the expected pointer type:
// address-space mismatches get an address-space cast (keeping the pointee),
// otherwise a plain bitcast suffices. Callers must only pass mismatched
// pointer types (asserted below).
// NOTE(review): original line 884 is missing from this excerpt — line 885 is
// the tail of a call (its string suggests an errorNYI/assert about address
// space handling) whose opening line was dropped; recover it from the
// original file.
876static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy,
877 CIRGenBuilderTy &builder) {
878 auto ptrType = mlir::cast<cir::PointerType>(argValue.getType());
879
880 auto expectedPtrType = mlir::cast<cir::PointerType>(expectedTy);
881 assert(ptrType != expectedPtrType && "types should not match");
882
883 if (ptrType.getAddrSpace() != expectedPtrType.getAddrSpace()) {
885 "address space handling not yet implemented");
886 auto newPtrType = cir::PointerType::get(ptrType.getPointee(),
887 expectedPtrType.getAddrSpace());
888 return builder.createAddrSpaceCast(argValue, newPtrType);
889 }
890
891 return builder.createBitcast(argValue, expectedTy);
892}
893
// Builds the CIR function type for an LLVM intrinsic `id` by decoding its
// IIT descriptor table: result type first, then argument types until a
// VarArg descriptor (if any) terminates the list.
// NOTE(review): original lines 899 and 905 are missing from this excerpt —
// they presumably declared the `table` and `argTypes` containers used below;
// recover them from the original file.
894static cir::FuncType getIntrinsicType(CIRGenFunction &cgf,
895 mlir::MLIRContext *context,
896 llvm::Intrinsic::ID id) {
897 using namespace llvm::Intrinsic;
898
900 getIntrinsicInfoTableEntries(id, table);
901
// The first decoded type is the intrinsic's result type.
902 ArrayRef<IITDescriptor> tableRef = table;
903 mlir::Type resultTy = decodeFixedType(cgf, tableRef, context);
904
906 bool isVarArg = false;
907 while (!tableRef.empty()) {
908 llvm::Intrinsic::IITDescriptor::IITDescriptorKind kind =
909 tableRef.front().Kind;
910 if (kind == IITDescriptor::VarArg) {
911 isVarArg = true;
912 break; // VarArg is last
913 }
914 argTypes.push_back(decodeFixedType(cgf, tableRef, context));
915 }
916
917 // CIR convention: no explicit void return type
918 if (isa<cir::VoidType>(resultTy))
919 return cir::FuncType::get(context, argTypes, /*optionalReturnType=*/nullptr,
920 isVarArg);
921
922 return cir::FuncType::get(context, argTypes, resultTy, isVarArg);
923}
924
926 const CallExpr *e,
928 mlir::Location loc = getLoc(e->getSourceRange());
929
930 // See if we can constant fold this builtin. If so, don't emit it at all.
931 // TODO: Extend this handling to all builtin calls that we can constant-fold.
932 // Do not constant-fold immediate (target-specific) builtins; their ASTs can
933 // trigger the constant evaluator in cases it cannot safely handle.
934 // Skip EvaluateAsRValue for those.
935 Expr::EvalResult result;
936 if (e->isPRValue() && !getContext().BuiltinInfo.isImmediate(builtinID) &&
937 e->EvaluateAsRValue(result, cgm.getASTContext()) &&
938 !result.hasSideEffects()) {
939 if (result.Val.isInt()) {
940 QualType type = e->getType();
941 if (type->isBooleanType())
942 return RValue::get(
943 builder.getBool(result.Val.getInt().getBoolValue(), loc));
944 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
945 }
946 if (result.Val.isFloat()) {
947 // Note: we are using result type of CallExpr to determine the type of
948 // the constant. Classic codegen uses the result value to determine the
949 // type. We feel it should be Ok to use expression type because it is
950 // hard to imagine a builtin function evaluates to a value that
951 // over/underflows its own defined type.
952 mlir::Type type = convertType(e->getType());
953 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
954 }
955 }
956
957 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
958
960
961 // If the builtin has been declared explicitly with an assembler label,
962 // disable the specialized emitting below. Ideally we should communicate the
963 // rename in IR, or at least avoid generating the intrinsic calls that are
964 // likely to get lowered to the renamed library functions.
965 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
966
967 bool generateFPMathIntrinsics =
968 shouldCIREmitFPMathIntrinsic(*this, e, builtinID);
969
970 if (generateFPMathIntrinsics) {
971 // Try to match the builtinID with a floating point math builtin.
972 RValue rv = tryEmitFPMathIntrinsic(*this, e, builtinIDIfNoAsmLabel);
973
974 // Return the result directly if a math intrinsic was generated.
975 if (!rv.isIgnored()) {
976 return rv;
977 }
978 }
979
981
982 switch (builtinIDIfNoAsmLabel) {
983 default:
984 break;
985
986 // C stdarg builtins.
987 case Builtin::BI__builtin_stdarg_start:
988 case Builtin::BI__builtin_va_start:
989 case Builtin::BI__va_start: {
990 mlir::Value vaList = builtinID == Builtin::BI__va_start
991 ? emitScalarExpr(e->getArg(0))
993 emitVAStart(vaList);
994 return {};
995 }
996
997 case Builtin::BI__builtin_va_end:
999 return {};
1000 case Builtin::BI__builtin_va_copy: {
1001 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
1002 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
1003 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
1004 return {};
1005 }
1006
1007 case Builtin::BIabs:
1008 case Builtin::BIlabs:
1009 case Builtin::BIllabs:
1010 case Builtin::BI__builtin_abs:
1011 case Builtin::BI__builtin_labs:
1012 case Builtin::BI__builtin_llabs: {
1013 bool sanitizeOverflow = sanOpts.has(SanitizerKind::SignedIntegerOverflow);
1014 mlir::Value arg = emitScalarExpr(e->getArg(0));
1015 mlir::Value result;
1016 switch (getLangOpts().getSignedOverflowBehavior()) {
1018 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1019 /*minIsPoison=*/false);
1020 break;
1022 if (!sanitizeOverflow) {
1023 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1024 /*minIsPoison=*/true);
1025 break;
1026 }
1027 [[fallthrough]];
1029 cgm.errorNYI(e->getSourceRange(), "abs with overflow handling");
1030 return RValue::get(nullptr);
1031 }
1032 return RValue::get(result);
1033 }
1034
1035 case Builtin::BI__assume:
1036 case Builtin::BI__builtin_assume: {
1037 if (e->getArg(0)->HasSideEffects(getContext()))
1038 return RValue::get(nullptr);
1039
1040 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
1041 cir::AssumeOp::create(builder, loc, argValue);
1042 return RValue::get(nullptr);
1043 }
1044
1045 case Builtin::BI__builtin_assume_separate_storage: {
1046 mlir::Value value0 = emitScalarExpr(e->getArg(0));
1047 mlir::Value value1 = emitScalarExpr(e->getArg(1));
1048 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
1049 return RValue::get(nullptr);
1050 }
1051
1052 case Builtin::BI__builtin_assume_aligned: {
1053 const Expr *ptrExpr = e->getArg(0);
1054 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
1055 mlir::Value offsetValue =
1056 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
1057
1058 std::optional<llvm::APSInt> alignment =
1060 assert(alignment.has_value() &&
1061 "the second argument to __builtin_assume_aligned must be an "
1062 "integral constant expression");
1063
1064 mlir::Value result =
1065 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
1066 alignment->getSExtValue(), offsetValue);
1067 return RValue::get(result);
1068 }
1069
1070 case Builtin::BI__builtin_complex: {
1071 mlir::Value real = emitScalarExpr(e->getArg(0));
1072 mlir::Value imag = emitScalarExpr(e->getArg(1));
1073 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
1074 return RValue::getComplex(complex);
1075 }
1076
1077 case Builtin::BI__builtin_creal:
1078 case Builtin::BI__builtin_crealf:
1079 case Builtin::BI__builtin_creall:
1080 case Builtin::BIcreal:
1081 case Builtin::BIcrealf:
1082 case Builtin::BIcreall: {
1083 mlir::Value complex = emitComplexExpr(e->getArg(0));
1084 mlir::Value real = builder.createComplexReal(loc, complex);
1085 return RValue::get(real);
1086 }
1087
1088 case Builtin::BI__builtin_cimag:
1089 case Builtin::BI__builtin_cimagf:
1090 case Builtin::BI__builtin_cimagl:
1091 case Builtin::BIcimag:
1092 case Builtin::BIcimagf:
1093 case Builtin::BIcimagl: {
1094 mlir::Value complex = emitComplexExpr(e->getArg(0));
1095 mlir::Value imag = builder.createComplexImag(loc, complex);
1096 return RValue::get(imag);
1097 }
1098
1099 case Builtin::BI__builtin_conj:
1100 case Builtin::BI__builtin_conjf:
1101 case Builtin::BI__builtin_conjl:
1102 case Builtin::BIconj:
1103 case Builtin::BIconjf:
1104 case Builtin::BIconjl: {
1105 mlir::Value complex = emitComplexExpr(e->getArg(0));
1106 mlir::Value conj = builder.createNot(complex);
1107 return RValue::getComplex(conj);
1108 }
1109
1110 case Builtin::BI__builtin_clrsb:
1111 case Builtin::BI__builtin_clrsbl:
1112 case Builtin::BI__builtin_clrsbll:
1113 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
1114
1115 case Builtin::BI__builtin_ctzs:
1116 case Builtin::BI__builtin_ctz:
1117 case Builtin::BI__builtin_ctzl:
1118 case Builtin::BI__builtin_ctzll:
1119 case Builtin::BI__builtin_ctzg:
1121 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
1122
1123 case Builtin::BI__builtin_clzs:
1124 case Builtin::BI__builtin_clz:
1125 case Builtin::BI__builtin_clzl:
1126 case Builtin::BI__builtin_clzll:
1127 case Builtin::BI__builtin_clzg:
1129 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
1130
1131 case Builtin::BI__builtin_ffs:
1132 case Builtin::BI__builtin_ffsl:
1133 case Builtin::BI__builtin_ffsll:
1134 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
1135
1136 case Builtin::BI__builtin_parity:
1137 case Builtin::BI__builtin_parityl:
1138 case Builtin::BI__builtin_parityll:
1139 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
1140
1141 case Builtin::BI__lzcnt16:
1142 case Builtin::BI__lzcnt:
1143 case Builtin::BI__lzcnt64:
1145 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
1146
1147 case Builtin::BI__popcnt16:
1148 case Builtin::BI__popcnt:
1149 case Builtin::BI__popcnt64:
1150 case Builtin::BI__builtin_popcount:
1151 case Builtin::BI__builtin_popcountl:
1152 case Builtin::BI__builtin_popcountll:
1153 case Builtin::BI__builtin_popcountg:
1154 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
1155
1156 // Always return the argument of __builtin_unpredictable. LLVM does not
1157 // have an intrinsic corresponding to this builtin. Metadata for this
1158 // builtin should be added directly to instructions such as branches or
1159 // switches that use it.
1160 case Builtin::BI__builtin_unpredictable: {
1161 return RValue::get(emitScalarExpr(e->getArg(0)));
1162 }
1163
1164 case Builtin::BI__builtin_expect:
1165 case Builtin::BI__builtin_expect_with_probability: {
1166 mlir::Value argValue = emitScalarExpr(e->getArg(0));
1167 if (cgm.getCodeGenOpts().OptimizationLevel == 0)
1168 return RValue::get(argValue);
1169
1170 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
1171
1172 mlir::FloatAttr probAttr;
1173 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
1174 llvm::APFloat probability(0.0);
1175 const Expr *probArg = e->getArg(2);
1176 [[maybe_unused]] bool evalSucceeded =
1177 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
1178 assert(evalSucceeded &&
1179 "probability should be able to evaluate as float");
1180 bool loseInfo = false; // ignored
1181 probability.convert(llvm::APFloat::IEEEdouble(),
1182 llvm::RoundingMode::Dynamic, &loseInfo);
1183 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
1184 probability);
1185 }
1186
1187 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
1188 argValue, expectedValue, probAttr);
1189 return RValue::get(result);
1190 }
1191
1192 case Builtin::BI__builtin_bswap16:
1193 case Builtin::BI__builtin_bswap32:
1194 case Builtin::BI__builtin_bswap64:
1195 case Builtin::BI_byteswap_ushort:
1196 case Builtin::BI_byteswap_ulong:
1197 case Builtin::BI_byteswap_uint64: {
1198 mlir::Value arg = emitScalarExpr(e->getArg(0));
1199 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
1200 }
1201
1202 case Builtin::BI__builtin_bitreverse8:
1203 case Builtin::BI__builtin_bitreverse16:
1204 case Builtin::BI__builtin_bitreverse32:
1205 case Builtin::BI__builtin_bitreverse64: {
1206 mlir::Value arg = emitScalarExpr(e->getArg(0));
1207 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
1208 }
1209
1210 case Builtin::BI__builtin_rotateleft8:
1211 case Builtin::BI__builtin_rotateleft16:
1212 case Builtin::BI__builtin_rotateleft32:
1213 case Builtin::BI__builtin_rotateleft64:
1214 return emitRotate(e, /*isRotateLeft=*/true);
1215
1216 case Builtin::BI__builtin_rotateright8:
1217 case Builtin::BI__builtin_rotateright16:
1218 case Builtin::BI__builtin_rotateright32:
1219 case Builtin::BI__builtin_rotateright64:
1220 return emitRotate(e, /*isRotateLeft=*/false);
1221
1222 case Builtin::BI__builtin_coro_id:
1223 case Builtin::BI__builtin_coro_promise:
1224 case Builtin::BI__builtin_coro_resume:
1225 case Builtin::BI__builtin_coro_noop:
1226 case Builtin::BI__builtin_coro_destroy:
1227 case Builtin::BI__builtin_coro_done:
1228 case Builtin::BI__builtin_coro_alloc:
1229 case Builtin::BI__builtin_coro_begin:
1230 case Builtin::BI__builtin_coro_end:
1231 case Builtin::BI__builtin_coro_suspend:
1232 case Builtin::BI__builtin_coro_align:
1233 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
1234 return getUndefRValue(e->getType());
1235
1236 case Builtin::BI__builtin_coro_frame: {
1237 return emitCoroutineFrame();
1238 }
1239 case Builtin::BI__builtin_coro_free:
1240 case Builtin::BI__builtin_coro_size: {
1241 GlobalDecl gd{fd};
1242 mlir::Type ty = cgm.getTypes().getFunctionType(
1243 cgm.getTypes().arrangeGlobalDeclaration(gd));
1244 const auto *nd = cast<NamedDecl>(gd.getDecl());
1245 cir::FuncOp fnOp =
1246 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
1247 fnOp.setBuiltin(true);
1248 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
1249 returnValue);
1250 }
1251
1252 case Builtin::BI__builtin_constant_p: {
1253 mlir::Type resultType = convertType(e->getType());
1254
1255 const Expr *arg = e->getArg(0);
1256 QualType argType = arg->getType();
1257 // FIXME: The allowance for Obj-C pointers and block pointers is historical
1258 // and likely a mistake.
1259 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
1260 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
1261 // Per the GCC documentation, only numeric constants are recognized after
1262 // inlining.
1263 return RValue::get(
1264 builder.getConstInt(getLoc(e->getSourceRange()),
1265 mlir::cast<cir::IntType>(resultType), 0));
1266 }
1267
1268 if (arg->HasSideEffects(getContext())) {
1269 // The argument is unevaluated, so be conservative if it might have
1270 // side-effects.
1271 return RValue::get(
1272 builder.getConstInt(getLoc(e->getSourceRange()),
1273 mlir::cast<cir::IntType>(resultType), 0));
1274 }
1275
1276 mlir::Value argValue = emitScalarExpr(arg);
1277 if (argType->isObjCObjectPointerType()) {
1278 cgm.errorNYI(e->getSourceRange(),
1279 "__builtin_constant_p: Obj-C object pointer");
1280 return {};
1281 }
1282 argValue = builder.createBitcast(argValue, convertType(argType));
1283
1284 mlir::Value result = cir::IsConstantOp::create(
1285 builder, getLoc(e->getSourceRange()), argValue);
1286 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
1287 result = builder.createBoolToInt(result, resultType);
1288 return RValue::get(result);
1289 }
1290 case Builtin::BI__builtin_dynamic_object_size:
1291 case Builtin::BI__builtin_object_size: {
1292 unsigned type =
1293 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
1294 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
1295
1296 // We pass this builtin onto the optimizer so that it can figure out the
1297 // object size in more complex cases.
1298 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
1299 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
1300 /*EmittedE=*/nullptr, isDynamic));
1301 }
1302
1303 case Builtin::BI__builtin_prefetch: {
1304 auto evaluateOperandAsInt = [&](const Expr *arg) {
1305 Expr::EvalResult res;
1306 [[maybe_unused]] bool evalSucceed =
1307 arg->EvaluateAsInt(res, cgm.getASTContext());
1308 assert(evalSucceed && "expression should be able to evaluate as int");
1309 return res.Val.getInt().getZExtValue();
1310 };
1311
1312 bool isWrite = false;
1313 if (e->getNumArgs() > 1)
1314 isWrite = evaluateOperandAsInt(e->getArg(1));
1315
1316 int locality = 3;
1317 if (e->getNumArgs() > 2)
1318 locality = evaluateOperandAsInt(e->getArg(2));
1319
1320 mlir::Value address = emitScalarExpr(e->getArg(0));
1321 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
1322 return RValue::get(nullptr);
1323 }
1324 case Builtin::BI__builtin_readcyclecounter:
1325 case Builtin::BI__builtin_readsteadycounter:
1326 return errorBuiltinNYI(*this, e, builtinID);
1327 case Builtin::BI__builtin___clear_cache: {
1328 mlir::Value begin =
1329 builder.createPtrBitcast(emitScalarExpr(e->getArg(0)), cgm.voidTy);
1330 mlir::Value end =
1331 builder.createPtrBitcast(emitScalarExpr(e->getArg(1)), cgm.voidTy);
1332 cir::ClearCacheOp::create(builder, getLoc(e->getSourceRange()), begin, end);
1333 return RValue::get(nullptr);
1334 }
1335 case Builtin::BI__builtin_trap:
1336 emitTrap(loc, /*createNewBlock=*/true);
1337 return RValue::getIgnored();
1338 case Builtin::BI__builtin_verbose_trap:
1339 case Builtin::BI__debugbreak:
1340 return errorBuiltinNYI(*this, e, builtinID);
1341 case Builtin::BI__builtin_unreachable:
1342 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
1343 return RValue::getIgnored();
1344 case Builtin::BI__builtin_powi:
1345 case Builtin::BI__builtin_powif:
1346 case Builtin::BI__builtin_powil:
1347 case Builtin::BI__builtin_frexpl:
1348 case Builtin::BI__builtin_frexp:
1349 case Builtin::BI__builtin_frexpf:
1350 case Builtin::BI__builtin_frexpf128:
1351 case Builtin::BI__builtin_frexpf16:
1352 case Builtin::BImodf:
1353 case Builtin::BImodff:
1354 case Builtin::BImodfl:
1355 case Builtin::BI__builtin_modf:
1356 case Builtin::BI__builtin_modff:
1357 case Builtin::BI__builtin_modfl:
1358 case Builtin::BI__builtin_isgreater:
1359 case Builtin::BI__builtin_isgreaterequal:
1360 case Builtin::BI__builtin_isless:
1361 case Builtin::BI__builtin_islessequal:
1362 case Builtin::BI__builtin_islessgreater:
1363 case Builtin::BI__builtin_isunordered:
1364 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
1365 //
1366 // The `__builtin_isfpclass()` builtin is a generalization of functions
1367 // isnan, isinf, isfinite and some others defined by the C standard. It tests
1368 // if the floating-point value, specified by the first argument, falls into
1369 // any of data classes, specified by the second argument.
1370 case Builtin::BI__builtin_isnan: {
1371 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1372 mlir::Value v = emitScalarExpr(e->getArg(0));
1374 mlir::Location loc = getLoc(e->getBeginLoc());
1375 return RValue::get(builder.createBoolToInt(
1376 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
1377 convertType(e->getType())));
1378 }
1379
1380 case Builtin::BI__builtin_issignaling: {
1381 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1382 mlir::Value v = emitScalarExpr(e->getArg(0));
1383 mlir::Location loc = getLoc(e->getBeginLoc());
1384 return RValue::get(builder.createBoolToInt(
1385 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
1386 convertType(e->getType())));
1387 }
1388
1389 case Builtin::BI__builtin_isinf: {
1390 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1391 mlir::Value v = emitScalarExpr(e->getArg(0));
1393 mlir::Location loc = getLoc(e->getBeginLoc());
1394 return RValue::get(builder.createBoolToInt(
1395 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
1396 convertType(e->getType())));
1397 }
1398 case Builtin::BIfinite:
1399 case Builtin::BI__finite:
1400 case Builtin::BIfinitef:
1401 case Builtin::BI__finitef:
1402 case Builtin::BIfinitel:
1403 case Builtin::BI__finitel:
1404 case Builtin::BI__builtin_isfinite: {
1405 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1406 mlir::Value v = emitScalarExpr(e->getArg(0));
1408 mlir::Location loc = getLoc(e->getBeginLoc());
1409 return RValue::get(builder.createBoolToInt(
1410 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
1411 convertType(e->getType())));
1412 }
1413
1414 case Builtin::BI__builtin_isnormal: {
1415 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1416 mlir::Value v = emitScalarExpr(e->getArg(0));
1417 mlir::Location loc = getLoc(e->getBeginLoc());
1418 return RValue::get(builder.createBoolToInt(
1419 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
1420 convertType(e->getType())));
1421 }
1422
1423 case Builtin::BI__builtin_issubnormal: {
1424 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1425 mlir::Value v = emitScalarExpr(e->getArg(0));
1426 mlir::Location loc = getLoc(e->getBeginLoc());
1427 return RValue::get(builder.createBoolToInt(
1428 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
1429 convertType(e->getType())));
1430 }
1431
1432 case Builtin::BI__builtin_iszero: {
1433 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1434 mlir::Value v = emitScalarExpr(e->getArg(0));
1435 mlir::Location loc = getLoc(e->getBeginLoc());
1436 return RValue::get(builder.createBoolToInt(
1437 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
1438 convertType(e->getType())));
1439 }
1440 case Builtin::BI__builtin_isfpclass: {
1441 Expr::EvalResult result;
1442 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
1443 break;
1444
1445 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1446 mlir::Value v = emitScalarExpr(e->getArg(0));
1447 uint64_t test = result.Val.getInt().getLimitedValue();
1448 mlir::Location loc = getLoc(e->getBeginLoc());
1449 //
1450 return RValue::get(builder.createBoolToInt(
1451 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
1452 convertType(e->getType())));
1453 }
1454 case Builtin::BI__builtin_nondeterministic_value:
1455 return errorBuiltinNYI(*this, e, builtinID);
1456 case Builtin::BI__builtin_elementwise_abs: {
1457 mlir::Type cirTy = convertType(e->getArg(0)->getType());
1458 bool isIntTy = cir::isIntOrVectorOfIntType(cirTy);
1459 if (!isIntTy)
1460 return emitUnaryFPBuiltin<cir::FAbsOp>(*this, *e);
1461 mlir::Value arg = emitScalarExpr(e->getArg(0));
1462 mlir::Value result = cir::AbsOp::create(builder, getLoc(e->getExprLoc()),
1463 arg.getType(), arg, false);
1464 return RValue::get(result);
1465 }
1466 case Builtin::BI__builtin_elementwise_acos:
1468 case Builtin::BI__builtin_elementwise_asin:
1470 case Builtin::BI__builtin_elementwise_atan:
1472 case Builtin::BI__builtin_elementwise_atan2:
1473 return RValue::get(
1475 case Builtin::BI__builtin_elementwise_exp:
1477 case Builtin::BI__builtin_elementwise_exp2:
1479 case Builtin::BI__builtin_elementwise_log:
1481 case Builtin::BI__builtin_elementwise_log2:
1483 case Builtin::BI__builtin_elementwise_log10:
1485 case Builtin::BI__builtin_elementwise_cos:
1487 case Builtin::BI__builtin_elementwise_floor:
1489 case Builtin::BI__builtin_elementwise_round:
1491 case Builtin::BI__builtin_elementwise_rint:
1493 case Builtin::BI__builtin_elementwise_nearbyint:
1495 case Builtin::BI__builtin_elementwise_sin:
1497 case Builtin::BI__builtin_elementwise_sqrt:
1499 case Builtin::BI__builtin_elementwise_tan:
1501 case Builtin::BI__builtin_elementwise_trunc:
1503 case Builtin::BI__builtin_elementwise_fmod:
1504 return RValue::get(
1506 case Builtin::BI__builtin_elementwise_ceil:
1507 case Builtin::BI__builtin_elementwise_exp10:
1508 case Builtin::BI__builtin_elementwise_ldexp:
1509 case Builtin::BI__builtin_elementwise_pow:
1510 case Builtin::BI__builtin_elementwise_bitreverse:
1511 case Builtin::BI__builtin_elementwise_cosh:
1512 case Builtin::BI__builtin_elementwise_popcount:
1513 case Builtin::BI__builtin_elementwise_roundeven:
1514 case Builtin::BI__builtin_elementwise_sinh:
1515 case Builtin::BI__builtin_elementwise_tanh:
1516 case Builtin::BI__builtin_elementwise_canonicalize:
1517 case Builtin::BI__builtin_elementwise_copysign:
1518 case Builtin::BI__builtin_elementwise_fma:
1519 return errorBuiltinNYI(*this, e, builtinID);
1520 case Builtin::BI__builtin_elementwise_fshl: {
1521 mlir::Location loc = getLoc(e->getExprLoc());
1522 mlir::Value a = emitScalarExpr(e->getArg(0));
1523 mlir::Value b = emitScalarExpr(e->getArg(1));
1524 mlir::Value c = emitScalarExpr(e->getArg(2));
1525 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshl", a.getType(),
1526 mlir::ValueRange{a, b, c}));
1527 }
1528 case Builtin::BI__builtin_elementwise_fshr: {
1529 mlir::Location loc = getLoc(e->getExprLoc());
1530 mlir::Value a = emitScalarExpr(e->getArg(0));
1531 mlir::Value b = emitScalarExpr(e->getArg(1));
1532 mlir::Value c = emitScalarExpr(e->getArg(2));
1533 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshr", a.getType(),
1534 mlir::ValueRange{a, b, c}));
1535 }
1536 case Builtin::BI__builtin_elementwise_add_sat:
1537 case Builtin::BI__builtin_elementwise_sub_sat:
1538 case Builtin::BI__builtin_elementwise_max:
1539 case Builtin::BI__builtin_elementwise_min:
1540 case Builtin::BI__builtin_elementwise_maxnum:
1541 case Builtin::BI__builtin_elementwise_minnum:
1542 case Builtin::BI__builtin_elementwise_maximum:
1543 case Builtin::BI__builtin_elementwise_minimum:
1544 case Builtin::BI__builtin_elementwise_maximumnum:
1545 case Builtin::BI__builtin_elementwise_minimumnum:
1546 case Builtin::BI__builtin_reduce_max:
1547 case Builtin::BI__builtin_reduce_min:
1548 case Builtin::BI__builtin_reduce_add:
1549 case Builtin::BI__builtin_reduce_mul:
1550 case Builtin::BI__builtin_reduce_xor:
1551 case Builtin::BI__builtin_reduce_or:
1552 case Builtin::BI__builtin_reduce_and:
1553 case Builtin::BI__builtin_reduce_assoc_fadd:
1554 case Builtin::BI__builtin_reduce_in_order_fadd:
1555 case Builtin::BI__builtin_reduce_maximum:
1556 case Builtin::BI__builtin_reduce_minimum:
1557 case Builtin::BI__builtin_matrix_transpose:
1558 case Builtin::BI__builtin_matrix_column_major_load:
1559 case Builtin::BI__builtin_matrix_column_major_store:
1560 case Builtin::BI__builtin_masked_load:
1561 case Builtin::BI__builtin_masked_expand_load:
1562 case Builtin::BI__builtin_masked_gather:
1563 case Builtin::BI__builtin_masked_store:
1564 case Builtin::BI__builtin_masked_compress_store:
1565 case Builtin::BI__builtin_masked_scatter:
1566 return errorBuiltinNYI(*this, e, builtinID);
1567 case Builtin::BI__builtin_isinf_sign: {
1568 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1569 mlir::Location loc = getLoc(e->getBeginLoc());
1570 mlir::Value arg = emitScalarExpr(e->getArg(0));
1571 mlir::Value isInf =
1572 builder.createIsFPClass(loc, arg, cir::FPClassTest::Infinity);
1573 mlir::Value isNeg = emitSignBit(loc, *this, arg);
1574 mlir::Type intTy = convertType(e->getType());
1575 cir::ConstantOp zero = builder.getNullValue(intTy, loc);
1576 cir::ConstantOp one = builder.getConstant(loc, cir::IntAttr::get(intTy, 1));
1577 cir::ConstantOp negativeOne =
1578 builder.getConstant(loc, cir::IntAttr::get(intTy, -1));
1579 mlir::Value signResult = builder.createSelect(loc, isNeg, negativeOne, one);
1580 mlir::Value result = builder.createSelect(loc, isInf, signResult, zero);
1581 return RValue::get(result);
1582 }
1583 case Builtin::BI__builtin_flt_rounds:
1584 case Builtin::BI__builtin_set_flt_rounds:
1585 case Builtin::BI__builtin_fpclassify:
1586 return errorBuiltinNYI(*this, e, builtinID);
1587 case Builtin::BIalloca:
1588 case Builtin::BI_alloca:
1589 case Builtin::BI__builtin_alloca_uninitialized:
1590 case Builtin::BI__builtin_alloca:
1591 return emitBuiltinAlloca(*this, e, builtinID);
1592 case Builtin::BI__builtin_alloca_with_align_uninitialized:
1593 case Builtin::BI__builtin_alloca_with_align:
1594 case Builtin::BI__builtin_infer_alloc_token:
1595 return errorBuiltinNYI(*this, e, builtinID);
1596 case Builtin::BIbzero:
1597 case Builtin::BI__builtin_bzero: {
1598 mlir::Location loc = getLoc(e->getSourceRange());
1599 Address destPtr = emitPointerWithAlignment(e->getArg(0));
1600 Address destPtrCast = destPtr.withElementType(builder, cgm.voidTy);
1601 mlir::Value size = emitScalarExpr(e->getArg(1));
1602 mlir::Value zero = builder.getNullValue(builder.getUInt8Ty(), loc);
1604 builder.createMemSet(loc, destPtrCast, zero, size);
1606 return RValue::getIgnored();
1607 }
1608 case Builtin::BIbcopy:
1609 case Builtin::BI__builtin_bcopy: {
1612 mlir::Value sizeVal = emitScalarExpr(e->getArg(2));
1614 e->getArg(0)->getExprLoc(), fd, 0);
1616 e->getArg(1)->getExprLoc(), fd, 0);
1617 builder.createMemMove(getLoc(e->getSourceRange()), dest.getPointer(),
1618 src.getPointer(), sizeVal);
1619 return RValue::get(nullptr);
1620 }
1621 case Builtin::BI__builtin_char_memchr:
1622 case Builtin::BI__builtin_memchr: {
1623 Address srcPtr = emitPointerWithAlignment(e->getArg(0));
1624 mlir::Value src =
1625 builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy());
1626 mlir::Value pattern = emitScalarExpr(e->getArg(1));
1627 mlir::Value len = emitScalarExpr(e->getArg(2));
1628 mlir::Value res = cir::MemChrOp::create(builder, getLoc(e->getExprLoc()),
1629 src, pattern, len);
1630 return RValue::get(res);
1631 }
1632 case Builtin::BImemcpy:
1633 case Builtin::BI__builtin_memcpy:
1634 case Builtin::BImempcpy:
1635 case Builtin::BI__builtin_mempcpy:
1636 case Builtin::BI__builtin_memcpy_inline:
1637 case Builtin::BI__builtin___memcpy_chk:
1638 case Builtin::BI__builtin_objc_memmove_collectable:
1639 case Builtin::BI__builtin___memmove_chk:
1640 case Builtin::BI__builtin_trivially_relocate:
1641 case Builtin::BImemmove:
1642 case Builtin::BI__builtin_memmove:
1643 case Builtin::BImemset:
1644 case Builtin::BI__builtin_memset:
1645 case Builtin::BI__builtin_memset_inline:
1646 case Builtin::BI__builtin___memset_chk:
1647 case Builtin::BI__builtin_wmemchr:
1648 case Builtin::BI__builtin_wmemcmp:
1649 break; // Handled as library calls below.
1650 case Builtin::BI__builtin_dwarf_cfa:
1651 return errorBuiltinNYI(*this, e, builtinID);
1652 case Builtin::BI__builtin_return_address: {
1653 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1654 return RValue::get(cir::ReturnAddrOp::create(
1655 builder, getLoc(e->getExprLoc()),
1656 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
1657 }
1658 case Builtin::BI_ReturnAddress: {
1659 return RValue::get(cir::ReturnAddrOp::create(
1660 builder, getLoc(e->getExprLoc()),
1661 builder.getConstInt(loc, builder.getUInt32Ty(), 0)));
1662 }
1663 case Builtin::BI__builtin_frame_address: {
1664 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1665 mlir::Location loc = getLoc(e->getExprLoc());
1666 mlir::Value addr = cir::FrameAddrOp::create(
1667 builder, loc, allocaInt8PtrTy,
1668 builder.getConstAPInt(loc, builder.getUInt32Ty(), level));
1669 return RValue::get(
1670 builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy));
1671 }
1672 case Builtin::BI__builtin_extract_return_addr:
1673 case Builtin::BI__builtin_frob_return_addr:
1674 case Builtin::BI__builtin_dwarf_sp_column:
1675 case Builtin::BI__builtin_init_dwarf_reg_size_table:
1676 case Builtin::BI__builtin_eh_return:
1677 case Builtin::BI__builtin_unwind_init:
1678 case Builtin::BI__builtin_extend_pointer:
1679 return errorBuiltinNYI(*this, e, builtinID);
1680 case Builtin::BI__builtin_setjmp: {
1682 mlir::Location loc = getLoc(e->getExprLoc());
1683
1684 cir::PointerType voidPtrTy = builder.getVoidPtrTy();
1685 cir::PointerType ppTy = builder.getPointerTo(voidPtrTy);
1686 Address castBuf = buf.withElementType(builder, voidPtrTy);
1687
1689 if (getTarget().getTriple().isSystemZ()) {
1690 cgm.errorNYI(e->getExprLoc(), "setjmp on SystemZ");
1691 return {};
1692 }
1693
1694 mlir::Value frameAddress =
1695 cir::FrameAddrOp::create(builder, loc, voidPtrTy,
1696 mlir::ValueRange{builder.getUInt32(0, loc)})
1697 .getResult();
1698
1699 builder.createStore(loc, frameAddress, castBuf);
1700
1701 mlir::Value stacksave =
1702 cir::StackSaveOp::create(builder, loc, voidPtrTy).getResult();
1703 cir::PtrStrideOp stackSaveSlot = cir::PtrStrideOp::create(
1704 builder, loc, ppTy, castBuf.getPointer(), builder.getSInt32(2, loc));
1705 llvm::TypeSize voidPtrTySize =
1706 cgm.getDataLayout().getTypeAllocSize(voidPtrTy);
1707 CharUnits slotAlign = castBuf.getAlignment().alignmentAtOffset(
1708 CharUnits().fromQuantity(2 * voidPtrTySize));
1709 Address slotAddr = Address(stackSaveSlot, voidPtrTy, slotAlign);
1710 builder.createStore(loc, stacksave, slotAddr);
1711 auto op = cir::EhSetjmpOp::create(builder, loc, castBuf.getPointer());
1712 return RValue::get(op);
1713 }
1714 case Builtin::BI__builtin_longjmp: {
1715 mlir::Value buf = emitScalarExpr(e->getArg(0));
1716 mlir::Location loc = getLoc(e->getExprLoc());
1717
1718 cir::EhLongjmpOp::create(builder, loc, buf);
1719 cir::UnreachableOp::create(builder, loc);
1720 return RValue::get(nullptr);
1721 }
1722 case Builtin::BI__builtin_launder:
1723 case Builtin::BI__sync_fetch_and_add:
1724 case Builtin::BI__sync_fetch_and_sub:
1725 case Builtin::BI__sync_fetch_and_or:
1726 case Builtin::BI__sync_fetch_and_and:
1727 case Builtin::BI__sync_fetch_and_xor:
1728 case Builtin::BI__sync_fetch_and_nand:
1729 case Builtin::BI__sync_add_and_fetch:
1730 case Builtin::BI__sync_sub_and_fetch:
1731 case Builtin::BI__sync_and_and_fetch:
1732 case Builtin::BI__sync_or_and_fetch:
1733 case Builtin::BI__sync_xor_and_fetch:
1734 case Builtin::BI__sync_nand_and_fetch:
1735 case Builtin::BI__sync_val_compare_and_swap:
1736 case Builtin::BI__sync_bool_compare_and_swap:
1737 case Builtin::BI__sync_lock_test_and_set:
1738 case Builtin::BI__sync_lock_release:
1739 case Builtin::BI__sync_swap:
1740 return errorBuiltinNYI(*this, e, builtinID);
1741 case Builtin::BI__sync_fetch_and_add_1:
1742 case Builtin::BI__sync_fetch_and_add_2:
1743 case Builtin::BI__sync_fetch_and_add_4:
1744 case Builtin::BI__sync_fetch_and_add_8:
1745 case Builtin::BI__sync_fetch_and_add_16:
1746 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Add, e);
1747 case Builtin::BI__sync_fetch_and_sub_1:
1748 case Builtin::BI__sync_fetch_and_sub_2:
1749 case Builtin::BI__sync_fetch_and_sub_4:
1750 case Builtin::BI__sync_fetch_and_sub_8:
1751 case Builtin::BI__sync_fetch_and_sub_16:
1752 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Sub, e);
1753 case Builtin::BI__sync_fetch_and_or_1:
1754 case Builtin::BI__sync_fetch_and_or_2:
1755 case Builtin::BI__sync_fetch_and_or_4:
1756 case Builtin::BI__sync_fetch_and_or_8:
1757 case Builtin::BI__sync_fetch_and_or_16:
1758 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Or, e);
1759 case Builtin::BI__sync_fetch_and_and_1:
1760 case Builtin::BI__sync_fetch_and_and_2:
1761 case Builtin::BI__sync_fetch_and_and_4:
1762 case Builtin::BI__sync_fetch_and_and_8:
1763 case Builtin::BI__sync_fetch_and_and_16:
1764 return emitBinaryAtomic(*this, cir::AtomicFetchKind::And, e);
1765 case Builtin::BI__sync_fetch_and_xor_1:
1766 case Builtin::BI__sync_fetch_and_xor_2:
1767 case Builtin::BI__sync_fetch_and_xor_4:
1768 case Builtin::BI__sync_fetch_and_xor_8:
1769 case Builtin::BI__sync_fetch_and_xor_16:
1770 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Xor, e);
1771 case Builtin::BI__sync_fetch_and_nand_1:
1772 case Builtin::BI__sync_fetch_and_nand_2:
1773 case Builtin::BI__sync_fetch_and_nand_4:
1774 case Builtin::BI__sync_fetch_and_nand_8:
1775 case Builtin::BI__sync_fetch_and_nand_16:
1776 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Nand, e);
1777 case Builtin::BI__sync_fetch_and_min:
1778 case Builtin::BI__sync_fetch_and_max:
1779 case Builtin::BI__sync_fetch_and_umin:
1780 case Builtin::BI__sync_fetch_and_umax:
1781 return errorBuiltinNYI(*this, e, builtinID);
1782 return getUndefRValue(e->getType());
1783 case Builtin::BI__sync_add_and_fetch_1:
1784 case Builtin::BI__sync_add_and_fetch_2:
1785 case Builtin::BI__sync_add_and_fetch_4:
1786 case Builtin::BI__sync_add_and_fetch_8:
1787 case Builtin::BI__sync_add_and_fetch_16:
1788 return emitBinaryAtomicPost<cir::AddOp>(*this, cir::AtomicFetchKind::Add,
1789 e);
1790 case Builtin::BI__sync_sub_and_fetch_1:
1791 case Builtin::BI__sync_sub_and_fetch_2:
1792 case Builtin::BI__sync_sub_and_fetch_4:
1793 case Builtin::BI__sync_sub_and_fetch_8:
1794 case Builtin::BI__sync_sub_and_fetch_16:
1795 return emitBinaryAtomicPost<cir::SubOp>(*this, cir::AtomicFetchKind::Sub,
1796 e);
1797 case Builtin::BI__sync_and_and_fetch_1:
1798 case Builtin::BI__sync_and_and_fetch_2:
1799 case Builtin::BI__sync_and_and_fetch_4:
1800 case Builtin::BI__sync_and_and_fetch_8:
1801 case Builtin::BI__sync_and_and_fetch_16:
1802 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::And,
1803 e);
1804 case Builtin::BI__sync_or_and_fetch_1:
1805 case Builtin::BI__sync_or_and_fetch_2:
1806 case Builtin::BI__sync_or_and_fetch_4:
1807 case Builtin::BI__sync_or_and_fetch_8:
1808 case Builtin::BI__sync_or_and_fetch_16:
1809 return emitBinaryAtomicPost<cir::OrOp>(*this, cir::AtomicFetchKind::Or, e);
1810 case Builtin::BI__sync_xor_and_fetch_1:
1811 case Builtin::BI__sync_xor_and_fetch_2:
1812 case Builtin::BI__sync_xor_and_fetch_4:
1813 case Builtin::BI__sync_xor_and_fetch_8:
1814 case Builtin::BI__sync_xor_and_fetch_16:
1815 return emitBinaryAtomicPost<cir::XorOp>(*this, cir::AtomicFetchKind::Xor,
1816 e);
1817 case Builtin::BI__sync_nand_and_fetch_1:
1818 case Builtin::BI__sync_nand_and_fetch_2:
1819 case Builtin::BI__sync_nand_and_fetch_4:
1820 case Builtin::BI__sync_nand_and_fetch_8:
1821 case Builtin::BI__sync_nand_and_fetch_16:
1822 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::Nand,
1823 e, /*invert=*/true);
1824 case Builtin::BI__sync_val_compare_and_swap_1:
1825 case Builtin::BI__sync_val_compare_and_swap_2:
1826 case Builtin::BI__sync_val_compare_and_swap_4:
1827 case Builtin::BI__sync_val_compare_and_swap_8:
1828 case Builtin::BI__sync_val_compare_and_swap_16:
1829 case Builtin::BI__sync_bool_compare_and_swap_1:
1830 case Builtin::BI__sync_bool_compare_and_swap_2:
1831 case Builtin::BI__sync_bool_compare_and_swap_4:
1832 case Builtin::BI__sync_bool_compare_and_swap_8:
1833 case Builtin::BI__sync_bool_compare_and_swap_16:
1834 case Builtin::BI__sync_swap_1:
1835 case Builtin::BI__sync_swap_2:
1836 case Builtin::BI__sync_swap_4:
1837 case Builtin::BI__sync_swap_8:
1838 case Builtin::BI__sync_swap_16:
1839 case Builtin::BI__sync_lock_test_and_set_1:
1840 case Builtin::BI__sync_lock_test_and_set_2:
1841 case Builtin::BI__sync_lock_test_and_set_4:
1842 case Builtin::BI__sync_lock_test_and_set_8:
1843 case Builtin::BI__sync_lock_test_and_set_16:
1844 case Builtin::BI__sync_lock_release_1:
1845 case Builtin::BI__sync_lock_release_2:
1846 case Builtin::BI__sync_lock_release_4:
1847 case Builtin::BI__sync_lock_release_8:
1848 case Builtin::BI__sync_lock_release_16:
1849 case Builtin::BI__sync_synchronize:
1850 case Builtin::BI__builtin_nontemporal_load:
1851 case Builtin::BI__builtin_nontemporal_store:
1852 case Builtin::BI__c11_atomic_is_lock_free:
1853 case Builtin::BI__atomic_is_lock_free:
1854 case Builtin::BI__atomic_test_and_set:
1855 case Builtin::BI__atomic_clear:
1856 return errorBuiltinNYI(*this, e, builtinID);
1857 case Builtin::BI__atomic_thread_fence:
1858 case Builtin::BI__c11_atomic_thread_fence: {
1859 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
1860 return RValue::get(nullptr);
1861 }
1862 case Builtin::BI__atomic_signal_fence:
1863 case Builtin::BI__c11_atomic_signal_fence: {
1864 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
1865 return RValue::get(nullptr);
1866 }
1867 case Builtin::BI__scoped_atomic_thread_fence:
1868 case Builtin::BI__builtin_signbit:
1869 case Builtin::BI__builtin_signbitf:
1870 case Builtin::BI__builtin_signbitl:
1871 case Builtin::BI__warn_memset_zero_len:
1872 case Builtin::BI__annotation:
1873 case Builtin::BI__builtin_annotation:
1874 case Builtin::BI__builtin_addcb:
1875 case Builtin::BI__builtin_addcs:
1876 case Builtin::BI__builtin_addc:
1877 case Builtin::BI__builtin_addcl:
1878 case Builtin::BI__builtin_addcll:
1879 case Builtin::BI__builtin_subcb:
1880 case Builtin::BI__builtin_subcs:
1881 case Builtin::BI__builtin_subc:
1882 case Builtin::BI__builtin_subcl:
1883 case Builtin::BI__builtin_subcll:
1884 return errorBuiltinNYI(*this, e, builtinID);
1885
1886 case Builtin::BI__builtin_add_overflow:
1887 case Builtin::BI__builtin_sub_overflow:
1888 case Builtin::BI__builtin_mul_overflow: {
1889 const clang::Expr *leftArg = e->getArg(0);
1890 const clang::Expr *rightArg = e->getArg(1);
1891 const clang::Expr *resultArg = e->getArg(2);
1892
1893 clang::QualType resultQTy =
1894 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1895
1896 WidthAndSignedness leftInfo =
1897 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
1898 WidthAndSignedness rightInfo =
1899 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
1900 WidthAndSignedness resultInfo =
1901 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
1902
1903 // Note we compute the encompassing type with the consideration to the
1904 // result type, so later in LLVM lowering we don't get redundant integral
1905 // extension casts.
1906 WidthAndSignedness encompassingInfo =
1907 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
1908
1909 auto encompassingCIRTy = cir::IntType::get(
1910 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
1911 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1912
1913 mlir::Value x = emitScalarExpr(leftArg);
1914 mlir::Value y = emitScalarExpr(rightArg);
1915 Address resultPtr = emitPointerWithAlignment(resultArg);
1916
1917 // Extend each operand to the encompassing type, if necessary.
1918 if (x.getType() != encompassingCIRTy)
1919 x = builder.createCast(cir::CastKind::integral, x, encompassingCIRTy);
1920 if (y.getType() != encompassingCIRTy)
1921 y = builder.createCast(cir::CastKind::integral, y, encompassingCIRTy);
1922
1923 // Perform the operation on the extended values.
1924 mlir::Location loc = getLoc(e->getSourceRange());
1925 mlir::Value result, overflow;
1926 switch (builtinID) {
1927 default:
1928 llvm_unreachable("Unknown overflow builtin id.");
1929 case Builtin::BI__builtin_add_overflow:
1930 std::tie(result, overflow) =
1931 emitOverflowOp<cir::AddOverflowOp>(builder, loc, resultCIRTy, x, y);
1932 break;
1933 case Builtin::BI__builtin_sub_overflow:
1934 std::tie(result, overflow) =
1935 emitOverflowOp<cir::SubOverflowOp>(builder, loc, resultCIRTy, x, y);
1936 break;
1937 case Builtin::BI__builtin_mul_overflow:
1938 std::tie(result, overflow) =
1939 emitOverflowOp<cir::MulOverflowOp>(builder, loc, resultCIRTy, x, y);
1940 break;
1941 }
1942
1943 // Here is a slight difference from the original clang CodeGen:
1944 // - In the original clang CodeGen, the checked arithmetic result is
1945 // first computed as a value of the encompassing type, and then it is
1946 // truncated to the actual result type with a second overflow checking.
1947 // - In CIRGen, the checked arithmetic operation directly produce the
1948 // checked arithmetic result in its expected type.
1949 //
1950 // So we don't need a truncation and a second overflow checking here.
1951
1952 // Finally, store the result using the pointer.
1953 bool isVolatile =
1954 resultArg->getType()->getPointeeType().isVolatileQualified();
1955 builder.createStore(loc, result, resultPtr, isVolatile);
1956
1957 return RValue::get(overflow);
1958 }
1959
1960 case Builtin::BI__builtin_uadd_overflow:
1961 case Builtin::BI__builtin_uaddl_overflow:
1962 case Builtin::BI__builtin_uaddll_overflow:
1963 case Builtin::BI__builtin_usub_overflow:
1964 case Builtin::BI__builtin_usubl_overflow:
1965 case Builtin::BI__builtin_usubll_overflow:
1966 case Builtin::BI__builtin_umul_overflow:
1967 case Builtin::BI__builtin_umull_overflow:
1968 case Builtin::BI__builtin_umulll_overflow:
1969 case Builtin::BI__builtin_sadd_overflow:
1970 case Builtin::BI__builtin_saddl_overflow:
1971 case Builtin::BI__builtin_saddll_overflow:
1972 case Builtin::BI__builtin_ssub_overflow:
1973 case Builtin::BI__builtin_ssubl_overflow:
1974 case Builtin::BI__builtin_ssubll_overflow:
1975 case Builtin::BI__builtin_smul_overflow:
1976 case Builtin::BI__builtin_smull_overflow:
1977 case Builtin::BI__builtin_smulll_overflow: {
1978 // Scalarize our inputs.
1979 mlir::Value x = emitScalarExpr(e->getArg(0));
1980 mlir::Value y = emitScalarExpr(e->getArg(1));
1981
1982 const clang::Expr *resultArg = e->getArg(2);
1983 Address resultPtr = emitPointerWithAlignment(resultArg);
1984
1985 clang::QualType resultQTy =
1986 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1987 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1988
1989 // Create the appropriate overflow-checked arithmetic operation.
1990 mlir::Location loc = getLoc(e->getSourceRange());
1991 mlir::Value result, overflow;
1992 switch (builtinID) {
1993 default:
1994 llvm_unreachable("Unknown overflow builtin id.");
1995 case Builtin::BI__builtin_uadd_overflow:
1996 case Builtin::BI__builtin_uaddl_overflow:
1997 case Builtin::BI__builtin_uaddll_overflow:
1998 case Builtin::BI__builtin_sadd_overflow:
1999 case Builtin::BI__builtin_saddl_overflow:
2000 case Builtin::BI__builtin_saddll_overflow:
2001 std::tie(result, overflow) =
2002 emitOverflowOp<cir::AddOverflowOp>(builder, loc, resultCIRTy, x, y);
2003 break;
2004 case Builtin::BI__builtin_usub_overflow:
2005 case Builtin::BI__builtin_usubl_overflow:
2006 case Builtin::BI__builtin_usubll_overflow:
2007 case Builtin::BI__builtin_ssub_overflow:
2008 case Builtin::BI__builtin_ssubl_overflow:
2009 case Builtin::BI__builtin_ssubll_overflow:
2010 std::tie(result, overflow) =
2011 emitOverflowOp<cir::SubOverflowOp>(builder, loc, resultCIRTy, x, y);
2012 break;
2013 case Builtin::BI__builtin_umul_overflow:
2014 case Builtin::BI__builtin_umull_overflow:
2015 case Builtin::BI__builtin_umulll_overflow:
2016 case Builtin::BI__builtin_smul_overflow:
2017 case Builtin::BI__builtin_smull_overflow:
2018 case Builtin::BI__builtin_smulll_overflow:
2019 std::tie(result, overflow) =
2020 emitOverflowOp<cir::MulOverflowOp>(builder, loc, resultCIRTy, x, y);
2021 break;
2022 }
2023
2024 bool isVolatile =
2025 resultArg->getType()->getPointeeType().isVolatileQualified();
2026 builder.createStore(loc, emitToMemory(result, resultQTy), resultPtr,
2027 isVolatile);
2028
2029 return RValue::get(overflow);
2030 }
2031
2032 case Builtin::BIaddressof:
2033 case Builtin::BI__addressof:
2034 case Builtin::BI__builtin_addressof:
2035 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2036 case Builtin::BI__builtin_function_start:
2037 return errorBuiltinNYI(*this, e, builtinID);
2038 case Builtin::BI__builtin_operator_new:
2040 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
2041 case Builtin::BI__builtin_operator_delete:
2043 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
2044 return RValue::get(nullptr);
2045 case Builtin::BI__builtin_is_aligned:
2046 case Builtin::BI__builtin_align_up:
2047 case Builtin::BI__builtin_align_down:
2048 case Builtin::BI__noop:
2049 case Builtin::BI__builtin_call_with_static_chain:
2050 case Builtin::BI_InterlockedExchange8:
2051 case Builtin::BI_InterlockedExchange16:
2052 case Builtin::BI_InterlockedExchange:
2053 case Builtin::BI_InterlockedExchangePointer:
2054 case Builtin::BI_InterlockedCompareExchangePointer:
2055 case Builtin::BI_InterlockedCompareExchangePointer_nf:
2056 case Builtin::BI_InterlockedCompareExchange8:
2057 case Builtin::BI_InterlockedCompareExchange16:
2058 case Builtin::BI_InterlockedCompareExchange:
2059 case Builtin::BI_InterlockedCompareExchange64:
2060 case Builtin::BI_InterlockedIncrement16:
2061 case Builtin::BI_InterlockedIncrement:
2062 case Builtin::BI_InterlockedDecrement16:
2063 case Builtin::BI_InterlockedDecrement:
2064 case Builtin::BI_InterlockedAnd8:
2065 case Builtin::BI_InterlockedAnd16:
2066 case Builtin::BI_InterlockedAnd:
2067 case Builtin::BI_InterlockedExchangeAdd8:
2068 case Builtin::BI_InterlockedExchangeAdd16:
2069 case Builtin::BI_InterlockedExchangeAdd:
2070 case Builtin::BI_InterlockedExchangeSub8:
2071 case Builtin::BI_InterlockedExchangeSub16:
2072 case Builtin::BI_InterlockedExchangeSub:
2073 case Builtin::BI_InterlockedOr8:
2074 case Builtin::BI_InterlockedOr16:
2075 case Builtin::BI_InterlockedOr:
2076 case Builtin::BI_InterlockedXor8:
2077 case Builtin::BI_InterlockedXor16:
2078 case Builtin::BI_InterlockedXor:
2079 case Builtin::BI_bittest64:
2080 case Builtin::BI_bittest:
2081 case Builtin::BI_bittestandcomplement64:
2082 case Builtin::BI_bittestandcomplement:
2083 case Builtin::BI_bittestandreset64:
2084 case Builtin::BI_bittestandreset:
2085 case Builtin::BI_bittestandset64:
2086 case Builtin::BI_bittestandset:
2087 case Builtin::BI_interlockedbittestandreset:
2088 case Builtin::BI_interlockedbittestandreset64:
2089 case Builtin::BI_interlockedbittestandreset64_acq:
2090 case Builtin::BI_interlockedbittestandreset64_rel:
2091 case Builtin::BI_interlockedbittestandreset64_nf:
2092 case Builtin::BI_interlockedbittestandset64:
2093 case Builtin::BI_interlockedbittestandset64_acq:
2094 case Builtin::BI_interlockedbittestandset64_rel:
2095 case Builtin::BI_interlockedbittestandset64_nf:
2096 case Builtin::BI_interlockedbittestandset:
2097 case Builtin::BI_interlockedbittestandset_acq:
2098 case Builtin::BI_interlockedbittestandset_rel:
2099 case Builtin::BI_interlockedbittestandset_nf:
2100 case Builtin::BI_interlockedbittestandreset_acq:
2101 case Builtin::BI_interlockedbittestandreset_rel:
2102 case Builtin::BI_interlockedbittestandreset_nf:
2103 case Builtin::BI__iso_volatile_load8:
2104 case Builtin::BI__iso_volatile_load16:
2105 case Builtin::BI__iso_volatile_load32:
2106 case Builtin::BI__iso_volatile_load64:
2107 case Builtin::BI__iso_volatile_store8:
2108 case Builtin::BI__iso_volatile_store16:
2109 case Builtin::BI__iso_volatile_store32:
2110 case Builtin::BI__iso_volatile_store64:
2111 case Builtin::BI__builtin_ptrauth_sign_constant:
2112 case Builtin::BI__builtin_ptrauth_auth:
2113 case Builtin::BI__builtin_ptrauth_auth_and_resign:
2114 case Builtin::BI__builtin_ptrauth_blend_discriminator:
2115 case Builtin::BI__builtin_ptrauth_sign_generic_data:
2116 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
2117 case Builtin::BI__builtin_ptrauth_strip:
2118 case Builtin::BI__builtin_get_vtable_pointer:
2119 case Builtin::BI__exception_code:
2120 case Builtin::BI_exception_code:
2121 case Builtin::BI__exception_info:
2122 case Builtin::BI_exception_info:
2123 case Builtin::BI__abnormal_termination:
2124 case Builtin::BI_abnormal_termination:
2125 case Builtin::BI_setjmpex:
2126 case Builtin::BI_setjmp:
2127 return errorBuiltinNYI(*this, e, builtinID);
2128 case Builtin::BImove:
2129 case Builtin::BImove_if_noexcept:
2130 case Builtin::BIforward:
2131 case Builtin::BIforward_like:
2132 case Builtin::BIas_const:
2133 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2134 case Builtin::BI__GetExceptionInfo:
2135 case Builtin::BI__fastfail:
2136 case Builtin::BIread_pipe:
2137 case Builtin::BIwrite_pipe:
2138 case Builtin::BIreserve_read_pipe:
2139 case Builtin::BIreserve_write_pipe:
2140 case Builtin::BIwork_group_reserve_read_pipe:
2141 case Builtin::BIwork_group_reserve_write_pipe:
2142 case Builtin::BIsub_group_reserve_read_pipe:
2143 case Builtin::BIsub_group_reserve_write_pipe:
2144 case Builtin::BIcommit_read_pipe:
2145 case Builtin::BIcommit_write_pipe:
2146 case Builtin::BIwork_group_commit_read_pipe:
2147 case Builtin::BIwork_group_commit_write_pipe:
2148 case Builtin::BIsub_group_commit_read_pipe:
2149 case Builtin::BIsub_group_commit_write_pipe:
2150 case Builtin::BIget_pipe_num_packets:
2151 case Builtin::BIget_pipe_max_packets:
2152 case Builtin::BIto_global:
2153 case Builtin::BIto_local:
2154 case Builtin::BIto_private:
2155 case Builtin::BIenqueue_kernel:
2156 case Builtin::BIget_kernel_work_group_size:
2157 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2158 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2159 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2160 case Builtin::BI__builtin_store_half:
2161 case Builtin::BI__builtin_store_halff:
2162 case Builtin::BI__builtin_load_half:
2163 case Builtin::BI__builtin_load_halff:
2164 return errorBuiltinNYI(*this, e, builtinID);
2165 case Builtin::BI__builtin_printf:
2166 case Builtin::BIprintf:
2167 break;
2168 case Builtin::BI__builtin_canonicalize:
2169 case Builtin::BI__builtin_canonicalizef:
2170 case Builtin::BI__builtin_canonicalizef16:
2171 case Builtin::BI__builtin_canonicalizel:
2172 case Builtin::BI__builtin_thread_pointer:
2173 case Builtin::BI__builtin_os_log_format:
2174 case Builtin::BI__xray_customevent:
2175 case Builtin::BI__xray_typedevent:
2176 case Builtin::BI__builtin_ms_va_start:
2177 case Builtin::BI__builtin_ms_va_end:
2178 case Builtin::BI__builtin_ms_va_copy:
2179 case Builtin::BI__builtin_get_device_side_mangled_name:
2180 return errorBuiltinNYI(*this, e, builtinID);
2181 }
2182
2183 // If this is an alias for a lib function (e.g. __builtin_sin), emit
2184 // the call using the normal call path, but using the unmangled
2185 // version of the function name.
2186 if (getContext().BuiltinInfo.isLibFunction(builtinID))
2187 return emitLibraryCall(*this, fd, e,
2188 cgm.getBuiltinLibFunction(fd, builtinID));
2189
2190 // If this is a predefined lib function (e.g. malloc), emit the call
2191 // using exactly the normal call path.
2192 if (getContext().BuiltinInfo.isPredefinedLibFunction(builtinID))
2193 return emitLibraryCall(*this, fd, e,
2194 emitScalarExpr(e->getCallee()).getDefiningOp());
2195
2196 // See if we have a target specific intrinsic.
2197 std::string name = getContext().BuiltinInfo.getName(builtinID);
2198 Intrinsic::ID intrinsicID = Intrinsic::not_intrinsic;
2199 StringRef prefix =
2200 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
2201 if (!prefix.empty()) {
2202 intrinsicID = Intrinsic::getIntrinsicForClangBuiltin(prefix.data(), name);
2203 // NOTE we don't need to perform a compatibility flag check here since the
2204 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
2205 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
2206 if (intrinsicID == Intrinsic::not_intrinsic)
2207 intrinsicID = Intrinsic::getIntrinsicForMSBuiltin(prefix.data(), name);
2208 }
2209
2210 if (intrinsicID != Intrinsic::not_intrinsic) {
2211 unsigned iceArguments = 0;
2213 getContext().GetBuiltinType(builtinID, error, &iceArguments);
2214 assert(error == ASTContext::GE_None && "Should not codegen an error");
2215
2216 llvm::StringRef name = llvm::Intrinsic::getName(intrinsicID);
2217 // cir::LLVMIntrinsicCallOp expects intrinsic name to not have prefix
2218 // "llvm." For example, `llvm.nvvm.barrier0` should be passed as
2219 // `nvvm.barrier0`.
2220 assert(name.starts_with("llvm.") && "expected llvm. prefix");
2221 name = name.drop_front(/*strlen("llvm.")=*/5);
2222
2223 cir::FuncType intrinsicType =
2224 getIntrinsicType(*this, &getMLIRContext(), intrinsicID);
2225
2227 const FunctionDecl *fd = e->getDirectCallee();
2228 for (unsigned i = 0; i < e->getNumArgs(); i++) {
2229 mlir::Value argValue =
2230 emitScalarOrConstFoldImmArg(iceArguments, i, e->getArg(i));
2231 // If the intrinsic arg type is different from the builtin arg type
2232 // we need to do a bit cast.
2233 mlir::Type argType = argValue.getType();
2234 mlir::Type expectedTy = intrinsicType.getInput(i);
2235
2236 // Correct integer signedness based on AST parameter type
2237 mlir::Type correctedExpectedTy = expectedTy;
2238 if (fd && i < fd->getNumParams()) {
2239 correctedExpectedTy = correctIntegerSignedness(
2240 expectedTy, fd->getParamDecl(i)->getType(), &getMLIRContext());
2241 }
2242
2243 if (mlir::isa<cir::PointerType>(expectedTy)) {
2244 bool argIsPointer = mlir::isa<cir::PointerType>(argType);
2245 bool argIsVectorOfPointer = false;
2246 if (auto vecTy = dyn_cast<mlir::VectorType>(argType))
2247 argIsVectorOfPointer =
2248 mlir::isa<cir::PointerType>(vecTy.getElementType());
2249
2250 if (!argIsPointer && !argIsVectorOfPointer) {
2251 cgm.errorNYI(
2252 e->getSourceRange(),
2253 "intrinsic expects a pointer type (NYI for non-pointer)");
2254 return getUndefRValue(e->getType());
2255 }
2256
2257 // Pointer handling (address-space cast / bitcast fallback).
2258 if (argType != expectedTy)
2259 argValue = getCorrectedPtr(argValue, expectedTy, builder);
2260 } else {
2261 // Non-pointer expected type: if needed, bitcast to the corrected
2262 // expected type to match signedness/representation.
2263 if (argType != correctedExpectedTy)
2264 argValue = builder.createBitcast(argValue, correctedExpectedTy);
2265 }
2266
2267 args.push_back(argValue);
2268 }
2269
2270 // Correct return type signedness based on AST return type before creating
2271 // the call, avoiding unnecessary casts in the IR.
2272 mlir::Type correctedReturnType = intrinsicType.getReturnType();
2273 if (fd) {
2274 correctedReturnType =
2275 correctIntegerSignedness(intrinsicType.getReturnType(),
2276 fd->getReturnType(), &getMLIRContext());
2277 }
2278
2279 cir::LLVMIntrinsicCallOp intrinsicCall = cir::LLVMIntrinsicCallOp::create(
2280 builder, getLoc(e->getExprLoc()), builder.getStringAttr(name),
2281 correctedReturnType, args);
2282
2283 mlir::Value intrinsicRes = intrinsicCall.getResult();
2284
2285 if (isa<cir::VoidType>(correctedReturnType))
2286 return RValue::get(nullptr);
2287
2288 return RValue::get(intrinsicRes);
2289 }
2290
2291 // Some target-specific builtins can have aggregate return values, e.g.
2292 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
2293 // returnValue to be non-null, so that the target-specific emission code can
2294 // always just emit into it.
2296 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
2297 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2298 return getUndefRValue(e->getType());
2299 }
2300
2301 // Now see if we can emit a target-specific builtin.
2302 // FIXME: This is a temporary mechanism (double-optional semantics) that will
2303 // go away once everything is implemented:
2304 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
2305 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
2306 // but also didn't handle the builtin.
2307 if (std::optional<mlir::Value> rst =
2308 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
2309 mlir::Value v = rst.value();
2310 // CIR dialect operations may have no results, no values will be returned
2311 // even if it executes successfully.
2312 if (!v)
2313 return RValue::get(nullptr);
2314
2315 switch (evalKind) {
2316 case cir::TEK_Scalar:
2317 if (mlir::isa<cir::VoidType>(v.getType()))
2318 return RValue::get(nullptr);
2319 return RValue::get(v);
2320 case cir::TEK_Aggregate:
2321 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2322 return getUndefRValue(e->getType());
2323 case cir::TEK_Complex:
2324 llvm_unreachable("No current target builtin returns complex");
2325 }
2326 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
2327 }
2328
2329 cgm.errorNYI(e->getSourceRange(),
2330 std::string("unimplemented builtin call: ") +
2331 getContext().BuiltinInfo.getName(builtinID));
2332 return getUndefRValue(e->getType());
2333}
2334
2335static std::optional<mlir::Value>
2337 const CallExpr *e, ReturnValueSlot &returnValue,
2338 llvm::Triple::ArchType arch) {
2339 // When compiling in HipStdPar mode we have to be conservative in rejecting
2340 // target specific features in the FE, and defer the possible error to the
2341 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2342 // referenced by an accelerator executable function, we emit an error.
2343 // Returning nullptr here leads to the builtin being handled in
2344 // EmitStdParUnsupportedBuiltin.
2345 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
2346 arch != cgf->getTarget().getTriple().getArch())
2347 return std::nullopt;
2348
2349 switch (arch) {
2350 case llvm::Triple::arm:
2351 case llvm::Triple::armeb:
2352 case llvm::Triple::thumb:
2353 case llvm::Triple::thumbeb:
2354 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2355 // At this point, we don't even know that the builtin is target-specific.
2356 return std::nullopt;
2357 case llvm::Triple::aarch64:
2358 case llvm::Triple::aarch64_32:
2359 case llvm::Triple::aarch64_be:
2360 return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
2361 case llvm::Triple::bpfeb:
2362 case llvm::Triple::bpfel:
2363 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2364 // At this point, we don't even know that the builtin is target-specific.
2365 return std::nullopt;
2366
2367 case llvm::Triple::x86:
2368 case llvm::Triple::x86_64:
2369 return cgf->emitX86BuiltinExpr(builtinID, e);
2370
2371 case llvm::Triple::ppc:
2372 case llvm::Triple::ppcle:
2373 case llvm::Triple::ppc64:
2374 case llvm::Triple::ppc64le:
2375 case llvm::Triple::r600:
2376 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2377 // At this point, we don't even know that the builtin is target-specific.
2378 return std::nullopt;
2379 case llvm::Triple::amdgcn:
2380 return cgf->emitAMDGPUBuiltinExpr(builtinID, e);
2381 case llvm::Triple::systemz:
2382 case llvm::Triple::nvptx:
2383 case llvm::Triple::nvptx64:
2384 case llvm::Triple::wasm32:
2385 case llvm::Triple::wasm64:
2386 case llvm::Triple::hexagon:
2387 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2388 // At this point, we don't even know that the builtin is target-specific.
2389 return std::nullopt;
2390 case llvm::Triple::riscv32:
2391 case llvm::Triple::riscv64:
2392 return cgf->emitRISCVBuiltinExpr(builtinID, e);
2393 default:
2394 return std::nullopt;
2395 }
2396}
2397
2398std::optional<mlir::Value>
2401 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
2402 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
2404 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
2405 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
2406 }
2407
2408 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
2409 getTarget().getTriple().getArch());
2410}
2411
2413 const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
2414 mlir::Value arg = {};
2415 if ((iceArguments & (1 << idx)) == 0) {
2416 arg = emitScalarExpr(argExpr);
2417 } else {
2418 // If this is required to be a constant, constant fold it so that we
2419 // know that the generated intrinsic gets a ConstantInt.
2420 const std::optional<llvm::APSInt> result =
2422 assert(result && "Expected argument to be a constant");
2423 arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
2424 }
2425 return arg;
2426}
2427
2428/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
2429/// for "fabsf".
2431 unsigned builtinID) {
2432 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
2433
2434 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
2435 // to build this up so provide a small stack buffer to handle the vast
2436 // majority of names.
2438
2440 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
2441
2442 GlobalDecl d(fd);
2443 mlir::Type type = convertType(fd->getType());
2444 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
2445}
2446
2448 mlir::Value argValue = evaluateExprAsBool(e);
2449 if (!sanOpts.has(SanitizerKind::Builtin))
2450 return argValue;
2451
2453 cgm.errorNYI(e->getSourceRange(),
2454 "emitCheckedArgForAssume: sanitizers are NYI");
2455 return {};
2456}
2457
2458void CIRGenFunction::emitVAStart(mlir::Value vaList) {
2459 // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
2460 // early, defer to LLVM lowering.
2461 cir::VAStartOp::create(builder, vaList.getLoc(), vaList);
2462}
2463
2464void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
2465 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
2466}
2467
2468// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
2469// default this lowers to llvm.va_arg which is incomplete and not ABI-compliant
2470// on most targets so cir.va_arg will need some ABI handling in LoweringPrepare
2472 assert(!cir::MissingFeatures::msabi());
2473 assert(!cir::MissingFeatures::vlas());
2474 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
2475 mlir::Type type = convertType(ve->getType());
2476 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
2477 return cir::VAArgOp::create(builder, loc, type, vaList);
2478}
2479
/// Emit a cir.objsize operation implementing __builtin_object_size /
/// __builtin_dynamic_object_size for the pointer expression \p e.
///
/// \param e the pointer argument of the builtin.
/// \param type the builtin's mode argument (0-3): bit 1 selects min (1) vs
///        max (0) semantics, bit 0 requests closest-subobject semantics
///        (unsupported; type 3 is folded to a constant below).
/// \param resType the integer type the builtin's result must have.
/// \param emittedE if non-null, the already-emitted value of \p e; when set,
///        \p e is not re-evaluated.
/// \param isDynamic true for __builtin_dynamic_object_size.
mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
                                                  cir::IntType resType,
                                                  mlir::Value emittedE,
                                                  bool isDynamic) {

  // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate e for side-effects. In either case, just like original LLVM
  // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
  // The "unknown" constant is 0 for min modes (bit 1 set) and -1 for max modes.
  if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
    return builder.getConstInt(getLoc(e->getSourceRange()), resType,
                               (type & 2) ? 0 : -1);

  // Reuse the caller-provided value when available to avoid double evaluation.
  mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
  assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
         "Non-pointer passed to __builtin_object_size?");

  // Extract the min/max mode from type. CIR only supports type 0
  // (max, whole object) and type 2 (min, whole object), not type 1 or 3
  // (closest subobject variants).
  const bool min = ((type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  auto op =
      cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
                             min, /*nullUnknown=*/true, isDynamic);
  return op.getResult();
}
2509
2511 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
2512 bool isDynamic) {
2513 if (std::optional<uint64_t> objectSize =
2515 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
2516 *objectSize);
2517 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
2518}
static StringRef bytes(const std::vector< T, Allocator > &v)
Defines enum values for all the target-independent builtin functions.
static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf, mlir::Value val)
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Type decodeFixedType(CIRGenFunction &cgf, ArrayRef< llvm::Intrinsic::IITDescriptor > &infos, mlir::MLIRContext *context)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e, bool invert=false)
static std::optional< mlir::Value > emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID, const CallExpr *e, ReturnValueSlot &returnValue, llvm::Triple::ArchType arch)
static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryAtomic(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e)
static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t, cir::IntType intType)
Emit the conversions required to turn the given value into an integer of the given size.
static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy, CIRGenBuilderTy &builder)
static std::pair< mlir::Value, mlir::Value > emitOverflowOp(CIRGenBuilderTy &builder, mlir::Location loc, cir::IntType resultTy, mlir::Value lhs, mlir::Value rhs)
Create a checked overflow arithmetic op and return its result and overflow flag.
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, bool poisonZero=false)
static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr, cir::SyncScopeKind syncScope)
static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e)
static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static cir::FuncType getIntrinsicType(CIRGenFunction &cgf, mlir::MLIRContext *context, llvm::Intrinsic::ID id)
static mlir::Value makeBinaryAtomicValue(CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, mlir::Type *originalArgType=nullptr, mlir::Value *emittedArgValue=nullptr, cir::MemOrder ordering=cir::MemOrder::SequentiallyConsistent)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType, mlir::MLIRContext *context)
Helper function to correct integer signedness for intrinsic arguments and return type.
static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t, mlir::Type resultType)
static StringRef getTriple(const Command &Job)
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ int min(int __a, int __b)
__device__ __2f16 b
__device__ __2f16 float c
cir::SignBitOp createSignBit(mlir::Location loc, mlir::Value val)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy)
cir::PointerType getPointerTo(mlir::Type ty)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createNot(mlir::Location loc, mlir::Value value)
cir::PointerType getVoidPtrTy(clang::LangAS langAS=clang::LangAS::Default)
mlir::Value createAddrSpaceCast(mlir::Location loc, mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
APSInt & getInt()
Definition APValue.h:508
bool isFloat() const
Definition APValue.h:486
bool isInt() const
Definition APValue.h:485
APFloat & getFloat()
Definition APValue.h:522
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
unsigned getIntWidth(QualType T) const
CanQualType VoidPtrTy
Builtin::Context & BuiltinInfo
Definition ASTContext.h:800
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isLibFunction(unsigned ID) const
Return true if this is a builtin for a libc/libm function, with a "__builtin_" prefix (e....
Definition Builtins.h:309
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
cir::IntType getSIntNTy(int n)
cir::PointerType getUInt8PtrTy()
cir::IntType getUIntNTy(int n)
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
mlir::Type convertType(clang::QualType t)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
std::optional< mlir::Value > emitRISCVBuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
mlir::MLIRContext & getMLIRContext()
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
clang::ASTContext & getContext() const
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
clang::ASTContext & getASTContext() const
mlir::Type convertType(clang::QualType type)
clang::DiagnosticsEngine & getDiags() const
cir::FuncOp getBuiltinLibFunction(const FunctionDecl *fd, unsigned builtinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
const llvm::Triple & getTriple() const
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
cir::FuncOp getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType, clang::GlobalDecl gd, bool forVTable, bool dontDefer=false, bool isThunk=false, ForDefinition_t isForDefinition=NotForDefinition, mlir::NamedAttrList extraAttrs={})
mlir::Value getPointer() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
bool isIgnored() const
Definition CIRGenValue.h:52
static RValue getIgnored()
Definition CIRGenValue.h:78
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents difference between two FPOptions values.
Represents a function declaration or definition.
Definition Decl.h:2015
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2812
QualType getReturnType() const
Definition Decl.h:2860
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:748
bool isBlockPointerType() const
Definition TypeBase.h:8688
bool isPointerType() const
Definition TypeBase.h:8668
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9156
bool isObjCObjectPointerType() const
Definition TypeBase.h:8847
bool isFloatingType() const
Definition Type.cpp:2342
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2285
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
const Expr * getSubExpr() const
Definition Expr.h:4976
QualType getType() const
Definition Decl.h:723
bool isMatchingAddressSpace(mlir::ptr::MemorySpaceAttrInterface cirAS, clang::LangAS as)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
static bool builtinCheckKind()
static bool addressSpace()
static bool asmLabelAttr()
static bool msvcBuiltins()
static bool builtinCallF128()
static bool isPPC_FP128Ty()
static bool emitCheckedInBoundsGEP()
static bool fpConstraints()
static bool countedBySize()
static bool opCallImplicitObjectSizeArgs()
static bool fastMathFlags()
static bool builtinCall()
static bool generateDebugInfo()
cir::PointerType allocaInt8PtrTy
void* in alloca address space
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
cir::PointerType voidPtrTy
void* in address space 0
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool hasSideEffects() const
Return true if the evaluated expression has side effects.
Definition Expr.h:642
#define conj(__x)
Definition tgmath.h:1303