clang 23.0.0git
CIRGenBuiltin.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/Support/ErrorHandling.h"
31
32using namespace clang;
33using namespace clang::CIRGen;
34using namespace llvm;
35
// Emit a builtin as a plain call to its library implementation: wrap
// `calleeValue` as a direct CIRGenCallee for `fd` and emit an ordinary call
// with no pre-allocated return slot.
// NOTE(review): the opening line of this function's signature (source line
// 36) is missing from this extraction; only the trailing parameters and the
// body are visible — recover it from the upstream file before editing.
37 const CallExpr *e, mlir::Operation *calleeValue) {
38 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
39 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
40}
41
// Emit one of the CIR unary bit-manipulation ops for an integer builtin.
// BitClzOp/BitCtzOp take an extra `poisonZero` flag (whether a zero input is
// poison); every other Op is built from the argument alone. The op result is
// cast back to the call expression's type when the types differ.
// NOTE(review): source lines 43 and 45 are missing from this extraction (the
// rest of the signature and, presumably, an assertion).
42template <typename Op>
44 bool poisonZero = false) {
46
47 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
48 CIRGenBuilderTy &builder = cgf.getBuilder();
49
50 Op op;
51 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
52 std::is_same_v<Op, cir::BitCtzOp>)
53 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
54 else
55 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
56
57 mlir::Value result = op.getResult();
58 mlir::Type exprTy = cgf.convertType(e->getType());
// The builtin's result type may be narrower/wider than the op result;
// reconcile with an integer cast.
59 if (exprTy != result.getType())
60 result = builder.createIntCast(result, exprTy);
61
62 return RValue::get(result);
63}
64
65/// Emit the conversions required to turn the given value into an
66/// integer of the given size.
67static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
68 cir::IntType intType) {
69 v = cgf.emitToMemory(v, t);
70
71 if (mlir::isa<cir::PointerType>(v.getType()))
72 return cgf.getBuilder().createPtrToInt(v, intType);
73
74 assert(v.getType() == intType);
75 return v;
76}
77
78static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
79 mlir::Type resultType) {
80 v = cgf.emitFromMemory(v, t);
81
82 if (mlir::isa<cir::PointerType>(resultType))
83 return cgf.getBuilder().createIntToPtr(v, resultType);
84
85 assert(v.getType() == resultType);
86 return v;
87}
88
// Build a cir.signbit op over `val` at `loc` and return its single result.
// NOTE(review): source line 91 is missing from this extraction (presumably an
// assertion between the signature and the op creation).
89static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf,
90 mlir::Value val) {
92 cir::SignBitOp returnValue = cgf.getBuilder().createSignBit(loc, val);
93 return returnValue->getResult(0);
94}
95
// Check that the pointer operand of a __sync/__atomic builtin is naturally
// aligned for the size of its element type; emit warn_sync_op_misaligned
// otherwise and return the (possibly realigned) address.
// NOTE(review): source lines 96 (the signature — presumably it computes an
// Address `ptr` from the call's first argument), 98, 102–103 (the non-pointer
// arm of the `bytes` ternary) and 110 (the realignment statement announced by
// the comment below) are missing from this extraction.
97 ASTContext &astContext = cgf.getContext();
99 unsigned bytes =
100 mlir::isa<cir::PointerType>(ptr.getElementType())
101 ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
104
105 unsigned align = ptr.getAlignment().getQuantity();
// Misaligned atomics are diagnosed (not an error) and then forced to the
// natural alignment.
106 if (align % bytes != 0) {
107 DiagnosticsEngine &diags = cgf.cgm.getDiags();
108 diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
109 // Force address to be at least naturally-aligned.
111 }
112 return ptr;
113}
114
115/// Utility to insert an atomic instruction based on Intrinsic::ID
116/// and the expression node.
// Emits a cir.atomic.fetch of `kind` on the builtin's pointer operand with
// the builtin's second argument as the RHS, returning the *old* value
// (fetch-first). When `emittedArgValue`/`originalArgType` are provided, the
// converted RHS and its pre-conversion type are reported back so
// <binop>_and_fetch callers can recompute the post value.
// NOTE(review): source lines 128–129 (the asserted condition) and 151 (the
// signedness condition of the int-type ternary) are missing from this
// extraction.
117static mlir::Value makeBinaryAtomicValue(
118 CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
119 mlir::Type *originalArgType = nullptr,
120 mlir::Value *emittedArgValue = nullptr,
121 cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
122
123 QualType type = expr->getType();
124 QualType ptrType = expr->getArg(0)->getType();
125
126 assert(ptrType->isPointerType());
127 assert(
130 expr->getArg(1)->getType()));
131
132 Address destAddr = checkAtomicAlignment(cgf, expr);
133 CIRGenBuilderTy &builder = cgf.getBuilder();
134
135 mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
136 mlir::Type valueType = val.getType();
137 mlir::Value destValue = destAddr.emitRawPointer();
138
139 if (ptrType->getPointeeType()->isPointerType()) {
140 // Pointer to pointer
141 // `cir.atomic.fetch` expects a pointer to an integer type, so we cast
142 // ptr<ptr<T>> to ptr<intPtrSize>
143 cir::IntType ptrSizeInt =
144 builder.getSIntNTy(cgf.getContext().getTypeSize(ptrType));
145 destValue =
146 builder.createBitcast(destValue, builder.getPointerTo(ptrSizeInt));
147 val = emitToInt(cgf, val, type, ptrSizeInt);
148 } else {
149 // Pointer to integer type
150 cir::IntType intType =
152 ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
153 : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
154 val = emitToInt(cgf, val, type, intType);
155 }
156
157 // This output argument is needed for post atomic fetch operations
158 // that calculate the result of the operation as return value of
159 // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
160 // memory location and returns the old value.
161 if (emittedArgValue) {
162 *emittedArgValue = val;
163 assert(originalArgType != nullptr &&
164 "originalArgType must be provided when emittedArgValue is set");
165 *originalArgType = valueType;
166 }
167
168 auto rmwi = cir::AtomicFetchOp::create(
169 builder, cgf.getLoc(expr->getSourceRange()), destValue, val, kind,
170 ordering, cir::SyncScopeKind::System, false, /* is volatile */
171 true); /* fetch first */
172 return rmwi->getResult(0);
173}
174
// Thin RValue wrapper around makeBinaryAtomicValue for the fetch-first
// (__sync_fetch_and_<op>-style) builtins.
// NOTE(review): the first line of the signature (source line 175) is missing
// from this extraction.
176 cir::AtomicFetchKind atomicOpkind,
177 const CallExpr *e) {
178 return RValue::get(makeBinaryAtomicValue(cgf, atomicOpkind, e));
179}
180
// Emit a <binop>_and_fetch-style builtin: perform the atomic fetch, then
// recompute the *new* value by applying BinOp to the returned old value and
// the emitted argument (optionally bitwise-negated for nand), and convert it
// back to the expression's type.
// NOTE(review): source lines 182 (rest of the signature) and 190 (presumably
// the declaration of `builder`, which line 191 uses) are missing from this
// extraction.
181template <typename BinOp>
183 cir::AtomicFetchKind atomicOpkind,
184 const CallExpr *e, bool invert = false) {
185 mlir::Value emittedArgValue;
186 mlir::Type originalArgType;
187 clang::QualType typ = e->getType();
188 mlir::Value result = makeBinaryAtomicValue(
189 cgf, atomicOpkind, e, &originalArgType, &emittedArgValue);
191 result = BinOp::create(builder, result.getLoc(), result, emittedArgValue);
192
193 if (invert)
194 result = builder.createNot(result);
195
196 result = emitFromInt(cgf, result, typ, originalArgType);
197 return RValue::get(result);
198}
199
// Emit a cir.atomic.fence for an atomic fence builtin: the memory-order
// argument (arg 0) is evaluated via emitAtomicExprWithMemOrder, which invokes
// the callback once per applicable order to create the fence op with the
// given sync scope.
// NOTE(review): the first line of the signature (source line 200) is missing
// from this extraction.
201 cir::SyncScopeKind syncScope) {
202 CIRGenBuilderTy &builder = cgf.getBuilder();
203 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
204
205 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
206 cir::AtomicFenceOp::create(
207 builder, loc, memOrder,
208 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
209 };
210
211 cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
212 /*isLoad*/ false, /*isFence*/ true,
213 emitAtomicOpCallBackFn);
214}
215
namespace {
/// The bit-width and signedness of an integer type, as used below to pick an
/// encompassing type for the checked-arithmetic builtins.
struct WidthAndSignedness {
  unsigned width; // Width in bits.
  bool isSigned;  // True for signed integer types.
};
} // namespace
222
// Compute the {width, signedness} of an integer type: booleans count as one
// bit, _BitInt uses its declared width, and everything else uses the type's
// TypeInfo width.
// NOTE(review): source line 224 (the function name and first parameter —
// presumably the ASTContext) is missing from this extraction.
223static WidthAndSignedness
225 const clang::QualType type) {
226 assert(type->isIntegerType() && "Given type is not an integer.");
227 unsigned width = type->isBooleanType() ? 1
228 : type->isBitIntType() ? astContext.getIntWidth(type)
229 : astContext.getTypeInfo(type).Width;
230 bool isSigned = type->isSignedIntegerType();
231 return {width, isSigned};
232}
233
234/// Create a checked overflow arithmetic op and return its result and overflow
235/// flag.
236template <typename OpTy>
237static std::pair<mlir::Value, mlir::Value>
238emitOverflowOp(CIRGenBuilderTy &builder, mlir::Location loc,
239 cir::IntType resultTy, mlir::Value lhs, mlir::Value rhs) {
240 auto op = OpTy::create(builder, loc, resultTy, lhs, rhs);
241 return {op.getResult(), op.getOverflow()};
242}
243
244// Given one or more integer types, this function produces an integer type that
245// encompasses them: any value in one of the given types could be expressed in
246// the encompassing type.
247static struct WidthAndSignedness
248EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
249 assert(types.size() > 0 && "Empty list of types.");
250
251 // If any of the given types is signed, we must return a signed type.
252 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
253
254 // The encompassing type must have a width greater than or equal to the width
255 // of the specified types. Additionally, if the encompassing type is signed,
256 // its width must be strictly greater than the width of any unsigned types
257 // given.
258 unsigned width = 0;
259 for (const auto &type : types)
260 width = std::max(width, type.width + (isSigned && !type.isSigned));
261
262 return {width, isSigned};
263}
264
// Emit a cir.rotate for the rotate-left/rotate-right builtins; `isRotateLeft`
// selects the direction.
// NOTE(review): source line 272 is missing from this extraction (presumably
// an assertion or cast tied to the TODO below).
265RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
266 mlir::Value input = emitScalarExpr(e->getArg(0));
267 mlir::Value amount = emitScalarExpr(e->getArg(1));
268
269 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
270 // and amount, but cir.rotate requires them to have the same type. Cast amount
271 // to the type of input when necessary.
273
274 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
275 amount, isRotateLeft);
276 return RValue::get(r);
277}
278
// Emit a unary floating-point builtin as the CIR op `Operation`, installing
// the call's FP options (via CIRGenFPOptionsRAII) around the op creation.
// NOTE(review): source lines 280 (rest of the signature) and 285 are missing
// from this extraction.
279template <class Operation>
281 const CallExpr &e) {
282 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
283
284 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, &e);
286
287 auto call =
288 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
289 return RValue::get(call->getResult(0));
290}
291
// Emit a unary floating-point builtin as the CIR op `Operation`, with no FP
// options handling (contrast with the RAII variant above).
// NOTE(review): source line 293 (rest of the signature) is missing from this
// extraction.
292template <class Operation>
294 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
295 auto call =
296 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
297 return RValue::get(call->getResult(0));
298}
299
// Emit a unary builtin whose result type differs from its operand type (e.g.
// FP-to-integer rounding builtins): the op is created with the converted
// expression type as its result.
// NOTE(review): source lines 301 (rest of the signature) and 306 are missing
// from this extraction.
300template <typename Op>
302 const CallExpr &e) {
303 mlir::Type resultType = cgf.convertType(e.getType());
304 mlir::Value src = cgf.emitScalarExpr(e.getArg(0));
305
307
308 auto call = Op::create(cgf.getBuilder(), src.getLoc(), resultType, src);
309 return RValue::get(call->getResult(0));
310}
311
// Emit a two-argument builtin as the CIR op `Op`, typed with the call
// expression's converted result type.
// NOTE(review): source line 313 (rest of the signature) is missing from this
// extraction.
312template <typename Op>
314 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
315 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
316
317 mlir::Location loc = cgf.getLoc(e.getExprLoc());
318 mlir::Type ty = cgf.convertType(e.getType());
319 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
320
321 return RValue::get(call->getResult(0));
322}
323
// Emit a two-argument builtin as the CIR op `Op`, returning the raw
// mlir::Value rather than an RValue (callers wrap it themselves).
// NOTE(review): source lines 325 (rest of the signature) and 334 are missing
// from this extraction.
324template <typename Op>
326 const CallExpr &e) {
327 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
328 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
329
330 mlir::Location loc = cgf.getLoc(e.getExprLoc());
331 mlir::Type ty = cgf.convertType(e.getType());
332
334
335 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
336 return call->getResult(0);
337}
338
// X86 target builtins are not yet implemented in CIR: report an NYI error
// (distinguishing library-function builtins in the message) and return an
// undef RValue of the call's type so codegen can continue.
// NOTE(review): source line 339 (the first line of the signature) is missing
// from this extraction.
340 unsigned builtinID) {
341
342 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
343 cgf.cgm.errorNYI(
344 e->getSourceRange(),
345 std::string("unimplemented X86 library function builtin call: ") +
346 cgf.getContext().BuiltinInfo.getName(builtinID));
347 } else {
348 cgf.cgm.errorNYI(e->getSourceRange(),
349 std::string("unimplemented X86 builtin call: ") +
350 cgf.getContext().BuiltinInfo.getName(builtinID));
351 }
352
353 return cgf.getUndefRValue(e->getType());
354}
355
// Emit the alloca family of builtins (__builtin_alloca[_uninitialized],
// alloca, _alloca): create a dynamically-sized u8 stack allocation at
// __BIGGEST_ALIGNMENT__ and bitcast it to void* for the caller.
// NOTE(review): several source lines are missing from this extraction: 356
// (first line of the signature), 368 (the initializer computing
// `suitableAlignmentInBytes` from `ti`), 388 and 391–392 (the case labels of
// the TrivialAutoVarInit switch), and 404–406 (the condition guarding the
// address-space NYI branch).
357 unsigned builtinID) {
358 assert(builtinID == Builtin::BI__builtin_alloca ||
359 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
360 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
361
362 // Get alloca size input
363 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
364
365 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
366 const TargetInfo &ti = cgf.getContext().getTargetInfo();
367 const CharUnits suitableAlignmentInBytes =
369
370 // Emit the alloca op with type `u8 *` to match the semantics of
371 // `llvm.alloca`. We later bitcast the type to `void *` to match the
372 // semantics of C/C++
373 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
374 // pointer of type `void *`. This will require a change to the allocaOp
375 // verifier.
376 CIRGenBuilderTy &builder = cgf.getBuilder();
377 mlir::Value allocaAddr = builder.createAlloca(
378 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
379 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
380
381 // Initialize the allocated buffer if required.
382 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
383 // Initialize the alloca with the given size and alignment according to
384 // the lang opts. Only the trivial non-initialization is supported for
385 // now.
386
387 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
389 // Nothing to initialize.
390 break;
393 cgf.cgm.errorNYI("trivial auto var init");
394 break;
395 }
396 }
397
398 // An alloca will always return a pointer to the alloca (stack) address
399 // space. This address space need not be the same as the AST / Language
400 // default (e.g. in C / C++ auto vars are in the generic address space). At
401 // the AST level this is handled within CreateTempAlloca et al., but for the
402 // builtin / dynamic alloca we have to handle it here.
403
407 cgf.cgm.errorNYI(e->getSourceRange(),
408 "Address Space Cast for builtin alloca");
409 }
410
411 // Bitcast the alloca to the expected type.
412 return RValue::get(builder.createBitcast(
413 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
414}
415
// Decide whether a floating-point math builtin may be emitted as a CIR
// intrinsic, based on math-errno handling: a pragma float_control override
// stored on the call, __attribute__((optnone)) on the current function, and
// the optimization level all feed into the final decision.
// NOTE(review): source lines 416 (first line of the signature), 423 (the
// declaration of `op`, the stored FP features read below) and 433 (the call
// whose trailing arguments appear on lines 434–435) are missing from this
// extraction.
417 unsigned builtinID) {
418 std::optional<bool> errnoOverriden;
419 // ErrnoOverriden is true if math-errno is overriden via the
420 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
421 // which implies math-errno.
422 if (e->hasStoredFPFeatures()) {
424 if (op.hasMathErrnoOverride())
425 errnoOverriden = op.getMathErrnoOverride();
426 }
427 // True if 'attribute__((optnone))' is used. This attribute overrides
428 // fast-math which implies math-errno.
429 bool optNone =
430 cgf.curFuncDecl && cgf.curFuncDecl->hasAttr<OptimizeNoneAttr>();
431 bool isOptimizationEnabled = cgf.cgm.getCodeGenOpts().OptimizationLevel != 0;
432 bool generateFPMathIntrinsics =
434 builtinID, cgf.cgm.getTriple(), errnoOverriden,
435 cgf.getLangOpts().MathErrno, optNone, isOptimizationEnabled);
436 return generateFPMathIntrinsics;
437}
438
// Attempt to emit a floating-point math builtin directly as a CIR intrinsic
// op. The final `RValue::getIgnored()` acts as a sentinel meaning "no
// intrinsic was generated": the caller (see emitBuiltinExpr) tests
// rv.isIgnored() and falls back to a regular call in that case.
// NOTE(review): this function is heavily truncated by the extraction — the
// first line of the signature (source line 439), line 441, and most of the
// `return emit...` statements that terminate each case group (lines 452, 462,
// 472, 483, 492, 503, 512, 532, 543, 560, 569, 590, 600, 628, 638, 648, 658,
// 667, 677, 689, 699, 709, 719, 750, 760, 780, 788, 796, 804, 812, etc.) are
// missing, which makes many case runs look like fall-throughs that are not
// fall-throughs in the original. Recover the bodies from the upstream file
// before modifying any case.
440 unsigned builtinID) {
442 switch (builtinID) {
443 case Builtin::BIacos:
444 case Builtin::BIacosf:
445 case Builtin::BIacosl:
446 case Builtin::BI__builtin_acos:
447 case Builtin::BI__builtin_acosf:
448 case Builtin::BI__builtin_acosf16:
449 case Builtin::BI__builtin_acosl:
450 case Builtin::BI__builtin_acosf128:
451 case Builtin::BI__builtin_elementwise_acos:
453 case Builtin::BIasin:
454 case Builtin::BIasinf:
455 case Builtin::BIasinl:
456 case Builtin::BI__builtin_asin:
457 case Builtin::BI__builtin_asinf:
458 case Builtin::BI__builtin_asinf16:
459 case Builtin::BI__builtin_asinl:
460 case Builtin::BI__builtin_asinf128:
461 case Builtin::BI__builtin_elementwise_asin:
463 case Builtin::BIatan:
464 case Builtin::BIatanf:
465 case Builtin::BIatanl:
466 case Builtin::BI__builtin_atan:
467 case Builtin::BI__builtin_atanf:
468 case Builtin::BI__builtin_atanf16:
469 case Builtin::BI__builtin_atanl:
470 case Builtin::BI__builtin_atanf128:
471 case Builtin::BI__builtin_elementwise_atan:
473 case Builtin::BIatan2:
474 case Builtin::BIatan2f:
475 case Builtin::BIatan2l:
476 case Builtin::BI__builtin_atan2:
477 case Builtin::BI__builtin_atan2f:
478 case Builtin::BI__builtin_atan2f16:
479 case Builtin::BI__builtin_atan2l:
480 case Builtin::BI__builtin_atan2f128:
481 case Builtin::BI__builtin_elementwise_atan2:
482 return RValue::get(
484 case Builtin::BIceil:
485 case Builtin::BIceilf:
486 case Builtin::BIceill:
487 case Builtin::BI__builtin_ceil:
488 case Builtin::BI__builtin_ceilf:
489 case Builtin::BI__builtin_ceilf16:
490 case Builtin::BI__builtin_ceill:
491 case Builtin::BI__builtin_ceilf128:
493 case Builtin::BI__builtin_elementwise_ceil:
494 return RValue::getIgnored();
495 case Builtin::BIcopysign:
496 case Builtin::BIcopysignf:
497 case Builtin::BIcopysignl:
498 case Builtin::BI__builtin_copysign:
499 case Builtin::BI__builtin_copysignf:
500 case Builtin::BI__builtin_copysignf16:
501 case Builtin::BI__builtin_copysignl:
502 case Builtin::BI__builtin_copysignf128:
504 case Builtin::BIcos:
505 case Builtin::BIcosf:
506 case Builtin::BIcosl:
507 case Builtin::BI__builtin_cos:
508 case Builtin::BI__builtin_cosf:
509 case Builtin::BI__builtin_cosf16:
510 case Builtin::BI__builtin_cosl:
511 case Builtin::BI__builtin_cosf128:
513 case Builtin::BI__builtin_elementwise_cos:
514 case Builtin::BIcosh:
515 case Builtin::BIcoshf:
516 case Builtin::BIcoshl:
517 case Builtin::BI__builtin_cosh:
518 case Builtin::BI__builtin_coshf:
519 case Builtin::BI__builtin_coshf16:
520 case Builtin::BI__builtin_coshl:
521 case Builtin::BI__builtin_coshf128:
522 case Builtin::BI__builtin_elementwise_cosh:
523 return RValue::getIgnored();
524 case Builtin::BIexp:
525 case Builtin::BIexpf:
526 case Builtin::BIexpl:
527 case Builtin::BI__builtin_exp:
528 case Builtin::BI__builtin_expf:
529 case Builtin::BI__builtin_expf16:
530 case Builtin::BI__builtin_expl:
531 case Builtin::BI__builtin_expf128:
533 case Builtin::BI__builtin_elementwise_exp:
534 return RValue::getIgnored();
535 case Builtin::BIexp2:
536 case Builtin::BIexp2f:
537 case Builtin::BIexp2l:
538 case Builtin::BI__builtin_exp2:
539 case Builtin::BI__builtin_exp2f:
540 case Builtin::BI__builtin_exp2f16:
541 case Builtin::BI__builtin_exp2l:
542 case Builtin::BI__builtin_exp2f128:
544 case Builtin::BI__builtin_elementwise_exp2:
545 case Builtin::BI__builtin_exp10:
546 case Builtin::BI__builtin_exp10f:
547 case Builtin::BI__builtin_exp10f16:
548 case Builtin::BI__builtin_exp10l:
549 case Builtin::BI__builtin_exp10f128:
550 case Builtin::BI__builtin_elementwise_exp10:
551 return RValue::getIgnored();
552 case Builtin::BIfabs:
553 case Builtin::BIfabsf:
554 case Builtin::BIfabsl:
555 case Builtin::BI__builtin_fabs:
556 case Builtin::BI__builtin_fabsf:
557 case Builtin::BI__builtin_fabsf16:
558 case Builtin::BI__builtin_fabsl:
559 case Builtin::BI__builtin_fabsf128:
561 case Builtin::BIfloor:
562 case Builtin::BIfloorf:
563 case Builtin::BIfloorl:
564 case Builtin::BI__builtin_floor:
565 case Builtin::BI__builtin_floorf:
566 case Builtin::BI__builtin_floorf16:
567 case Builtin::BI__builtin_floorl:
568 case Builtin::BI__builtin_floorf128:
570 case Builtin::BI__builtin_elementwise_floor:
571 case Builtin::BIfma:
572 case Builtin::BIfmaf:
573 case Builtin::BIfmal:
574 case Builtin::BI__builtin_fma:
575 case Builtin::BI__builtin_fmaf:
576 case Builtin::BI__builtin_fmaf16:
577 case Builtin::BI__builtin_fmal:
578 case Builtin::BI__builtin_fmaf128:
579 case Builtin::BI__builtin_elementwise_fma:
580 return RValue::getIgnored();
581 case Builtin::BIfmax:
582 case Builtin::BIfmaxf:
583 case Builtin::BIfmaxl:
584 case Builtin::BI__builtin_fmax:
585 case Builtin::BI__builtin_fmaxf:
586 case Builtin::BI__builtin_fmaxf16:
587 case Builtin::BI__builtin_fmaxl:
588 case Builtin::BI__builtin_fmaxf128:
589 return RValue::get(
591 case Builtin::BIfmin:
592 case Builtin::BIfminf:
593 case Builtin::BIfminl:
594 case Builtin::BI__builtin_fmin:
595 case Builtin::BI__builtin_fminf:
596 case Builtin::BI__builtin_fminf16:
597 case Builtin::BI__builtin_fminl:
598 case Builtin::BI__builtin_fminf128:
599 return RValue::get(
601 case Builtin::BIfmaximum_num:
602 case Builtin::BIfmaximum_numf:
603 case Builtin::BIfmaximum_numl:
604 case Builtin::BI__builtin_fmaximum_num:
605 case Builtin::BI__builtin_fmaximum_numf:
606 case Builtin::BI__builtin_fmaximum_numf16:
607 case Builtin::BI__builtin_fmaximum_numl:
608 case Builtin::BI__builtin_fmaximum_numf128:
609 case Builtin::BIfminimum_num:
610 case Builtin::BIfminimum_numf:
611 case Builtin::BIfminimum_numl:
612 case Builtin::BI__builtin_fminimum_num:
613 case Builtin::BI__builtin_fminimum_numf:
614 case Builtin::BI__builtin_fminimum_numf16:
615 case Builtin::BI__builtin_fminimum_numl:
616 case Builtin::BI__builtin_fminimum_numf128:
617 return RValue::getIgnored();
618 case Builtin::BIfmod:
619 case Builtin::BIfmodf:
620 case Builtin::BIfmodl:
621 case Builtin::BI__builtin_fmod:
622 case Builtin::BI__builtin_fmodf:
623 case Builtin::BI__builtin_fmodf16:
624 case Builtin::BI__builtin_fmodl:
625 case Builtin::BI__builtin_fmodf128:
626 case Builtin::BI__builtin_elementwise_fmod:
627 return RValue::get(
629 case Builtin::BIlog:
630 case Builtin::BIlogf:
631 case Builtin::BIlogl:
632 case Builtin::BI__builtin_log:
633 case Builtin::BI__builtin_logf:
634 case Builtin::BI__builtin_logf16:
635 case Builtin::BI__builtin_logl:
636 case Builtin::BI__builtin_logf128:
637 case Builtin::BI__builtin_elementwise_log:
639 case Builtin::BIlog10:
640 case Builtin::BIlog10f:
641 case Builtin::BIlog10l:
642 case Builtin::BI__builtin_log10:
643 case Builtin::BI__builtin_log10f:
644 case Builtin::BI__builtin_log10f16:
645 case Builtin::BI__builtin_log10l:
646 case Builtin::BI__builtin_log10f128:
647 case Builtin::BI__builtin_elementwise_log10:
649 case Builtin::BIlog2:
650 case Builtin::BIlog2f:
651 case Builtin::BIlog2l:
652 case Builtin::BI__builtin_log2:
653 case Builtin::BI__builtin_log2f:
654 case Builtin::BI__builtin_log2f16:
655 case Builtin::BI__builtin_log2l:
656 case Builtin::BI__builtin_log2f128:
657 case Builtin::BI__builtin_elementwise_log2:
659 case Builtin::BInearbyint:
660 case Builtin::BInearbyintf:
661 case Builtin::BInearbyintl:
662 case Builtin::BI__builtin_nearbyint:
663 case Builtin::BI__builtin_nearbyintf:
664 case Builtin::BI__builtin_nearbyintl:
665 case Builtin::BI__builtin_nearbyintf128:
666 case Builtin::BI__builtin_elementwise_nearbyint:
668 case Builtin::BIpow:
669 case Builtin::BIpowf:
670 case Builtin::BIpowl:
671 case Builtin::BI__builtin_pow:
672 case Builtin::BI__builtin_powf:
673 case Builtin::BI__builtin_powf16:
674 case Builtin::BI__builtin_powl:
675 case Builtin::BI__builtin_powf128:
676 return RValue::get(
678 case Builtin::BI__builtin_elementwise_pow:
679 return RValue::getIgnored();
680 case Builtin::BIrint:
681 case Builtin::BIrintf:
682 case Builtin::BIrintl:
683 case Builtin::BI__builtin_rint:
684 case Builtin::BI__builtin_rintf:
685 case Builtin::BI__builtin_rintf16:
686 case Builtin::BI__builtin_rintl:
687 case Builtin::BI__builtin_rintf128:
688 case Builtin::BI__builtin_elementwise_rint:
690 case Builtin::BIround:
691 case Builtin::BIroundf:
692 case Builtin::BIroundl:
693 case Builtin::BI__builtin_round:
694 case Builtin::BI__builtin_roundf:
695 case Builtin::BI__builtin_roundf16:
696 case Builtin::BI__builtin_roundl:
697 case Builtin::BI__builtin_roundf128:
698 case Builtin::BI__builtin_elementwise_round:
700 case Builtin::BIroundeven:
701 case Builtin::BIroundevenf:
702 case Builtin::BIroundevenl:
703 case Builtin::BI__builtin_roundeven:
704 case Builtin::BI__builtin_roundevenf:
705 case Builtin::BI__builtin_roundevenf16:
706 case Builtin::BI__builtin_roundevenl:
707 case Builtin::BI__builtin_roundevenf128:
708 case Builtin::BI__builtin_elementwise_roundeven:
710 case Builtin::BIsin:
711 case Builtin::BIsinf:
712 case Builtin::BIsinl:
713 case Builtin::BI__builtin_sin:
714 case Builtin::BI__builtin_sinf:
715 case Builtin::BI__builtin_sinf16:
716 case Builtin::BI__builtin_sinl:
717 case Builtin::BI__builtin_sinf128:
718 case Builtin::BI__builtin_elementwise_sin:
720 case Builtin::BIsinh:
721 case Builtin::BIsinhf:
722 case Builtin::BIsinhl:
723 case Builtin::BI__builtin_sinh:
724 case Builtin::BI__builtin_sinhf:
725 case Builtin::BI__builtin_sinhf16:
726 case Builtin::BI__builtin_sinhl:
727 case Builtin::BI__builtin_sinhf128:
728 case Builtin::BI__builtin_elementwise_sinh:
729 case Builtin::BI__builtin_sincospi:
730 case Builtin::BI__builtin_sincospif:
731 case Builtin::BI__builtin_sincospil:
732 case Builtin::BIsincos:
733 case Builtin::BIsincosf:
734 case Builtin::BIsincosl:
735 case Builtin::BI__builtin_sincos:
736 case Builtin::BI__builtin_sincosf:
737 case Builtin::BI__builtin_sincosf16:
738 case Builtin::BI__builtin_sincosl:
739 case Builtin::BI__builtin_sincosf128:
740 return RValue::getIgnored();
741 case Builtin::BIsqrt:
742 case Builtin::BIsqrtf:
743 case Builtin::BIsqrtl:
744 case Builtin::BI__builtin_sqrt:
745 case Builtin::BI__builtin_sqrtf:
746 case Builtin::BI__builtin_sqrtf16:
747 case Builtin::BI__builtin_sqrtl:
748 case Builtin::BI__builtin_sqrtf128:
749 case Builtin::BI__builtin_elementwise_sqrt:
751 case Builtin::BItan:
752 case Builtin::BItanf:
753 case Builtin::BItanl:
754 case Builtin::BI__builtin_tan:
755 case Builtin::BI__builtin_tanf:
756 case Builtin::BI__builtin_tanf16:
757 case Builtin::BI__builtin_tanl:
758 case Builtin::BI__builtin_tanf128:
759 case Builtin::BI__builtin_elementwise_tan:
761 case Builtin::BItanh:
762 case Builtin::BItanhf:
763 case Builtin::BItanhl:
764 case Builtin::BI__builtin_tanh:
765 case Builtin::BI__builtin_tanhf:
766 case Builtin::BI__builtin_tanhf16:
767 case Builtin::BI__builtin_tanhl:
768 case Builtin::BI__builtin_tanhf128:
769 case Builtin::BI__builtin_elementwise_tanh:
770 return RValue::getIgnored();
771 case Builtin::BItrunc:
772 case Builtin::BItruncf:
773 case Builtin::BItruncl:
774 case Builtin::BI__builtin_trunc:
775 case Builtin::BI__builtin_truncf:
776 case Builtin::BI__builtin_truncf16:
777 case Builtin::BI__builtin_truncl:
778 case Builtin::BI__builtin_truncf128:
779 case Builtin::BI__builtin_elementwise_trunc:
781 case Builtin::BIlround:
782 case Builtin::BIlroundf:
783 case Builtin::BIlroundl:
784 case Builtin::BI__builtin_lround:
785 case Builtin::BI__builtin_lroundf:
786 case Builtin::BI__builtin_lroundl:
787 case Builtin::BI__builtin_lroundf128:
789 case Builtin::BIllround:
790 case Builtin::BIllroundf:
791 case Builtin::BIllroundl:
792 case Builtin::BI__builtin_llround:
793 case Builtin::BI__builtin_llroundf:
794 case Builtin::BI__builtin_llroundl:
795 case Builtin::BI__builtin_llroundf128:
797 case Builtin::BIlrint:
798 case Builtin::BIlrintf:
799 case Builtin::BIlrintl:
800 case Builtin::BI__builtin_lrint:
801 case Builtin::BI__builtin_lrintf:
802 case Builtin::BI__builtin_lrintl:
803 case Builtin::BI__builtin_lrintf128:
805 case Builtin::BIllrint:
806 case Builtin::BIllrintf:
807 case Builtin::BIllrintl:
808 case Builtin::BI__builtin_llrint:
809 case Builtin::BI__builtin_llrintf:
810 case Builtin::BI__builtin_llrintl:
811 case Builtin::BI__builtin_llrintf128:
813 case Builtin::BI__builtin_ldexp:
814 case Builtin::BI__builtin_ldexpf:
815 case Builtin::BI__builtin_ldexpl:
816 case Builtin::BI__builtin_ldexpf16:
817 case Builtin::BI__builtin_ldexpf128:
818 case Builtin::BI__builtin_elementwise_ldexp:
819 default:
820 break;
821 }
822
// No intrinsic matched: return the "ignored" sentinel so the caller falls
// back to a normal call.
823 return RValue::getIgnored();
824}
825
826// FIXME: Remove cgf parameter when all descriptor kinds are implemented
// Translate the next IITDescriptor from `infos` (consumed front-to-back,
// recursing for vector element types) into the corresponding CIR type.
// Unsupported descriptor kinds report an NYI error and yield cir::VoidType.
// NOTE(review): source lines 828–829 (the function name and its leading
// parameters — presumably the CIRGenFunction and the descriptor ArrayRef)
// are missing from this extraction.
827static mlir::Type
830 mlir::MLIRContext *context) {
831 using namespace llvm::Intrinsic;
832
833 IITDescriptor descriptor = infos.front();
834 infos = infos.slice(1);
835
836 switch (descriptor.Kind) {
837 case IITDescriptor::Void:
838 return cir::VoidType::get(context);
839 // If the intrinsic expects unsigned integers, the signedness is corrected in
840 // correctIntegerSignedness()
841 case IITDescriptor::Integer:
842 return cir::IntType::get(context, descriptor.IntegerWidth,
843 /*isSigned=*/true);
844 case IITDescriptor::Vector: {
845 mlir::Type elementType = decodeFixedType(cgf, infos, context);
846 unsigned numElements = descriptor.VectorWidth.getFixedValue();
847 return cir::VectorType::get(elementType, numElements);
848 }
849 case IITDescriptor::Pointer: {
850 mlir::Builder builder(context);
851 auto addrSpace = cir::TargetAddressSpaceAttr::get(
852 context, descriptor.PointerAddressSpace);
853 return cir::PointerType::get(cir::VoidType::get(context), addrSpace);
854 }
855 default:
856 cgf.cgm.errorNYI("Unimplemented intrinsic type descriptor");
857 return cir::VoidType::get(context);
858 }
859}
860
861/// Helper function to correct integer signedness for intrinsic arguments and
862/// return type. IIT always returns signed integers, but the actual intrinsic
863/// may expect unsigned integers based on the AST FunctionDecl parameter types.
864static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType,
865 mlir::MLIRContext *context) {
866 auto intTy = dyn_cast<cir::IntType>(iitType);
867 if (!intTy)
868 return iitType;
869
870 if (astType->isUnsignedIntegerType())
871 return cir::IntType::get(context, intTy.getWidth(), /*isSigned=*/false);
872
873 return iitType;
874}
875
// Reconcile a pointer argument's type with the type the intrinsic expects:
// address-space mismatches get an address-space cast (partially NYI), other
// mismatches get a plain bitcast. The caller guarantees the types differ.
// NOTE(review): source line 884 is missing from this extraction — it is the
// first line of the statement whose continuation (the "address space handling
// not yet implemented" message) appears on line 885, presumably an
// assert/errorNYI call.
876static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy,
877 CIRGenBuilderTy &builder) {
878 auto ptrType = mlir::cast<cir::PointerType>(argValue.getType());
879
880 auto expectedPtrType = mlir::cast<cir::PointerType>(expectedTy);
881 assert(ptrType != expectedPtrType && "types should not match");
882
883 if (ptrType.getAddrSpace() != expectedPtrType.getAddrSpace()) {
885 "address space handling not yet implemented");
886 auto newPtrType = cir::PointerType::get(ptrType.getPointee(),
887 expectedPtrType.getAddrSpace());
888 return builder.createAddrSpaceCast(argValue, newPtrType);
889 }
890
891 return builder.createBitcast(argValue, expectedTy);
892}
893
// Build the cir::FuncType for an LLVM intrinsic `id` by decoding its IIT
// descriptor table: the first decoded type is the result, the rest are
// argument types, with a trailing VarArg descriptor marking variadic
// intrinsics. Void results follow the CIR convention of no explicit return
// type.
// NOTE(review): source lines 899 and 905 are missing from this extraction —
// they declare the containers used below (presumably a SmallVector `table`
// for the descriptor entries and a SmallVector `argTypes` for the decoded
// argument types).
894static cir::FuncType getIntrinsicType(CIRGenFunction &cgf,
895 mlir::MLIRContext *context,
896 llvm::Intrinsic::ID id) {
897 using namespace llvm::Intrinsic;
898
900 getIntrinsicInfoTableEntries(id, table);
901
902 ArrayRef<IITDescriptor> tableRef = table;
903 mlir::Type resultTy = decodeFixedType(cgf, tableRef, context);
904
906 bool isVarArg = false;
907 while (!tableRef.empty()) {
908 llvm::Intrinsic::IITDescriptor::IITDescriptorKind kind =
909 tableRef.front().Kind;
910 if (kind == IITDescriptor::VarArg) {
911 isVarArg = true;
912 break; // VarArg is last
913 }
914 argTypes.push_back(decodeFixedType(cgf, tableRef, context));
915 }
916
917 // CIR convention: no explicit void return type
918 if (isa<cir::VoidType>(resultTy))
919 return cir::FuncType::get(context, argTypes, /*optionalReturnType=*/nullptr,
920 isVarArg);
921
922 return cir::FuncType::get(context, argTypes, resultTy, isVarArg);
923}
924
926 const CallExpr *e,
928 mlir::Location loc = getLoc(e->getSourceRange());
929
930 // See if we can constant fold this builtin. If so, don't emit it at all.
931 // TODO: Extend this handling to all builtin calls that we can constant-fold.
932 // Do not constant-fold immediate (target-specific) builtins; their ASTs can
933 // trigger the constant evaluator in cases it cannot safely handle.
934 // Skip EvaluateAsRValue for those.
935 Expr::EvalResult result;
936 if (e->isPRValue() && !getContext().BuiltinInfo.isImmediate(builtinID) &&
937 e->EvaluateAsRValue(result, cgm.getASTContext()) &&
938 !result.hasSideEffects()) {
939 if (result.Val.isInt()) {
940 QualType type = e->getType();
941 if (type->isBooleanType())
942 return RValue::get(
943 builder.getBool(result.Val.getInt().getBoolValue(), loc));
944 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
945 }
946 if (result.Val.isFloat()) {
947 // Note: we are using result type of CallExpr to determine the type of
948 // the constant. Classic codegen uses the result value to determine the
949 // type. We feel it should be Ok to use expression type because it is
950 // hard to imagine a builtin function evaluates to a value that
951 // over/underflows its own defined type.
952 mlir::Type type = convertType(e->getType());
953 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
954 }
955 }
956
957 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
958
960
961 // If the builtin has been declared explicitly with an assembler label,
962 // disable the specialized emitting below. Ideally we should communicate the
963 // rename in IR, or at least avoid generating the intrinsic calls that are
964 // likely to get lowered to the renamed library functions.
965 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
966
967 bool generateFPMathIntrinsics =
968 shouldCIREmitFPMathIntrinsic(*this, e, builtinID);
969
970 if (generateFPMathIntrinsics) {
971 // Try to match the builtinID with a floating point math builtin.
972 RValue rv = tryEmitFPMathIntrinsic(*this, e, builtinIDIfNoAsmLabel);
973
974 // Return the result directly if a math intrinsic was generated.
975 if (!rv.isIgnored()) {
976 return rv;
977 }
978 }
979
981
982 switch (builtinIDIfNoAsmLabel) {
983 default:
984 break;
985
986 // C stdarg builtins.
987 case Builtin::BI__builtin_stdarg_start:
988 case Builtin::BI__builtin_va_start:
989 case Builtin::BI__va_start: {
990 mlir::Value vaList = builtinID == Builtin::BI__va_start
991 ? emitScalarExpr(e->getArg(0))
993 emitVAStart(vaList);
994 return {};
995 }
996
997 case Builtin::BI__builtin_va_end:
999 return {};
1000 case Builtin::BI__builtin_va_copy: {
1001 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
1002 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
1003 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
1004 return {};
1005 }
1006
1007 case Builtin::BIabs:
1008 case Builtin::BIlabs:
1009 case Builtin::BIllabs:
1010 case Builtin::BI__builtin_abs:
1011 case Builtin::BI__builtin_labs:
1012 case Builtin::BI__builtin_llabs: {
1013 bool sanitizeOverflow = sanOpts.has(SanitizerKind::SignedIntegerOverflow);
1014 mlir::Value arg = emitScalarExpr(e->getArg(0));
1015 mlir::Value result;
1016 switch (getLangOpts().getSignedOverflowBehavior()) {
1018 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1019 /*minIsPoison=*/false);
1020 break;
1022 if (!sanitizeOverflow) {
1023 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1024 /*minIsPoison=*/true);
1025 break;
1026 }
1027 [[fallthrough]];
1029 cgm.errorNYI(e->getSourceRange(), "abs with overflow handling");
1030 return RValue::get(nullptr);
1031 }
1032 return RValue::get(result);
1033 }
1034
1035 case Builtin::BI__assume:
1036 case Builtin::BI__builtin_assume: {
1037 if (e->getArg(0)->HasSideEffects(getContext()))
1038 return RValue::get(nullptr);
1039
1040 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
1041 cir::AssumeOp::create(builder, loc, argValue);
1042 return RValue::get(nullptr);
1043 }
1044
1045 case Builtin::BI__builtin_assume_separate_storage: {
1046 mlir::Value value0 = emitScalarExpr(e->getArg(0));
1047 mlir::Value value1 = emitScalarExpr(e->getArg(1));
1048 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
1049 return RValue::get(nullptr);
1050 }
1051
1052 case Builtin::BI__builtin_assume_aligned: {
1053 const Expr *ptrExpr = e->getArg(0);
1054 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
1055 mlir::Value offsetValue =
1056 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
1057
1058 std::optional<llvm::APSInt> alignment =
1060 assert(alignment.has_value() &&
1061 "the second argument to __builtin_assume_aligned must be an "
1062 "integral constant expression");
1063
1064 mlir::Value result =
1065 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
1066 alignment->getSExtValue(), offsetValue);
1067 return RValue::get(result);
1068 }
1069
1070 case Builtin::BI__builtin_complex: {
1071 mlir::Value real = emitScalarExpr(e->getArg(0));
1072 mlir::Value imag = emitScalarExpr(e->getArg(1));
1073 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
1074 return RValue::getComplex(complex);
1075 }
1076
1077 case Builtin::BI__builtin_creal:
1078 case Builtin::BI__builtin_crealf:
1079 case Builtin::BI__builtin_creall:
1080 case Builtin::BIcreal:
1081 case Builtin::BIcrealf:
1082 case Builtin::BIcreall: {
1083 mlir::Value complex = emitComplexExpr(e->getArg(0));
1084 mlir::Value real = builder.createComplexReal(loc, complex);
1085 return RValue::get(real);
1086 }
1087
1088 case Builtin::BI__builtin_cimag:
1089 case Builtin::BI__builtin_cimagf:
1090 case Builtin::BI__builtin_cimagl:
1091 case Builtin::BIcimag:
1092 case Builtin::BIcimagf:
1093 case Builtin::BIcimagl: {
1094 mlir::Value complex = emitComplexExpr(e->getArg(0));
1095 mlir::Value imag = builder.createComplexImag(loc, complex);
1096 return RValue::get(imag);
1097 }
1098
1099 case Builtin::BI__builtin_conj:
1100 case Builtin::BI__builtin_conjf:
1101 case Builtin::BI__builtin_conjl:
1102 case Builtin::BIconj:
1103 case Builtin::BIconjf:
1104 case Builtin::BIconjl: {
1105 mlir::Value complex = emitComplexExpr(e->getArg(0));
1106 mlir::Value conj = builder.createNot(complex);
1107 return RValue::getComplex(conj);
1108 }
1109
1110 case Builtin::BI__builtin_clrsb:
1111 case Builtin::BI__builtin_clrsbl:
1112 case Builtin::BI__builtin_clrsbll:
1113 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
1114
1115 case Builtin::BI__builtin_ctzs:
1116 case Builtin::BI__builtin_ctz:
1117 case Builtin::BI__builtin_ctzl:
1118 case Builtin::BI__builtin_ctzll:
1119 case Builtin::BI__builtin_ctzg:
1121 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
1122
1123 case Builtin::BI__builtin_clzs:
1124 case Builtin::BI__builtin_clz:
1125 case Builtin::BI__builtin_clzl:
1126 case Builtin::BI__builtin_clzll:
1127 case Builtin::BI__builtin_clzg:
1129 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
1130
1131 case Builtin::BI__builtin_ffs:
1132 case Builtin::BI__builtin_ffsl:
1133 case Builtin::BI__builtin_ffsll:
1134 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
1135
1136 case Builtin::BI__builtin_parity:
1137 case Builtin::BI__builtin_parityl:
1138 case Builtin::BI__builtin_parityll:
1139 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
1140
1141 case Builtin::BI__lzcnt16:
1142 case Builtin::BI__lzcnt:
1143 case Builtin::BI__lzcnt64:
1145 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
1146
1147 case Builtin::BI__popcnt16:
1148 case Builtin::BI__popcnt:
1149 case Builtin::BI__popcnt64:
1150 case Builtin::BI__builtin_popcount:
1151 case Builtin::BI__builtin_popcountl:
1152 case Builtin::BI__builtin_popcountll:
1153 case Builtin::BI__builtin_popcountg:
1154 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
1155
1156 // Always return the argument of __builtin_unpredictable. LLVM does not
1157 // have an intrinsic corresponding to this builtin. Metadata for this
1158 // builtin should be added directly to instructions such as branches or
1159 // switches that use it.
1160 case Builtin::BI__builtin_unpredictable: {
1161 return RValue::get(emitScalarExpr(e->getArg(0)));
1162 }
1163
1164 case Builtin::BI__builtin_expect:
1165 case Builtin::BI__builtin_expect_with_probability: {
1166 mlir::Value argValue = emitScalarExpr(e->getArg(0));
1167 if (cgm.getCodeGenOpts().OptimizationLevel == 0)
1168 return RValue::get(argValue);
1169
1170 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
1171
1172 mlir::FloatAttr probAttr;
1173 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
1174 llvm::APFloat probability(0.0);
1175 const Expr *probArg = e->getArg(2);
1176 [[maybe_unused]] bool evalSucceeded =
1177 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
1178 assert(evalSucceeded &&
1179 "probability should be able to evaluate as float");
1180 bool loseInfo = false; // ignored
1181 probability.convert(llvm::APFloat::IEEEdouble(),
1182 llvm::RoundingMode::Dynamic, &loseInfo);
1183 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
1184 probability);
1185 }
1186
1187 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
1188 argValue, expectedValue, probAttr);
1189 return RValue::get(result);
1190 }
1191
1192 case Builtin::BI__builtin_bswap16:
1193 case Builtin::BI__builtin_bswap32:
1194 case Builtin::BI__builtin_bswap64:
1195 case Builtin::BI_byteswap_ushort:
1196 case Builtin::BI_byteswap_ulong:
1197 case Builtin::BI_byteswap_uint64: {
1198 mlir::Value arg = emitScalarExpr(e->getArg(0));
1199 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
1200 }
1201
1202 case Builtin::BI__builtin_bitreverse8:
1203 case Builtin::BI__builtin_bitreverse16:
1204 case Builtin::BI__builtin_bitreverse32:
1205 case Builtin::BI__builtin_bitreverse64: {
1206 mlir::Value arg = emitScalarExpr(e->getArg(0));
1207 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
1208 }
1209
1210 case Builtin::BI__builtin_rotateleft8:
1211 case Builtin::BI__builtin_rotateleft16:
1212 case Builtin::BI__builtin_rotateleft32:
1213 case Builtin::BI__builtin_rotateleft64:
1214 return emitRotate(e, /*isRotateLeft=*/true);
1215
1216 case Builtin::BI__builtin_rotateright8:
1217 case Builtin::BI__builtin_rotateright16:
1218 case Builtin::BI__builtin_rotateright32:
1219 case Builtin::BI__builtin_rotateright64:
1220 return emitRotate(e, /*isRotateLeft=*/false);
1221
1222 case Builtin::BI__builtin_coro_id:
1223 case Builtin::BI__builtin_coro_promise:
1224 case Builtin::BI__builtin_coro_resume:
1225 case Builtin::BI__builtin_coro_noop:
1226 case Builtin::BI__builtin_coro_destroy:
1227 case Builtin::BI__builtin_coro_done:
1228 case Builtin::BI__builtin_coro_alloc:
1229 case Builtin::BI__builtin_coro_begin:
1230 case Builtin::BI__builtin_coro_end:
1231 case Builtin::BI__builtin_coro_suspend:
1232 case Builtin::BI__builtin_coro_align:
1233 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
1234 return getUndefRValue(e->getType());
1235
1236 case Builtin::BI__builtin_coro_frame: {
1237 return emitCoroutineFrame();
1238 }
1239 case Builtin::BI__builtin_coro_free:
1240 case Builtin::BI__builtin_coro_size: {
1241 GlobalDecl gd{fd};
1242 mlir::Type ty = cgm.getTypes().getFunctionType(
1243 cgm.getTypes().arrangeGlobalDeclaration(gd));
1244 const auto *nd = cast<NamedDecl>(gd.getDecl());
1245 cir::FuncOp fnOp =
1246 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
1247 fnOp.setBuiltin(true);
1248 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
1249 returnValue);
1250 }
1251
1252 case Builtin::BI__builtin_constant_p: {
1253 mlir::Type resultType = convertType(e->getType());
1254
1255 const Expr *arg = e->getArg(0);
1256 QualType argType = arg->getType();
1257 // FIXME: The allowance for Obj-C pointers and block pointers is historical
1258 // and likely a mistake.
1259 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
1260 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
1261 // Per the GCC documentation, only numeric constants are recognized after
1262 // inlining.
1263 return RValue::get(
1264 builder.getConstInt(getLoc(e->getSourceRange()),
1265 mlir::cast<cir::IntType>(resultType), 0));
1266 }
1267
1268 if (arg->HasSideEffects(getContext())) {
1269 // The argument is unevaluated, so be conservative if it might have
1270 // side-effects.
1271 return RValue::get(
1272 builder.getConstInt(getLoc(e->getSourceRange()),
1273 mlir::cast<cir::IntType>(resultType), 0));
1274 }
1275
1276 mlir::Value argValue = emitScalarExpr(arg);
1277 if (argType->isObjCObjectPointerType()) {
1278 cgm.errorNYI(e->getSourceRange(),
1279 "__builtin_constant_p: Obj-C object pointer");
1280 return {};
1281 }
1282 argValue = builder.createBitcast(argValue, convertType(argType));
1283
1284 mlir::Value result = cir::IsConstantOp::create(
1285 builder, getLoc(e->getSourceRange()), argValue);
1286 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
1287 result = builder.createBoolToInt(result, resultType);
1288 return RValue::get(result);
1289 }
1290 case Builtin::BI__builtin_dynamic_object_size:
1291 case Builtin::BI__builtin_object_size: {
1292 unsigned type =
1293 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
1294 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
1295
1296 // We pass this builtin onto the optimizer so that it can figure out the
1297 // object size in more complex cases.
1298 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
1299 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
1300 /*EmittedE=*/nullptr, isDynamic));
1301 }
1302
1303 case Builtin::BI__builtin_prefetch: {
1304 auto evaluateOperandAsInt = [&](const Expr *arg) {
1305 Expr::EvalResult res;
1306 [[maybe_unused]] bool evalSucceed =
1307 arg->EvaluateAsInt(res, cgm.getASTContext());
1308 assert(evalSucceed && "expression should be able to evaluate as int");
1309 return res.Val.getInt().getZExtValue();
1310 };
1311
1312 bool isWrite = false;
1313 if (e->getNumArgs() > 1)
1314 isWrite = evaluateOperandAsInt(e->getArg(1));
1315
1316 int locality = 3;
1317 if (e->getNumArgs() > 2)
1318 locality = evaluateOperandAsInt(e->getArg(2));
1319
1320 mlir::Value address = emitScalarExpr(e->getArg(0));
1321 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
1322 return RValue::get(nullptr);
1323 }
1324 case Builtin::BI__builtin_readcyclecounter:
1325 case Builtin::BI__builtin_readsteadycounter:
1326 return errorBuiltinNYI(*this, e, builtinID);
1327 case Builtin::BI__builtin___clear_cache: {
1328 mlir::Value begin =
1329 builder.createPtrBitcast(emitScalarExpr(e->getArg(0)), cgm.voidTy);
1330 mlir::Value end =
1331 builder.createPtrBitcast(emitScalarExpr(e->getArg(1)), cgm.voidTy);
1332 cir::ClearCacheOp::create(builder, getLoc(e->getSourceRange()), begin, end);
1333 return RValue::get(nullptr);
1334 }
1335 case Builtin::BI__builtin_trap:
1336 emitTrap(loc, /*createNewBlock=*/true);
1337 return RValue::getIgnored();
1338 case Builtin::BI__builtin_verbose_trap:
1340 emitTrap(loc, /*createNewBlock=*/true);
1341 return RValue::getIgnored();
1342 case Builtin::BI__debugbreak:
1343 return errorBuiltinNYI(*this, e, builtinID);
1344 case Builtin::BI__builtin_unreachable:
1345 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
1346 return RValue::getIgnored();
1347 case Builtin::BI__builtin_powi:
1348 case Builtin::BI__builtin_powif:
1349 case Builtin::BI__builtin_powil:
1350 case Builtin::BI__builtin_frexpl:
1351 case Builtin::BI__builtin_frexp:
1352 case Builtin::BI__builtin_frexpf:
1353 case Builtin::BI__builtin_frexpf128:
1354 case Builtin::BI__builtin_frexpf16:
1355 case Builtin::BImodf:
1356 case Builtin::BImodff:
1357 case Builtin::BImodfl:
1358 case Builtin::BI__builtin_modf:
1359 case Builtin::BI__builtin_modff:
1360 case Builtin::BI__builtin_modfl:
1361 case Builtin::BI__builtin_isgreater:
1362 case Builtin::BI__builtin_isgreaterequal:
1363 case Builtin::BI__builtin_isless:
1364 case Builtin::BI__builtin_islessequal:
1365 case Builtin::BI__builtin_islessgreater:
1366 case Builtin::BI__builtin_isunordered:
1367 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
1368 //
1369 // The `__builtin_isfpclass()` builtin is a generalization of functions
1370 // isnan, isinf, isfinite and some others defined by the C standard. It tests
1371 // if the floating-point value, specified by the first argument, falls into
1372 // any of data classes, specified by the second argument.
1373 case Builtin::BI__builtin_isnan: {
1374 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1375 mlir::Value v = emitScalarExpr(e->getArg(0));
1377 mlir::Location loc = getLoc(e->getBeginLoc());
1378 return RValue::get(builder.createBoolToInt(
1379 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
1380 convertType(e->getType())));
1381 }
1382
1383 case Builtin::BI__builtin_issignaling: {
1384 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1385 mlir::Value v = emitScalarExpr(e->getArg(0));
1386 mlir::Location loc = getLoc(e->getBeginLoc());
1387 return RValue::get(builder.createBoolToInt(
1388 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
1389 convertType(e->getType())));
1390 }
1391
1392 case Builtin::BI__builtin_isinf: {
1393 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1394 mlir::Value v = emitScalarExpr(e->getArg(0));
1396 mlir::Location loc = getLoc(e->getBeginLoc());
1397 return RValue::get(builder.createBoolToInt(
1398 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
1399 convertType(e->getType())));
1400 }
1401 case Builtin::BIfinite:
1402 case Builtin::BI__finite:
1403 case Builtin::BIfinitef:
1404 case Builtin::BI__finitef:
1405 case Builtin::BIfinitel:
1406 case Builtin::BI__finitel:
1407 case Builtin::BI__builtin_isfinite: {
1408 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1409 mlir::Value v = emitScalarExpr(e->getArg(0));
1411 mlir::Location loc = getLoc(e->getBeginLoc());
1412 return RValue::get(builder.createBoolToInt(
1413 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
1414 convertType(e->getType())));
1415 }
1416
1417 case Builtin::BI__builtin_isnormal: {
1418 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1419 mlir::Value v = emitScalarExpr(e->getArg(0));
1420 mlir::Location loc = getLoc(e->getBeginLoc());
1421 return RValue::get(builder.createBoolToInt(
1422 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
1423 convertType(e->getType())));
1424 }
1425
1426 case Builtin::BI__builtin_issubnormal: {
1427 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1428 mlir::Value v = emitScalarExpr(e->getArg(0));
1429 mlir::Location loc = getLoc(e->getBeginLoc());
1430 return RValue::get(builder.createBoolToInt(
1431 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
1432 convertType(e->getType())));
1433 }
1434
1435 case Builtin::BI__builtin_iszero: {
1436 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1437 mlir::Value v = emitScalarExpr(e->getArg(0));
1438 mlir::Location loc = getLoc(e->getBeginLoc());
1439 return RValue::get(builder.createBoolToInt(
1440 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
1441 convertType(e->getType())));
1442 }
1443 case Builtin::BI__builtin_isfpclass: {
1444 Expr::EvalResult result;
1445 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
1446 break;
1447
1448 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1449 mlir::Value v = emitScalarExpr(e->getArg(0));
1450 uint64_t test = result.Val.getInt().getLimitedValue();
1451 mlir::Location loc = getLoc(e->getBeginLoc());
1452 //
1453 return RValue::get(builder.createBoolToInt(
1454 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
1455 convertType(e->getType())));
1456 }
1457 case Builtin::BI__builtin_nondeterministic_value:
1458 return errorBuiltinNYI(*this, e, builtinID);
1459 case Builtin::BI__builtin_elementwise_abs: {
1460 mlir::Type cirTy = convertType(e->getArg(0)->getType());
1461 bool isIntTy = cir::isIntOrVectorOfIntType(cirTy);
1462 if (!isIntTy)
1463 return emitUnaryFPBuiltin<cir::FAbsOp>(*this, *e);
1464 mlir::Value arg = emitScalarExpr(e->getArg(0));
1465 mlir::Value result = cir::AbsOp::create(builder, getLoc(e->getExprLoc()),
1466 arg.getType(), arg, false);
1467 return RValue::get(result);
1468 }
1469 case Builtin::BI__builtin_elementwise_acos:
1471 case Builtin::BI__builtin_elementwise_asin:
1473 case Builtin::BI__builtin_elementwise_atan:
1475 case Builtin::BI__builtin_elementwise_atan2:
1476 return RValue::get(
1478 case Builtin::BI__builtin_elementwise_exp:
1480 case Builtin::BI__builtin_elementwise_exp2:
1482 case Builtin::BI__builtin_elementwise_log:
1484 case Builtin::BI__builtin_elementwise_log2:
1486 case Builtin::BI__builtin_elementwise_log10:
1488 case Builtin::BI__builtin_elementwise_cos:
1490 case Builtin::BI__builtin_elementwise_floor:
1492 case Builtin::BI__builtin_elementwise_round:
1494 case Builtin::BI__builtin_elementwise_rint:
1496 case Builtin::BI__builtin_elementwise_nearbyint:
1498 case Builtin::BI__builtin_elementwise_sin:
1500 case Builtin::BI__builtin_elementwise_sqrt:
1502 case Builtin::BI__builtin_elementwise_tan:
1504 case Builtin::BI__builtin_elementwise_trunc:
1506 case Builtin::BI__builtin_elementwise_fmod:
1507 return RValue::get(
1509 case Builtin::BI__builtin_elementwise_ceil:
1510 case Builtin::BI__builtin_elementwise_exp10:
1511 case Builtin::BI__builtin_elementwise_ldexp:
1512 case Builtin::BI__builtin_elementwise_pow:
1513 case Builtin::BI__builtin_elementwise_bitreverse:
1514 case Builtin::BI__builtin_elementwise_cosh:
1515 case Builtin::BI__builtin_elementwise_popcount:
1516 case Builtin::BI__builtin_elementwise_roundeven:
1517 case Builtin::BI__builtin_elementwise_sinh:
1518 case Builtin::BI__builtin_elementwise_tanh:
1519 case Builtin::BI__builtin_elementwise_canonicalize:
1520 case Builtin::BI__builtin_elementwise_copysign:
1521 case Builtin::BI__builtin_elementwise_fma:
1522 return errorBuiltinNYI(*this, e, builtinID);
1523 case Builtin::BI__builtin_elementwise_fshl: {
1524 mlir::Location loc = getLoc(e->getExprLoc());
1525 mlir::Value a = emitScalarExpr(e->getArg(0));
1526 mlir::Value b = emitScalarExpr(e->getArg(1));
1527 mlir::Value c = emitScalarExpr(e->getArg(2));
1528 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshl", a.getType(),
1529 mlir::ValueRange{a, b, c}));
1530 }
1531 case Builtin::BI__builtin_elementwise_fshr: {
1532 mlir::Location loc = getLoc(e->getExprLoc());
1533 mlir::Value a = emitScalarExpr(e->getArg(0));
1534 mlir::Value b = emitScalarExpr(e->getArg(1));
1535 mlir::Value c = emitScalarExpr(e->getArg(2));
1536 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshr", a.getType(),
1537 mlir::ValueRange{a, b, c}));
1538 }
1539 case Builtin::BI__builtin_elementwise_add_sat:
1540 case Builtin::BI__builtin_elementwise_sub_sat:
1541 case Builtin::BI__builtin_elementwise_max:
1542 case Builtin::BI__builtin_elementwise_min:
1543 case Builtin::BI__builtin_elementwise_maxnum:
1544 case Builtin::BI__builtin_elementwise_minnum:
1545 case Builtin::BI__builtin_elementwise_maximum:
1546 case Builtin::BI__builtin_elementwise_minimum:
1547 case Builtin::BI__builtin_elementwise_maximumnum:
1548 case Builtin::BI__builtin_elementwise_minimumnum:
1549 case Builtin::BI__builtin_reduce_max:
1550 case Builtin::BI__builtin_reduce_min:
1551 case Builtin::BI__builtin_reduce_add:
1552 case Builtin::BI__builtin_reduce_mul:
1553 case Builtin::BI__builtin_reduce_xor:
1554 case Builtin::BI__builtin_reduce_or:
1555 case Builtin::BI__builtin_reduce_and:
1556 case Builtin::BI__builtin_reduce_assoc_fadd:
1557 case Builtin::BI__builtin_reduce_in_order_fadd:
1558 case Builtin::BI__builtin_reduce_maximum:
1559 case Builtin::BI__builtin_reduce_minimum:
1560 case Builtin::BI__builtin_matrix_transpose:
1561 case Builtin::BI__builtin_matrix_column_major_load:
1562 case Builtin::BI__builtin_matrix_column_major_store:
1563 case Builtin::BI__builtin_masked_load:
1564 case Builtin::BI__builtin_masked_expand_load:
1565 case Builtin::BI__builtin_masked_gather:
1566 case Builtin::BI__builtin_masked_store:
1567 case Builtin::BI__builtin_masked_compress_store:
1568 case Builtin::BI__builtin_masked_scatter:
1569 return errorBuiltinNYI(*this, e, builtinID);
1570 case Builtin::BI__builtin_isinf_sign: {
1571 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1572 mlir::Location loc = getLoc(e->getBeginLoc());
1573 mlir::Value arg = emitScalarExpr(e->getArg(0));
1574 mlir::Value isInf =
1575 builder.createIsFPClass(loc, arg, cir::FPClassTest::Infinity);
1576 mlir::Value isNeg = emitSignBit(loc, *this, arg);
1577 mlir::Type intTy = convertType(e->getType());
1578 cir::ConstantOp zero = builder.getNullValue(intTy, loc);
1579 cir::ConstantOp one = builder.getConstant(loc, cir::IntAttr::get(intTy, 1));
1580 cir::ConstantOp negativeOne =
1581 builder.getConstant(loc, cir::IntAttr::get(intTy, -1));
1582 mlir::Value signResult = builder.createSelect(loc, isNeg, negativeOne, one);
1583 mlir::Value result = builder.createSelect(loc, isInf, signResult, zero);
1584 return RValue::get(result);
1585 }
1586 case Builtin::BI__builtin_flt_rounds: {
1587 mlir::Location loc = getLoc(e->getExprLoc());
1588 mlir::Type resultType = convertType(e->getType());
1589 mlir::Value result =
1590 builder.emitIntrinsicCallOp(loc, "get.rounding", resultType);
1591 if (result.getType() != resultType)
1592 result =
1593 builder.createCast(loc, cir::CastKind::integral, result, resultType);
1594 return RValue::get(result);
1595 }
1596 case Builtin::BI__builtin_set_flt_rounds: {
1597 mlir::Location loc = getLoc(e->getExprLoc());
1598 mlir::Value v = emitScalarExpr(e->getArg(0));
1599 builder.emitIntrinsicCallOp(loc, "set.rounding", builder.getVoidTy(),
1600 mlir::ValueRange{v});
1601 return RValue::get(nullptr);
1602 }
1603 case Builtin::BI__builtin_fpclassify: {
1604 CIRGenFunction::CIRGenFPOptionsRAII fPOptsRAII(*this, e);
1605 mlir::Location loc = getLoc(e->getBeginLoc());
1606 mlir::Value value = emitScalarExpr(e->getArg(5));
1607 mlir::Type resultTy = convertType(e->getType());
1608 // if isZero then
1609 // result = FP_ZERO
1610 // elseif isNan then
1611 // result = FP_NAN
1612 // elseif isInfinity then
1613 // result = FP_INFINITE
1614 // elseif isNormal then
1615 // result = FP_NORMAL
1616 // else
1617 // result = FP_SUBNORMAL
1618 auto isZero =
1619 cir::IsFPClassOp::create(builder, loc, value, cir::FPClassTest::Zero);
1620 mlir::Value result =
1621 cir::TernaryOp::create(
1622 builder, loc, isZero,
1623 /*thenBuilder=*/
1624 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1625 mlir::Value zeroLiteral = emitScalarExpr(e->getArg(4));
1626 cir::YieldOp::create(opBuilder, location, zeroLiteral);
1627 },
1628 /*elseBuilder=*/
1629 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1630 auto isNan = cir::IsFPClassOp::create(opBuilder, location, value,
1631 cir::FPClassTest::Nan);
1632 mlir::Value nanResult =
1633 cir::TernaryOp::create(
1634 opBuilder, location, isNan,
1635 /*thenBuilder=*/
1636 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1637 mlir::Value nanLiteral = emitScalarExpr(e->getArg(0));
1638 cir::YieldOp::create(opBuilder, location, nanLiteral);
1639 },
1640 /*elseBuilder=*/
1641 [&](mlir::OpBuilder &opBuilder, mlir::Location location) {
1642 auto isInfinity = cir::IsFPClassOp::create(
1643 opBuilder, location, value,
1644 cir::FPClassTest::Infinity);
1645 mlir::Value infResult =
1646 cir::TernaryOp::create(
1647 opBuilder, location, isInfinity,
1648 /*thenBuilder=*/
1649 [&](mlir::OpBuilder &opBuilder,
1650 mlir::Location location) {
1651 mlir::Value infinityLiteral =
1652 emitScalarExpr(e->getArg(1));
1653 cir::YieldOp::create(opBuilder, location,
1654 infinityLiteral);
1655 },
1656 /*elseBuilder=*/
1657 [&](mlir::OpBuilder &opBuilder,
1658 mlir::Location location) {
1659 auto isNormal = cir::IsFPClassOp::create(
1660 opBuilder, location, value,
1661 cir::FPClassTest::Normal);
1662 mlir::Value fpNormal =
1663 emitScalarExpr(e->getArg(2));
1664 mlir::Value fpSubnormal =
1665 emitScalarExpr(e->getArg(3));
1666 mlir::Value returnValue =
1667 cir::SelectOp::create(
1668 opBuilder, location, resultTy,
1669 isNormal, fpNormal, fpSubnormal);
1670 cir::YieldOp::create(opBuilder, location,
1671 returnValue);
1672 })
1673 .getResult();
1674 cir::YieldOp::create(opBuilder, location, infResult);
1675 })
1676 .getResult();
1677 cir::YieldOp::create(opBuilder, location, nanResult);
1678 })
1679 .getResult();
1680 return RValue::get(result);
1681 }
1682 case Builtin::BIalloca:
1683 case Builtin::BI_alloca:
1684 case Builtin::BI__builtin_alloca_uninitialized:
1685 case Builtin::BI__builtin_alloca:
1686 return emitBuiltinAlloca(*this, e, builtinID);
1687 case Builtin::BI__builtin_alloca_with_align_uninitialized:
1688 case Builtin::BI__builtin_alloca_with_align:
1689 case Builtin::BI__builtin_infer_alloc_token:
1690 return errorBuiltinNYI(*this, e, builtinID);
1691 case Builtin::BIbzero:
1692 case Builtin::BI__builtin_bzero: {
1693 mlir::Location loc = getLoc(e->getSourceRange());
1694 Address destPtr = emitPointerWithAlignment(e->getArg(0));
1695 Address destPtrCast = destPtr.withElementType(builder, cgm.voidTy);
1696 mlir::Value size = emitScalarExpr(e->getArg(1));
1697 mlir::Value zero = builder.getNullValue(builder.getUInt8Ty(), loc);
1699 builder.createMemSet(loc, destPtrCast, zero, size);
1701 return RValue::getIgnored();
1702 }
1703 case Builtin::BIbcopy:
1704 case Builtin::BI__builtin_bcopy: {
1707 mlir::Value sizeVal = emitScalarExpr(e->getArg(2));
1709 e->getArg(0)->getExprLoc(), fd, 0);
1711 e->getArg(1)->getExprLoc(), fd, 0);
1712 builder.createMemMove(getLoc(e->getSourceRange()), dest.getPointer(),
1713 src.getPointer(), sizeVal);
1714 return RValue::get(nullptr);
1715 }
1716 case Builtin::BI__builtin_char_memchr:
1717 case Builtin::BI__builtin_memchr: {
1718 Address srcPtr = emitPointerWithAlignment(e->getArg(0));
1719 mlir::Value src =
1720 builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy());
1721 mlir::Value pattern = emitScalarExpr(e->getArg(1));
1722 mlir::Value len = emitScalarExpr(e->getArg(2));
1723 mlir::Value res = cir::MemChrOp::create(builder, getLoc(e->getExprLoc()),
1724 src, pattern, len);
1725 return RValue::get(res);
1726 }
1727 case Builtin::BImemcpy:
1728 case Builtin::BI__builtin_memcpy:
1729 case Builtin::BImempcpy:
1730 case Builtin::BI__builtin_mempcpy:
1731 case Builtin::BI__builtin_memcpy_inline:
1732 case Builtin::BI__builtin___memcpy_chk:
1733 case Builtin::BI__builtin_objc_memmove_collectable:
1734 case Builtin::BI__builtin___memmove_chk:
1735 case Builtin::BI__builtin_trivially_relocate:
1736 case Builtin::BImemmove:
1737 case Builtin::BI__builtin_memmove:
1738 case Builtin::BImemset:
1739 case Builtin::BI__builtin_memset:
1740 case Builtin::BI__builtin_memset_inline:
1741 case Builtin::BI__builtin___memset_chk:
1742 case Builtin::BI__builtin_wmemchr:
1743 case Builtin::BI__builtin_wmemcmp:
1744 break; // Handled as library calls below.
1745 case Builtin::BI__builtin_dwarf_cfa:
1746 return errorBuiltinNYI(*this, e, builtinID);
1747 case Builtin::BI__builtin_return_address: {
1748 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1749 return RValue::get(cir::ReturnAddrOp::create(
1750 builder, getLoc(e->getExprLoc()),
1751 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
1752 }
1753 case Builtin::BI_ReturnAddress: {
1754 return RValue::get(cir::ReturnAddrOp::create(
1755 builder, getLoc(e->getExprLoc()),
1756 builder.getConstInt(loc, builder.getUInt32Ty(), 0)));
1757 }
1758 case Builtin::BI__builtin_frame_address: {
1759 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1760 mlir::Location loc = getLoc(e->getExprLoc());
1761 mlir::Value addr = cir::FrameAddrOp::create(
1762 builder, loc, allocaInt8PtrTy,
1763 builder.getConstAPInt(loc, builder.getUInt32Ty(), level));
1764 return RValue::get(
1765 builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy));
1766 }
1767 case Builtin::BI__builtin_extract_return_addr:
1768 case Builtin::BI__builtin_frob_return_addr:
1769 case Builtin::BI__builtin_dwarf_sp_column:
1770 case Builtin::BI__builtin_init_dwarf_reg_size_table:
1771 case Builtin::BI__builtin_eh_return:
1772 case Builtin::BI__builtin_unwind_init:
1773 case Builtin::BI__builtin_extend_pointer:
1774 return errorBuiltinNYI(*this, e, builtinID);
1775 case Builtin::BI__builtin_setjmp: {
1777 mlir::Location loc = getLoc(e->getExprLoc());
1778
1779 cir::PointerType voidPtrTy = builder.getVoidPtrTy();
1780 cir::PointerType ppTy = builder.getPointerTo(voidPtrTy);
1781 Address castBuf = buf.withElementType(builder, voidPtrTy);
1782
1784 if (getTarget().getTriple().isSystemZ()) {
1785 cgm.errorNYI(e->getExprLoc(), "setjmp on SystemZ");
1786 return {};
1787 }
1788
1789 mlir::Value frameAddress =
1790 cir::FrameAddrOp::create(builder, loc, voidPtrTy,
1791 mlir::ValueRange{builder.getUInt32(0, loc)})
1792 .getResult();
1793
1794 builder.createStore(loc, frameAddress, castBuf);
1795
1796 mlir::Value stacksave =
1797 cir::StackSaveOp::create(builder, loc, voidPtrTy).getResult();
1798 cir::PtrStrideOp stackSaveSlot = cir::PtrStrideOp::create(
1799 builder, loc, ppTy, castBuf.getPointer(), builder.getSInt32(2, loc));
1800 llvm::TypeSize voidPtrTySize =
1801 cgm.getDataLayout().getTypeAllocSize(voidPtrTy);
1802 CharUnits slotAlign = castBuf.getAlignment().alignmentAtOffset(
1803 CharUnits().fromQuantity(2 * voidPtrTySize));
1804 Address slotAddr = Address(stackSaveSlot, voidPtrTy, slotAlign);
1805 builder.createStore(loc, stacksave, slotAddr);
1806 auto op = cir::EhSetjmpOp::create(builder, loc, castBuf.getPointer());
1807 return RValue::get(op);
1808 }
1809 case Builtin::BI__builtin_longjmp: {
1810 mlir::Value buf = emitScalarExpr(e->getArg(0));
1811 mlir::Location loc = getLoc(e->getExprLoc());
1812
1813 cir::EhLongjmpOp::create(builder, loc, buf);
1814 cir::UnreachableOp::create(builder, loc);
1815 return RValue::get(nullptr);
1816 }
1817 case Builtin::BI__builtin_launder:
1818 case Builtin::BI__sync_fetch_and_add:
1819 case Builtin::BI__sync_fetch_and_sub:
1820 case Builtin::BI__sync_fetch_and_or:
1821 case Builtin::BI__sync_fetch_and_and:
1822 case Builtin::BI__sync_fetch_and_xor:
1823 case Builtin::BI__sync_fetch_and_nand:
1824 case Builtin::BI__sync_add_and_fetch:
1825 case Builtin::BI__sync_sub_and_fetch:
1826 case Builtin::BI__sync_and_and_fetch:
1827 case Builtin::BI__sync_or_and_fetch:
1828 case Builtin::BI__sync_xor_and_fetch:
1829 case Builtin::BI__sync_nand_and_fetch:
1830 case Builtin::BI__sync_val_compare_and_swap:
1831 case Builtin::BI__sync_bool_compare_and_swap:
1832 case Builtin::BI__sync_lock_test_and_set:
1833 case Builtin::BI__sync_lock_release:
1834 case Builtin::BI__sync_swap:
1835 return errorBuiltinNYI(*this, e, builtinID);
1836 case Builtin::BI__sync_fetch_and_add_1:
1837 case Builtin::BI__sync_fetch_and_add_2:
1838 case Builtin::BI__sync_fetch_and_add_4:
1839 case Builtin::BI__sync_fetch_and_add_8:
1840 case Builtin::BI__sync_fetch_and_add_16:
1841 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Add, e);
1842 case Builtin::BI__sync_fetch_and_sub_1:
1843 case Builtin::BI__sync_fetch_and_sub_2:
1844 case Builtin::BI__sync_fetch_and_sub_4:
1845 case Builtin::BI__sync_fetch_and_sub_8:
1846 case Builtin::BI__sync_fetch_and_sub_16:
1847 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Sub, e);
1848 case Builtin::BI__sync_fetch_and_or_1:
1849 case Builtin::BI__sync_fetch_and_or_2:
1850 case Builtin::BI__sync_fetch_and_or_4:
1851 case Builtin::BI__sync_fetch_and_or_8:
1852 case Builtin::BI__sync_fetch_and_or_16:
1853 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Or, e);
1854 case Builtin::BI__sync_fetch_and_and_1:
1855 case Builtin::BI__sync_fetch_and_and_2:
1856 case Builtin::BI__sync_fetch_and_and_4:
1857 case Builtin::BI__sync_fetch_and_and_8:
1858 case Builtin::BI__sync_fetch_and_and_16:
1859 return emitBinaryAtomic(*this, cir::AtomicFetchKind::And, e);
1860 case Builtin::BI__sync_fetch_and_xor_1:
1861 case Builtin::BI__sync_fetch_and_xor_2:
1862 case Builtin::BI__sync_fetch_and_xor_4:
1863 case Builtin::BI__sync_fetch_and_xor_8:
1864 case Builtin::BI__sync_fetch_and_xor_16:
1865 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Xor, e);
1866 case Builtin::BI__sync_fetch_and_nand_1:
1867 case Builtin::BI__sync_fetch_and_nand_2:
1868 case Builtin::BI__sync_fetch_and_nand_4:
1869 case Builtin::BI__sync_fetch_and_nand_8:
1870 case Builtin::BI__sync_fetch_and_nand_16:
1871 return emitBinaryAtomic(*this, cir::AtomicFetchKind::Nand, e);
1872 case Builtin::BI__sync_fetch_and_min:
1873 case Builtin::BI__sync_fetch_and_max:
1874 case Builtin::BI__sync_fetch_and_umin:
1875 case Builtin::BI__sync_fetch_and_umax:
1876 return errorBuiltinNYI(*this, e, builtinID);
1877 return getUndefRValue(e->getType());
1878 case Builtin::BI__sync_add_and_fetch_1:
1879 case Builtin::BI__sync_add_and_fetch_2:
1880 case Builtin::BI__sync_add_and_fetch_4:
1881 case Builtin::BI__sync_add_and_fetch_8:
1882 case Builtin::BI__sync_add_and_fetch_16:
1883 return emitBinaryAtomicPost<cir::AddOp>(*this, cir::AtomicFetchKind::Add,
1884 e);
1885 case Builtin::BI__sync_sub_and_fetch_1:
1886 case Builtin::BI__sync_sub_and_fetch_2:
1887 case Builtin::BI__sync_sub_and_fetch_4:
1888 case Builtin::BI__sync_sub_and_fetch_8:
1889 case Builtin::BI__sync_sub_and_fetch_16:
1890 return emitBinaryAtomicPost<cir::SubOp>(*this, cir::AtomicFetchKind::Sub,
1891 e);
1892 case Builtin::BI__sync_and_and_fetch_1:
1893 case Builtin::BI__sync_and_and_fetch_2:
1894 case Builtin::BI__sync_and_and_fetch_4:
1895 case Builtin::BI__sync_and_and_fetch_8:
1896 case Builtin::BI__sync_and_and_fetch_16:
1897 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::And,
1898 e);
1899 case Builtin::BI__sync_or_and_fetch_1:
1900 case Builtin::BI__sync_or_and_fetch_2:
1901 case Builtin::BI__sync_or_and_fetch_4:
1902 case Builtin::BI__sync_or_and_fetch_8:
1903 case Builtin::BI__sync_or_and_fetch_16:
1904 return emitBinaryAtomicPost<cir::OrOp>(*this, cir::AtomicFetchKind::Or, e);
1905 case Builtin::BI__sync_xor_and_fetch_1:
1906 case Builtin::BI__sync_xor_and_fetch_2:
1907 case Builtin::BI__sync_xor_and_fetch_4:
1908 case Builtin::BI__sync_xor_and_fetch_8:
1909 case Builtin::BI__sync_xor_and_fetch_16:
1910 return emitBinaryAtomicPost<cir::XorOp>(*this, cir::AtomicFetchKind::Xor,
1911 e);
1912 case Builtin::BI__sync_nand_and_fetch_1:
1913 case Builtin::BI__sync_nand_and_fetch_2:
1914 case Builtin::BI__sync_nand_and_fetch_4:
1915 case Builtin::BI__sync_nand_and_fetch_8:
1916 case Builtin::BI__sync_nand_and_fetch_16:
1917 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::Nand,
1918 e, /*invert=*/true);
1919 case Builtin::BI__sync_val_compare_and_swap_1:
1920 case Builtin::BI__sync_val_compare_and_swap_2:
1921 case Builtin::BI__sync_val_compare_and_swap_4:
1922 case Builtin::BI__sync_val_compare_and_swap_8:
1923 case Builtin::BI__sync_val_compare_and_swap_16:
1924 case Builtin::BI__sync_bool_compare_and_swap_1:
1925 case Builtin::BI__sync_bool_compare_and_swap_2:
1926 case Builtin::BI__sync_bool_compare_and_swap_4:
1927 case Builtin::BI__sync_bool_compare_and_swap_8:
1928 case Builtin::BI__sync_bool_compare_and_swap_16:
1929 case Builtin::BI__sync_swap_1:
1930 case Builtin::BI__sync_swap_2:
1931 case Builtin::BI__sync_swap_4:
1932 case Builtin::BI__sync_swap_8:
1933 case Builtin::BI__sync_swap_16:
1934 case Builtin::BI__sync_lock_test_and_set_1:
1935 case Builtin::BI__sync_lock_test_and_set_2:
1936 case Builtin::BI__sync_lock_test_and_set_4:
1937 case Builtin::BI__sync_lock_test_and_set_8:
1938 case Builtin::BI__sync_lock_test_and_set_16:
1939 case Builtin::BI__sync_lock_release_1:
1940 case Builtin::BI__sync_lock_release_2:
1941 case Builtin::BI__sync_lock_release_4:
1942 case Builtin::BI__sync_lock_release_8:
1943 case Builtin::BI__sync_lock_release_16:
1944 case Builtin::BI__sync_synchronize:
1945 case Builtin::BI__builtin_nontemporal_load:
1946 case Builtin::BI__builtin_nontemporal_store:
1947 case Builtin::BI__c11_atomic_is_lock_free:
1948 case Builtin::BI__atomic_is_lock_free:
1949 case Builtin::BI__atomic_test_and_set:
1950 case Builtin::BI__atomic_clear:
1951 return errorBuiltinNYI(*this, e, builtinID);
1952 case Builtin::BI__atomic_thread_fence:
1953 case Builtin::BI__c11_atomic_thread_fence: {
1954 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
1955 return RValue::get(nullptr);
1956 }
1957 case Builtin::BI__atomic_signal_fence:
1958 case Builtin::BI__c11_atomic_signal_fence: {
1959 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
1960 return RValue::get(nullptr);
1961 }
1962 case Builtin::BI__scoped_atomic_thread_fence:
1963 return errorBuiltinNYI(*this, e, builtinID);
1964 case Builtin::BI__builtin_signbit:
1965 case Builtin::BI__builtin_signbitf:
1966 case Builtin::BI__builtin_signbitl: {
1967 CIRGenFunction::CIRGenFPOptionsRAII fPOptsRAII(*this, e);
1968 mlir::Location loc = getLoc(e->getBeginLoc());
1969 mlir::Value value = emitScalarExpr(e->getArg(0));
1970 mlir::Operation *signBitOp = cir::SignBitOp::create(builder, loc, value);
1971 mlir::Value result = builder.createBoolToInt(signBitOp->getResult(0),
1972 convertType(e->getType()));
1973 return RValue::get(result);
1974 }
1975 case Builtin::BI__warn_memset_zero_len:
1976 case Builtin::BI__annotation:
1977 case Builtin::BI__builtin_annotation:
1978 case Builtin::BI__builtin_addcb:
1979 case Builtin::BI__builtin_addcs:
1980 case Builtin::BI__builtin_addc:
1981 case Builtin::BI__builtin_addcl:
1982 case Builtin::BI__builtin_addcll:
1983 case Builtin::BI__builtin_subcb:
1984 case Builtin::BI__builtin_subcs:
1985 case Builtin::BI__builtin_subc:
1986 case Builtin::BI__builtin_subcl:
1987 case Builtin::BI__builtin_subcll:
1988 return errorBuiltinNYI(*this, e, builtinID);
1989
1990 case Builtin::BI__builtin_add_overflow:
1991 case Builtin::BI__builtin_sub_overflow:
1992 case Builtin::BI__builtin_mul_overflow: {
1993 const clang::Expr *leftArg = e->getArg(0);
1994 const clang::Expr *rightArg = e->getArg(1);
1995 const clang::Expr *resultArg = e->getArg(2);
1996
1997 clang::QualType resultQTy =
1998 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1999
2000 WidthAndSignedness leftInfo =
2001 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
2002 WidthAndSignedness rightInfo =
2003 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
2004 WidthAndSignedness resultInfo =
2005 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
2006
2007 // Note we compute the encompassing type with the consideration to the
2008 // result type, so later in LLVM lowering we don't get redundant integral
2009 // extension casts.
2010 WidthAndSignedness encompassingInfo =
2011 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
2012
2013 auto encompassingCIRTy = cir::IntType::get(
2014 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
2015 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
2016
2017 mlir::Value x = emitScalarExpr(leftArg);
2018 mlir::Value y = emitScalarExpr(rightArg);
2019 Address resultPtr = emitPointerWithAlignment(resultArg);
2020
2021 // Extend each operand to the encompassing type, if necessary.
2022 if (x.getType() != encompassingCIRTy)
2023 x = builder.createCast(cir::CastKind::integral, x, encompassingCIRTy);
2024 if (y.getType() != encompassingCIRTy)
2025 y = builder.createCast(cir::CastKind::integral, y, encompassingCIRTy);
2026
2027 // Perform the operation on the extended values.
2028 mlir::Location loc = getLoc(e->getSourceRange());
2029 mlir::Value result, overflow;
2030 switch (builtinID) {
2031 default:
2032 llvm_unreachable("Unknown overflow builtin id.");
2033 case Builtin::BI__builtin_add_overflow:
2034 std::tie(result, overflow) =
2035 emitOverflowOp<cir::AddOverflowOp>(builder, loc, resultCIRTy, x, y);
2036 break;
2037 case Builtin::BI__builtin_sub_overflow:
2038 std::tie(result, overflow) =
2039 emitOverflowOp<cir::SubOverflowOp>(builder, loc, resultCIRTy, x, y);
2040 break;
2041 case Builtin::BI__builtin_mul_overflow:
2042 std::tie(result, overflow) =
2043 emitOverflowOp<cir::MulOverflowOp>(builder, loc, resultCIRTy, x, y);
2044 break;
2045 }
2046
2047 // Here is a slight difference from the original clang CodeGen:
2048 // - In the original clang CodeGen, the checked arithmetic result is
2049 // first computed as a value of the encompassing type, and then it is
2050 // truncated to the actual result type with a second overflow checking.
2051 // - In CIRGen, the checked arithmetic operation directly produce the
2052 // checked arithmetic result in its expected type.
2053 //
2054 // So we don't need a truncation and a second overflow checking here.
2055
2056 // Finally, store the result using the pointer.
2057 bool isVolatile =
2058 resultArg->getType()->getPointeeType().isVolatileQualified();
2059 builder.createStore(loc, result, resultPtr, isVolatile);
2060
2061 return RValue::get(overflow);
2062 }
2063
2064 case Builtin::BI__builtin_uadd_overflow:
2065 case Builtin::BI__builtin_uaddl_overflow:
2066 case Builtin::BI__builtin_uaddll_overflow:
2067 case Builtin::BI__builtin_usub_overflow:
2068 case Builtin::BI__builtin_usubl_overflow:
2069 case Builtin::BI__builtin_usubll_overflow:
2070 case Builtin::BI__builtin_umul_overflow:
2071 case Builtin::BI__builtin_umull_overflow:
2072 case Builtin::BI__builtin_umulll_overflow:
2073 case Builtin::BI__builtin_sadd_overflow:
2074 case Builtin::BI__builtin_saddl_overflow:
2075 case Builtin::BI__builtin_saddll_overflow:
2076 case Builtin::BI__builtin_ssub_overflow:
2077 case Builtin::BI__builtin_ssubl_overflow:
2078 case Builtin::BI__builtin_ssubll_overflow:
2079 case Builtin::BI__builtin_smul_overflow:
2080 case Builtin::BI__builtin_smull_overflow:
2081 case Builtin::BI__builtin_smulll_overflow: {
2082 // Scalarize our inputs.
2083 mlir::Value x = emitScalarExpr(e->getArg(0));
2084 mlir::Value y = emitScalarExpr(e->getArg(1));
2085
2086 const clang::Expr *resultArg = e->getArg(2);
2087 Address resultPtr = emitPointerWithAlignment(resultArg);
2088
2089 clang::QualType resultQTy =
2090 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
2091 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
2092
2093 // Create the appropriate overflow-checked arithmetic operation.
2094 mlir::Location loc = getLoc(e->getSourceRange());
2095 mlir::Value result, overflow;
2096 switch (builtinID) {
2097 default:
2098 llvm_unreachable("Unknown overflow builtin id.");
2099 case Builtin::BI__builtin_uadd_overflow:
2100 case Builtin::BI__builtin_uaddl_overflow:
2101 case Builtin::BI__builtin_uaddll_overflow:
2102 case Builtin::BI__builtin_sadd_overflow:
2103 case Builtin::BI__builtin_saddl_overflow:
2104 case Builtin::BI__builtin_saddll_overflow:
2105 std::tie(result, overflow) =
2106 emitOverflowOp<cir::AddOverflowOp>(builder, loc, resultCIRTy, x, y);
2107 break;
2108 case Builtin::BI__builtin_usub_overflow:
2109 case Builtin::BI__builtin_usubl_overflow:
2110 case Builtin::BI__builtin_usubll_overflow:
2111 case Builtin::BI__builtin_ssub_overflow:
2112 case Builtin::BI__builtin_ssubl_overflow:
2113 case Builtin::BI__builtin_ssubll_overflow:
2114 std::tie(result, overflow) =
2115 emitOverflowOp<cir::SubOverflowOp>(builder, loc, resultCIRTy, x, y);
2116 break;
2117 case Builtin::BI__builtin_umul_overflow:
2118 case Builtin::BI__builtin_umull_overflow:
2119 case Builtin::BI__builtin_umulll_overflow:
2120 case Builtin::BI__builtin_smul_overflow:
2121 case Builtin::BI__builtin_smull_overflow:
2122 case Builtin::BI__builtin_smulll_overflow:
2123 std::tie(result, overflow) =
2124 emitOverflowOp<cir::MulOverflowOp>(builder, loc, resultCIRTy, x, y);
2125 break;
2126 }
2127
2128 bool isVolatile =
2129 resultArg->getType()->getPointeeType().isVolatileQualified();
2130 builder.createStore(loc, emitToMemory(result, resultQTy), resultPtr,
2131 isVolatile);
2132
2133 return RValue::get(overflow);
2134 }
2135
2136 case Builtin::BIaddressof:
2137 case Builtin::BI__addressof:
2138 case Builtin::BI__builtin_addressof:
2139 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2140 case Builtin::BI__builtin_function_start:
2141 return errorBuiltinNYI(*this, e, builtinID);
2142 case Builtin::BI__builtin_operator_new:
2144 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
2145 case Builtin::BI__builtin_operator_delete:
2147 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
2148 return RValue::get(nullptr);
2149 case Builtin::BI__builtin_is_aligned:
2150 case Builtin::BI__builtin_align_up:
2151 case Builtin::BI__builtin_align_down:
2152 case Builtin::BI__noop:
2153 case Builtin::BI__builtin_call_with_static_chain:
2154 case Builtin::BI_InterlockedExchange8:
2155 case Builtin::BI_InterlockedExchange16:
2156 case Builtin::BI_InterlockedExchange:
2157 case Builtin::BI_InterlockedExchangePointer:
2158 case Builtin::BI_InterlockedCompareExchangePointer:
2159 case Builtin::BI_InterlockedCompareExchangePointer_nf:
2160 case Builtin::BI_InterlockedCompareExchange8:
2161 case Builtin::BI_InterlockedCompareExchange16:
2162 case Builtin::BI_InterlockedCompareExchange:
2163 case Builtin::BI_InterlockedCompareExchange64:
2164 case Builtin::BI_InterlockedIncrement16:
2165 case Builtin::BI_InterlockedIncrement:
2166 case Builtin::BI_InterlockedDecrement16:
2167 case Builtin::BI_InterlockedDecrement:
2168 case Builtin::BI_InterlockedAnd8:
2169 case Builtin::BI_InterlockedAnd16:
2170 case Builtin::BI_InterlockedAnd:
2171 case Builtin::BI_InterlockedExchangeAdd8:
2172 case Builtin::BI_InterlockedExchangeAdd16:
2173 case Builtin::BI_InterlockedExchangeAdd:
2174 case Builtin::BI_InterlockedExchangeSub8:
2175 case Builtin::BI_InterlockedExchangeSub16:
2176 case Builtin::BI_InterlockedExchangeSub:
2177 case Builtin::BI_InterlockedOr8:
2178 case Builtin::BI_InterlockedOr16:
2179 case Builtin::BI_InterlockedOr:
2180 case Builtin::BI_InterlockedXor8:
2181 case Builtin::BI_InterlockedXor16:
2182 case Builtin::BI_InterlockedXor:
2183 case Builtin::BI_bittest64:
2184 case Builtin::BI_bittest:
2185 case Builtin::BI_bittestandcomplement64:
2186 case Builtin::BI_bittestandcomplement:
2187 case Builtin::BI_bittestandreset64:
2188 case Builtin::BI_bittestandreset:
2189 case Builtin::BI_bittestandset64:
2190 case Builtin::BI_bittestandset:
2191 case Builtin::BI_interlockedbittestandreset:
2192 case Builtin::BI_interlockedbittestandreset64:
2193 case Builtin::BI_interlockedbittestandreset64_acq:
2194 case Builtin::BI_interlockedbittestandreset64_rel:
2195 case Builtin::BI_interlockedbittestandreset64_nf:
2196 case Builtin::BI_interlockedbittestandset64:
2197 case Builtin::BI_interlockedbittestandset64_acq:
2198 case Builtin::BI_interlockedbittestandset64_rel:
2199 case Builtin::BI_interlockedbittestandset64_nf:
2200 case Builtin::BI_interlockedbittestandset:
2201 case Builtin::BI_interlockedbittestandset_acq:
2202 case Builtin::BI_interlockedbittestandset_rel:
2203 case Builtin::BI_interlockedbittestandset_nf:
2204 case Builtin::BI_interlockedbittestandreset_acq:
2205 case Builtin::BI_interlockedbittestandreset_rel:
2206 case Builtin::BI_interlockedbittestandreset_nf:
2207 case Builtin::BI__iso_volatile_load8:
2208 case Builtin::BI__iso_volatile_load16:
2209 case Builtin::BI__iso_volatile_load32:
2210 case Builtin::BI__iso_volatile_load64:
2211 case Builtin::BI__iso_volatile_store8:
2212 case Builtin::BI__iso_volatile_store16:
2213 case Builtin::BI__iso_volatile_store32:
2214 case Builtin::BI__iso_volatile_store64:
2215 case Builtin::BI__builtin_ptrauth_sign_constant:
2216 case Builtin::BI__builtin_ptrauth_auth:
2217 case Builtin::BI__builtin_ptrauth_auth_and_resign:
2218 case Builtin::BI__builtin_ptrauth_blend_discriminator:
2219 case Builtin::BI__builtin_ptrauth_sign_generic_data:
2220 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
2221 case Builtin::BI__builtin_ptrauth_strip:
2222 case Builtin::BI__builtin_get_vtable_pointer:
2223 case Builtin::BI__exception_code:
2224 case Builtin::BI_exception_code:
2225 case Builtin::BI__exception_info:
2226 case Builtin::BI_exception_info:
2227 case Builtin::BI__abnormal_termination:
2228 case Builtin::BI_abnormal_termination:
2229 case Builtin::BI_setjmpex:
2230 case Builtin::BI_setjmp:
2231 return errorBuiltinNYI(*this, e, builtinID);
2232 case Builtin::BImove:
2233 case Builtin::BImove_if_noexcept:
2234 case Builtin::BIforward:
2235 case Builtin::BIforward_like:
2236 case Builtin::BIas_const:
2237 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2238 case Builtin::BI__GetExceptionInfo:
2239 case Builtin::BI__fastfail:
2240 case Builtin::BIread_pipe:
2241 case Builtin::BIwrite_pipe:
2242 case Builtin::BIreserve_read_pipe:
2243 case Builtin::BIreserve_write_pipe:
2244 case Builtin::BIwork_group_reserve_read_pipe:
2245 case Builtin::BIwork_group_reserve_write_pipe:
2246 case Builtin::BIsub_group_reserve_read_pipe:
2247 case Builtin::BIsub_group_reserve_write_pipe:
2248 case Builtin::BIcommit_read_pipe:
2249 case Builtin::BIcommit_write_pipe:
2250 case Builtin::BIwork_group_commit_read_pipe:
2251 case Builtin::BIwork_group_commit_write_pipe:
2252 case Builtin::BIsub_group_commit_read_pipe:
2253 case Builtin::BIsub_group_commit_write_pipe:
2254 case Builtin::BIget_pipe_num_packets:
2255 case Builtin::BIget_pipe_max_packets:
2256 case Builtin::BIto_global:
2257 case Builtin::BIto_local:
2258 case Builtin::BIto_private:
2259 case Builtin::BIenqueue_kernel:
2260 case Builtin::BIget_kernel_work_group_size:
2261 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2262 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2263 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2264 case Builtin::BI__builtin_store_half:
2265 case Builtin::BI__builtin_store_halff:
2266 case Builtin::BI__builtin_load_half:
2267 case Builtin::BI__builtin_load_halff:
2268 return errorBuiltinNYI(*this, e, builtinID);
2269 case Builtin::BI__builtin_printf:
2270 case Builtin::BIprintf:
2271 break;
2272 case Builtin::BI__builtin_canonicalize:
2273 case Builtin::BI__builtin_canonicalizef:
2274 case Builtin::BI__builtin_canonicalizef16:
2275 case Builtin::BI__builtin_canonicalizel:
2276 case Builtin::BI__builtin_thread_pointer:
2277 case Builtin::BI__builtin_os_log_format:
2278 case Builtin::BI__xray_customevent:
2279 case Builtin::BI__xray_typedevent:
2280 case Builtin::BI__builtin_ms_va_start:
2281 case Builtin::BI__builtin_ms_va_end:
2282 case Builtin::BI__builtin_ms_va_copy:
2283 case Builtin::BI__builtin_get_device_side_mangled_name:
2284 return errorBuiltinNYI(*this, e, builtinID);
2285 }
2286
2287 // If this is an alias for a lib function (e.g. __builtin_sin), emit
2288 // the call using the normal call path, but using the unmangled
2289 // version of the function name.
2290 if (getContext().BuiltinInfo.isLibFunction(builtinID))
2291 return emitLibraryCall(*this, fd, e,
2292 cgm.getBuiltinLibFunction(fd, builtinID));
2293
2294 // If this is a predefined lib function (e.g. malloc), emit the call
2295 // using exactly the normal call path.
2296 if (getContext().BuiltinInfo.isPredefinedLibFunction(builtinID))
2297 return emitLibraryCall(*this, fd, e,
2298 emitScalarExpr(e->getCallee()).getDefiningOp());
2299
2300 // See if we have a target specific intrinsic.
2301 std::string name = getContext().BuiltinInfo.getName(builtinID);
2302 Intrinsic::ID intrinsicID = Intrinsic::not_intrinsic;
2303 StringRef prefix =
2304 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
2305 if (!prefix.empty()) {
2306 intrinsicID = Intrinsic::getIntrinsicForClangBuiltin(prefix.data(), name);
2307 // NOTE we don't need to perform a compatibility flag check here since the
2308 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
2309 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
2310 if (intrinsicID == Intrinsic::not_intrinsic)
2311 intrinsicID = Intrinsic::getIntrinsicForMSBuiltin(prefix.data(), name);
2312 }
2313
2314 if (intrinsicID != Intrinsic::not_intrinsic) {
2315 unsigned iceArguments = 0;
2317 getContext().GetBuiltinType(builtinID, error, &iceArguments);
2318 assert(error == ASTContext::GE_None && "Should not codegen an error");
2319
2320 llvm::StringRef name = llvm::Intrinsic::getName(intrinsicID);
2321 // cir::LLVMIntrinsicCallOp expects intrinsic name to not have prefix
2322 // "llvm." For example, `llvm.nvvm.barrier0` should be passed as
2323 // `nvvm.barrier0`.
2324 assert(name.starts_with("llvm.") && "expected llvm. prefix");
2325 name = name.drop_front(/*strlen("llvm.")=*/5);
2326
2327 cir::FuncType intrinsicType =
2328 getIntrinsicType(*this, &getMLIRContext(), intrinsicID);
2329
2331 const FunctionDecl *fd = e->getDirectCallee();
2332 for (unsigned i = 0; i < e->getNumArgs(); i++) {
2333 mlir::Value argValue =
2334 emitScalarOrConstFoldImmArg(iceArguments, i, e->getArg(i));
2335 // If the intrinsic arg type is different from the builtin arg type
2336 // we need to do a bit cast.
2337 mlir::Type argType = argValue.getType();
2338 mlir::Type expectedTy = intrinsicType.getInput(i);
2339
2340 // Correct integer signedness based on AST parameter type
2341 mlir::Type correctedExpectedTy = expectedTy;
2342 if (fd && i < fd->getNumParams()) {
2343 correctedExpectedTy = correctIntegerSignedness(
2344 expectedTy, fd->getParamDecl(i)->getType(), &getMLIRContext());
2345 }
2346
2347 if (mlir::isa<cir::PointerType>(expectedTy)) {
2348 bool argIsPointer = mlir::isa<cir::PointerType>(argType);
2349 bool argIsVectorOfPointer = false;
2350 if (auto vecTy = dyn_cast<mlir::VectorType>(argType))
2351 argIsVectorOfPointer =
2352 mlir::isa<cir::PointerType>(vecTy.getElementType());
2353
2354 if (!argIsPointer && !argIsVectorOfPointer) {
2355 cgm.errorNYI(
2356 e->getSourceRange(),
2357 "intrinsic expects a pointer type (NYI for non-pointer)");
2358 return getUndefRValue(e->getType());
2359 }
2360
2361 // Pointer handling (address-space cast / bitcast fallback).
2362 if (argType != expectedTy)
2363 argValue = getCorrectedPtr(argValue, expectedTy, builder);
2364 } else {
2365 // Non-pointer expected type: if needed, bitcast to the corrected
2366 // expected type to match signedness/representation.
2367 if (argType != correctedExpectedTy)
2368 argValue = builder.createBitcast(argValue, correctedExpectedTy);
2369 }
2370
2371 args.push_back(argValue);
2372 }
2373
2374 // Correct return type signedness based on AST return type before creating
2375 // the call, avoiding unnecessary casts in the IR.
2376 mlir::Type correctedReturnType = intrinsicType.getReturnType();
2377 if (fd) {
2378 correctedReturnType =
2379 correctIntegerSignedness(intrinsicType.getReturnType(),
2380 fd->getReturnType(), &getMLIRContext());
2381 }
2382
2383 cir::LLVMIntrinsicCallOp intrinsicCall = cir::LLVMIntrinsicCallOp::create(
2384 builder, getLoc(e->getExprLoc()), builder.getStringAttr(name),
2385 correctedReturnType, args);
2386
2387 mlir::Value intrinsicRes = intrinsicCall.getResult();
2388
2389 if (isa<cir::VoidType>(correctedReturnType))
2390 return RValue::get(nullptr);
2391
2392 return RValue::get(intrinsicRes);
2393 }
2394
2395 // Some target-specific builtins can have aggregate return values, e.g.
2396 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
2397 // returnValue to be non-null, so that the target-specific emission code can
2398 // always just emit into it.
2400 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
2401 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2402 return getUndefRValue(e->getType());
2403 }
2404
2405 // Now see if we can emit a target-specific builtin.
2406 // FIXME: This is a temporary mechanism (double-optional semantics) that will
2407 // go away once everything is implemented:
2408 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
2409 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
2410 // but also didn't handle the builtin.
2411 if (std::optional<mlir::Value> rst =
2412 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
2413 mlir::Value v = rst.value();
2414 // CIR dialect operations may have no results, no values will be returned
2415 // even if it executes successfully.
2416 if (!v)
2417 return RValue::get(nullptr);
2418
2419 switch (evalKind) {
2420 case cir::TEK_Scalar:
2421 if (mlir::isa<cir::VoidType>(v.getType()))
2422 return RValue::get(nullptr);
2423 return RValue::get(v);
2424 case cir::TEK_Aggregate:
2425 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2426 return getUndefRValue(e->getType());
2427 case cir::TEK_Complex:
2428 llvm_unreachable("No current target builtin returns complex");
2429 }
2430 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
2431 }
2432
2433 cgm.errorNYI(e->getSourceRange(),
2434 std::string("unimplemented builtin call: ") +
2435 getContext().BuiltinInfo.getName(builtinID));
2436 return getUndefRValue(e->getType());
2437}
2438
2439static std::optional<mlir::Value>
2441 const CallExpr *e, ReturnValueSlot &returnValue,
2442 llvm::Triple::ArchType arch) {
2443 // When compiling in HipStdPar mode we have to be conservative in rejecting
2444 // target specific features in the FE, and defer the possible error to the
2445 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
2446 // referenced by an accelerator executable function, we emit an error.
2447 // Returning nullptr here leads to the builtin being handled in
2448 // EmitStdParUnsupportedBuiltin.
2449 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
2450 arch != cgf->getTarget().getTriple().getArch())
2451 return std::nullopt;
2452
2453 switch (arch) {
2454 case llvm::Triple::arm:
2455 case llvm::Triple::armeb:
2456 case llvm::Triple::thumb:
2457 case llvm::Triple::thumbeb:
2458 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2459 // At this point, we don't even know that the builtin is target-specific.
2460 return std::nullopt;
2461 case llvm::Triple::aarch64:
2462 case llvm::Triple::aarch64_32:
2463 case llvm::Triple::aarch64_be:
2464 return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
2465 case llvm::Triple::bpfeb:
2466 case llvm::Triple::bpfel:
2467 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2468 // At this point, we don't even know that the builtin is target-specific.
2469 return std::nullopt;
2470
2471 case llvm::Triple::x86:
2472 case llvm::Triple::x86_64:
2473 return cgf->emitX86BuiltinExpr(builtinID, e);
2474
2475 case llvm::Triple::ppc:
2476 case llvm::Triple::ppcle:
2477 case llvm::Triple::ppc64:
2478 case llvm::Triple::ppc64le:
2479 case llvm::Triple::r600:
2480 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2481 // At this point, we don't even know that the builtin is target-specific.
2482 return std::nullopt;
2483 case llvm::Triple::amdgcn:
2484 return cgf->emitAMDGPUBuiltinExpr(builtinID, e);
2485 case llvm::Triple::systemz:
2486 case llvm::Triple::nvptx:
2487 case llvm::Triple::nvptx64:
2488 case llvm::Triple::wasm32:
2489 case llvm::Triple::wasm64:
2490 case llvm::Triple::hexagon:
2491 // These are actually NYI, but that will be reported by emitBuiltinExpr.
2492 // At this point, we don't even know that the builtin is target-specific.
2493 return std::nullopt;
2494 case llvm::Triple::riscv32:
2495 case llvm::Triple::riscv64:
2496 return cgf->emitRISCVBuiltinExpr(builtinID, e);
2497 default:
2498 return std::nullopt;
2499 }
2500}
2501
2502std::optional<mlir::Value>
2505 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
2506 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
2508 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
2509 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
2510 }
2511
2512 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
2513 getTarget().getTriple().getArch());
2514}
2515
2517 const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
2518 mlir::Value arg = {};
2519 if ((iceArguments & (1 << idx)) == 0) {
2520 arg = emitScalarExpr(argExpr);
2521 } else {
2522 // If this is required to be a constant, constant fold it so that we
2523 // know that the generated intrinsic gets a ConstantInt.
2524 const std::optional<llvm::APSInt> result =
2526 assert(result && "Expected argument to be a constant");
2527 arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
2528 }
2529 return arg;
2530}
2531
2532/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
2533/// for "fabsf".
2535 unsigned builtinID) {
2536 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
2537
2538 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
2539 // to build this up so provide a small stack buffer to handle the vast
2540 // majority of names.
2542
2544 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
2545
2546 GlobalDecl d(fd);
2547 mlir::Type type = convertType(fd->getType());
2548 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
2549}
2550
2552 mlir::Value argValue = evaluateExprAsBool(e);
2553 if (!sanOpts.has(SanitizerKind::Builtin))
2554 return argValue;
2555
2557 cgm.errorNYI(e->getSourceRange(),
2558 "emitCheckedArgForAssume: sanitizers are NYI");
2559 return {};
2560}
2561
2562void CIRGenFunction::emitVAStart(mlir::Value vaList) {
2563 // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
2564 // early, defer to LLVM lowering.
2565 cir::VAStartOp::create(builder, vaList.getLoc(), vaList);
2566}
2567
2568void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
2569 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
2570}
2571
2572// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
2573// default this lowers to llvm.va_arg which is incomplete and not ABI-compliant
2574// on most targets so cir.va_arg will need some ABI handling in LoweringPrepare
2576 assert(!cir::MissingFeatures::msabi());
2577 assert(!cir::MissingFeatures::vlas());
2578 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
2579 mlir::Type type = convertType(ve->getType());
2580 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
2581 return cir::VAArgOp::create(builder, loc, type, vaList);
2582}
2583
/// Emit the value of __builtin_object_size / __builtin_dynamic_object_size as
/// either a `cir.objsize` op or a constant.
///
/// \param e the pointer argument of the builtin.
/// \param type the builtin's `type` argument (0-3); bit 1 selects min vs. max
///        semantics, bit 0 (closest-subobject mode) is not representable in
///        cir.objsize.
/// \param emittedE if non-null, an already-emitted value for \p e, so the
///        expression is not evaluated a second time.
/// \param isDynamic true for the __builtin_dynamic_object_size form.
mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
                                                  cir::IntType resType,
                                                  mlir::Value emittedE,
                                                  bool isDynamic) {

  // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate e for side-effects. In either case, just like original LLVM
  // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
  // The "unknown" constant is 0 for min modes (type bit 1 set) and -1 for max
  // modes, matching the builtin's documented failure values.
  if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
    return builder.getConstInt(getLoc(e->getSourceRange()), resType,
                               (type & 2) ? 0 : -1);

  // Reuse the pre-emitted pointer when the caller supplied one.
  mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
  assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
         "Non-pointer passed to __builtin_object_size?");


  // Extract the min/max mode from type. CIR only supports type 0
  // (max, whole object) and type 2 (min, whole object), not type 1 or 3
  // (closest subobject variants).
  const bool min = ((type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  auto op =
      cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
                             min, /*nullUnknown=*/true, isDynamic);
  return op.getResult();
}
2613
2615 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
2616 bool isDynamic) {
2617 if (std::optional<uint64_t> objectSize =
2619 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
2620 *objectSize);
2621 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
2622}
static StringRef bytes(const std::vector< T, Allocator > &v)
Defines enum values for all the target-independent builtin functions.
static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf, mlir::Value val)
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Type decodeFixedType(CIRGenFunction &cgf, ArrayRef< llvm::Intrinsic::IITDescriptor > &infos, mlir::MLIRContext *context)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e, bool invert=false)
static std::optional< mlir::Value > emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID, const CallExpr *e, ReturnValueSlot &returnValue, llvm::Triple::ArchType arch)
static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryAtomic(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e)
static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t, cir::IntType intType)
Emit the conversions required to turn the given value into an integer of the given size.
static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy, CIRGenBuilderTy &builder)
static std::pair< mlir::Value, mlir::Value > emitOverflowOp(CIRGenBuilderTy &builder, mlir::Location loc, cir::IntType resultTy, mlir::Value lhs, mlir::Value rhs)
Create a checked overflow arithmetic op and return its result and overflow flag.
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, bool poisonZero=false)
static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr, cir::SyncScopeKind syncScope)
static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e)
static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static cir::FuncType getIntrinsicType(CIRGenFunction &cgf, mlir::MLIRContext *context, llvm::Intrinsic::ID id)
static mlir::Value makeBinaryAtomicValue(CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, mlir::Type *originalArgType=nullptr, mlir::Value *emittedArgValue=nullptr, cir::MemOrder ordering=cir::MemOrder::SequentiallyConsistent)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType, mlir::MLIRContext *context)
Helper function to correct integer signedness for intrinsic arguments and return type.
static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t, mlir::Type resultType)
static StringRef getTriple(const Command &Job)
A selector is a collection of selector pieces, each with an associated kind, forming an ordered collection; a selector has a kind
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ int min(int __a, int __b)
__device__ __2f16 b
__device__ __2f16 float c
cir::SignBitOp createSignBit(mlir::Location loc, mlir::Value val)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy)
cir::PointerType getPointerTo(mlir::Type ty)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createNot(mlir::Location loc, mlir::Value value)
cir::PointerType getVoidPtrTy(clang::LangAS langAS=clang::LangAS::Default)
mlir::Value createAddrSpaceCast(mlir::Location loc, mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
APSInt & getInt()
Definition APValue.h:508
bool isFloat() const
Definition APValue.h:486
bool isInt() const
Definition APValue.h:485
APFloat & getFloat()
Definition APValue.h:522
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
unsigned getIntWidth(QualType T) const
CanQualType VoidPtrTy
Builtin::Context & BuiltinInfo
Definition ASTContext.h:800
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isLibFunction(unsigned ID) const
Return true if this is a builtin for a libc/libm function, with a "__builtin_" prefix (e....
Definition Builtins.h:309
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
cir::IntType getSIntNTy(int n)
cir::PointerType getUInt8PtrTy()
cir::IntType getUIntNTy(int n)
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
mlir::Type convertType(clang::QualType t)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
std::optional< mlir::Value > emitRISCVBuiltinExpr(unsigned builtinID, const CallExpr *expr)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
void emitNonNullArgCheck(RValue rv, QualType argType, SourceLocation argLoc, AbstractCallee ac, unsigned paramNum)
Create a check for a function parameter that may potentially be declared as non-null.
mlir::MLIRContext & getMLIRContext()
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
clang::ASTContext & getContext() const
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
clang::ASTContext & getASTContext() const
mlir::Type convertType(clang::QualType type)
clang::DiagnosticsEngine & getDiags() const
cir::FuncOp getBuiltinLibFunction(const FunctionDecl *fd, unsigned builtinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
const llvm::Triple & getTriple() const
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
cir::FuncOp getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType, clang::GlobalDecl gd, bool forVTable, bool dontDefer=false, bool isThunk=false, ForDefinition_t isForDefinition=NotForDefinition, mlir::NamedAttrList extraAttrs={})
mlir::Value getPointer() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
bool isIgnored() const
Definition CIRGenValue.h:52
static RValue getIgnored()
Definition CIRGenValue.h:78
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:585
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:233
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents difference between two FPOptions values.
Represents a function declaration or definition.
Definition Decl.h:2015
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2812
QualType getReturnType() const
Definition Decl.h:2860
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:748
bool isBlockPointerType() const
Definition TypeBase.h:8688
bool isPointerType() const
Definition TypeBase.h:8668
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9156
bool isObjCObjectPointerType() const
Definition TypeBase.h:8847
bool isFloatingType() const
Definition Type.cpp:2342
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2285
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
const Expr * getSubExpr() const
Definition Expr.h:4976
QualType getType() const
Definition Decl.h:723
bool isMatchingAddressSpace(mlir::ptr::MemorySpaceAttrInterface cirAS, clang::LangAS as)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
static bool builtinCheckKind()
static bool addressSpace()
static bool asmLabelAttr()
static bool msvcBuiltins()
static bool builtinCallF128()
static bool isPPC_FP128Ty()
static bool emitCheckedInBoundsGEP()
static bool fpConstraints()
static bool countedBySize()
static bool opCallImplicitObjectSizeArgs()
static bool fastMathFlags()
static bool builtinCall()
static bool generateDebugInfo()
cir::PointerType allocaInt8PtrTy
void* in alloca address space
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
cir::PointerType voidPtrTy
void* in address space 0
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool hasSideEffects() const
Return true if the evaluated expression has side effects.
Definition Expr.h:642
#define conj(__x)
Definition tgmath.h:1303