clang 23.0.0git
CIRGenBuiltin.cpp
Go to the documentation of this file.
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
29#include "llvm/IR/Intrinsics.h"
30#include "llvm/Support/ErrorHandling.h"
31
32using namespace clang;
33using namespace clang::CIRGen;
34using namespace llvm;
35
37 const CallExpr *e, mlir::Operation *calleeValue) {
38 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
39 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
40}
41
42template <typename Op>
44 bool poisonZero = false) {
46
47 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
48 CIRGenBuilderTy &builder = cgf.getBuilder();
49
50 Op op;
51 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
52 std::is_same_v<Op, cir::BitCtzOp>)
53 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
54 else
55 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
56
57 mlir::Value result = op.getResult();
58 mlir::Type exprTy = cgf.convertType(e->getType());
59 if (exprTy != result.getType())
60 result = builder.createIntCast(result, exprTy);
61
62 return RValue::get(result);
63}
64
65/// Emit the conversions required to turn the given value into an
66/// integer of the given size.
67static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
68 cir::IntType intType) {
69 v = cgf.emitToMemory(v, t);
70
71 if (mlir::isa<cir::PointerType>(v.getType()))
72 return cgf.getBuilder().createPtrToInt(v, intType);
73
74 assert(v.getType() == intType);
75 return v;
76}
77
78static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
79 mlir::Type resultType) {
80 v = cgf.emitFromMemory(v, t);
81
82 if (mlir::isa<cir::PointerType>(resultType))
83 return cgf.getBuilder().createIntToPtr(v, resultType);
84
85 assert(v.getType() == resultType);
86 return v;
87}
88
/// Emit a cir.signbit operation on \p val and return its single result.
static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf,
                               mlir::Value val) {
  cir::SignBitOp returnValue = cgf.getBuilder().createSignBit(loc, val);
  return returnValue->getResult(0);
}
95
97 ASTContext &astContext = cgf.getContext();
99 unsigned bytes =
100 mlir::isa<cir::PointerType>(ptr.getElementType())
101 ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
104
105 unsigned align = ptr.getAlignment().getQuantity();
106 if (align % bytes != 0) {
107 DiagnosticsEngine &diags = cgf.cgm.getDiags();
108 diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
109 // Force address to be at least naturally-aligned.
111 }
112 return ptr;
113}
114
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
///
/// \param cgf             the current function being emitted
/// \param kind            the read-modify-write operation to perform
/// \param expr            the atomic builtin call being lowered; arg 0 is the
///                        destination pointer, arg 1 the value operand
/// \param originalArgType out: pre-conversion type of the value operand;
///                        written only when \p emittedArgValue is non-null
/// \param emittedArgValue out (optional): the converted value operand, used by
///                        <binop>_and_fetch emission to recompute the
///                        post-operation result
/// \param ordering        memory ordering for the atomic operation
/// \returns the value previously stored at the destination (fetch-first)
static mlir::Value makeBinaryAtomicValue(
    CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
    mlir::Type *originalArgType, mlir::Value *emittedArgValue = nullptr,
    cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {

  QualType type = expr->getType();
  QualType ptrType = expr->getArg(0)->getType();

  assert(ptrType->isPointerType());
  // NOTE(review): the first operand of this assert appears truncated in this
  // copy of the file (likely a hasSameUnqualifiedType check) -- confirm
  // against upstream.
  assert(
      expr->getArg(1)->getType()));

  Address destAddr = checkAtomicAlignment(cgf, expr);
  CIRGenBuilderTy &builder = cgf.getBuilder();

  mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
  mlir::Type valueType = val.getType();
  mlir::Value destValue = destAddr.emitRawPointer();

  if (ptrType->getPointeeType()->isPointerType()) {
    // Pointer to pointer
    // `cir.atomic.fetch` expects a pointer to an integer type, so we cast
    // ptr<ptr<T>> to ptr<intPtrSize>
    cir::IntType ptrSizeInt =
        builder.getSIntNTy(cgf.getContext().getTypeSize(ptrType));
    destValue =
        builder.createBitcast(destValue, builder.getPointerTo(ptrSizeInt));
    val = emitToInt(cgf, val, type, ptrSizeInt);
  } else {
    // Pointer to integer type
    // NOTE(review): the condition selecting the unsigned vs. signed branch is
    // missing in this copy (presumably a signedness check on `type`) --
    // confirm against upstream.
    cir::IntType intType =
        ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
        : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
    val = emitToInt(cgf, val, type, intType);
  }

  // This output argument is needed for post atomic fetch operations
  // that calculate the result of the operation as return value of
  // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
  // memory location and returns the old value.
  if (emittedArgValue) {
    *emittedArgValue = val;
    *originalArgType = valueType;
  }

  auto rmwi = cir::AtomicFetchOp::create(
      builder, cgf.getLoc(expr->getSourceRange()), destValue, val, kind,
      ordering, cir::SyncScopeKind::System, false, /* is volatile */
      true); /* fetch first */
  return rmwi->getResult(0);
}
171
172template <typename BinOp>
174 cir::AtomicFetchKind atomicOpkind,
175 const CallExpr *e, bool invert = false) {
176 mlir::Value emittedArgValue;
177 mlir::Type originalArgType;
178 clang::QualType typ = e->getType();
179 mlir::Value result = makeBinaryAtomicValue(
180 cgf, atomicOpkind, e, &originalArgType, &emittedArgValue);
182 result = BinOp::create(builder, result.getLoc(), result, emittedArgValue);
183
184 if (invert)
185 result = cir::UnaryOp::create(builder, result.getLoc(),
186 cir::UnaryOpKind::Not, result);
187
188 result = emitFromInt(cgf, result, typ, originalArgType);
189 return RValue::get(result);
190}
191
193 cir::SyncScopeKind syncScope) {
194 CIRGenBuilderTy &builder = cgf.getBuilder();
195 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
196
197 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
198 cir::AtomicFenceOp::create(
199 builder, loc, memOrder,
200 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
201 };
202
203 cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
204 /*isLoad*/ false, /*isFence*/ true,
205 emitAtomicOpCallBackFn);
206}
207
namespace {
/// The bit width and signedness of an integer type, used below to compute an
/// encompassing integer type for a set of operand types.
struct WidthAndSignedness {
  unsigned width; // Bit width of the type.
  bool isSigned;  // True for signed integer types.
};
} // namespace
214
215static WidthAndSignedness
217 const clang::QualType type) {
218 assert(type->isIntegerType() && "Given type is not an integer.");
219 unsigned width = type->isBooleanType() ? 1
220 : type->isBitIntType() ? astContext.getIntWidth(type)
221 : astContext.getTypeInfo(type).Width;
222 bool isSigned = type->isSignedIntegerType();
223 return {width, isSigned};
224}
225
226// Given one or more integer types, this function produces an integer type that
227// encompasses them: any value in one of the given types could be expressed in
228// the encompassing type.
229static struct WidthAndSignedness
230EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
231 assert(types.size() > 0 && "Empty list of types.");
232
233 // If any of the given types is signed, we must return a signed type.
234 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
235
236 // The encompassing type must have a width greater than or equal to the width
237 // of the specified types. Additionally, if the encompassing type is signed,
238 // its width must be strictly greater than the width of any unsigned types
239 // given.
240 unsigned width = 0;
241 for (const auto &type : types)
242 width = std::max(width, type.width + (isSigned && !type.isSigned));
243
244 return {width, isSigned};
245}
246
/// Emit a cir.rotate for the bit-rotate builtins; \p isRotateLeft selects the
/// rotation direction. Arg 0 is the value to rotate, arg 1 the rotate amount.
RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
  mlir::Value input = emitScalarExpr(e->getArg(0));
  mlir::Value amount = emitScalarExpr(e->getArg(1));

  // TODO(cir): MSVC flavor bit rotate builtins use different types for input
  // and amount, but cir.rotate requires them to have the same type. Cast amount
  // to the type of input when necessary.

  auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
                                 amount, isRotateLeft);
  return RValue::get(r);
}
260
261template <class Operation>
263 const CallExpr &e) {
264 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
265
266 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(cgf, &e);
268
269 auto call =
270 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
271 return RValue::get(call->getResult(0));
272}
273
274template <class Operation>
276 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
277 auto call =
278 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
279 return RValue::get(call->getResult(0));
280}
281
282template <typename Op>
284 const CallExpr &e) {
285 mlir::Type resultType = cgf.convertType(e.getType());
286 mlir::Value src = cgf.emitScalarExpr(e.getArg(0));
287
289
290 auto call = Op::create(cgf.getBuilder(), src.getLoc(), resultType, src);
291 return RValue::get(call->getResult(0));
292}
293
294template <typename Op>
296 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
297 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
298
299 mlir::Location loc = cgf.getLoc(e.getExprLoc());
300 mlir::Type ty = cgf.convertType(e.getType());
301 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
302
303 return RValue::get(call->getResult(0));
304}
305
306template <typename Op>
308 const CallExpr &e) {
309 mlir::Value arg0 = cgf.emitScalarExpr(e.getArg(0));
310 mlir::Value arg1 = cgf.emitScalarExpr(e.getArg(1));
311
312 mlir::Location loc = cgf.getLoc(e.getExprLoc());
313 mlir::Type ty = cgf.convertType(e.getType());
314
316
317 auto call = Op::create(cgf.getBuilder(), loc, ty, arg0, arg1);
318 return call->getResult(0);
319}
320
322 unsigned builtinID) {
323
324 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
325 cgf.cgm.errorNYI(
326 e->getSourceRange(),
327 std::string("unimplemented X86 library function builtin call: ") +
328 cgf.getContext().BuiltinInfo.getName(builtinID));
329 } else {
330 cgf.cgm.errorNYI(e->getSourceRange(),
331 std::string("unimplemented X86 builtin call: ") +
332 cgf.getContext().BuiltinInfo.getName(builtinID));
333 }
334
335 return cgf.getUndefRValue(e->getType());
336}
337
339 unsigned builtinID) {
340 assert(builtinID == Builtin::BI__builtin_alloca ||
341 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
342 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
343
344 // Get alloca size input
345 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
346
347 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
348 const TargetInfo &ti = cgf.getContext().getTargetInfo();
349 const CharUnits suitableAlignmentInBytes =
351
352 // Emit the alloca op with type `u8 *` to match the semantics of
353 // `llvm.alloca`. We later bitcast the type to `void *` to match the
354 // semantics of C/C++
355 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
356 // pointer of type `void *`. This will require a change to the allocaOp
357 // verifier.
358 CIRGenBuilderTy &builder = cgf.getBuilder();
359 mlir::Value allocaAddr = builder.createAlloca(
360 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
361 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
362
363 // Initialize the allocated buffer if required.
364 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
365 // Initialize the alloca with the given size and alignment according to
366 // the lang opts. Only the trivial non-initialization is supported for
367 // now.
368
369 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
371 // Nothing to initialize.
372 break;
375 cgf.cgm.errorNYI("trivial auto var init");
376 break;
377 }
378 }
379
380 // An alloca will always return a pointer to the alloca (stack) address
381 // space. This address space need not be the same as the AST / Language
382 // default (e.g. in C / C++ auto vars are in the generic address space). At
383 // the AST level this is handled within CreateTempAlloca et al., but for the
384 // builtin / dynamic alloca we have to handle it here.
385
389 cgf.cgm.errorNYI(e->getSourceRange(),
390 "Address Space Cast for builtin alloca");
391 }
392
393 // Bitcast the alloca to the expected type.
394 return RValue::get(builder.createBitcast(
395 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
396}
397
399 unsigned builtinID) {
400 std::optional<bool> errnoOverriden;
401 // ErrnoOverriden is true if math-errno is overriden via the
402 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
403 // which implies math-errno.
404 if (e->hasStoredFPFeatures()) {
406 if (op.hasMathErrnoOverride())
407 errnoOverriden = op.getMathErrnoOverride();
408 }
409 // True if 'attribute__((optnone))' is used. This attribute overrides
410 // fast-math which implies math-errno.
411 bool optNone =
412 cgf.curFuncDecl && cgf.curFuncDecl->hasAttr<OptimizeNoneAttr>();
413 bool isOptimizationEnabled = cgf.cgm.getCodeGenOpts().OptimizationLevel != 0;
414 bool generateFPMathIntrinsics =
416 builtinID, cgf.cgm.getTriple(), errnoOverriden,
417 cgf.getLangOpts().MathErrno, optNone, isOptimizationEnabled);
418 return generateFPMathIntrinsics;
419}
420
422 unsigned builtinID) {
424 switch (builtinID) {
425 case Builtin::BIacos:
426 case Builtin::BIacosf:
427 case Builtin::BIacosl:
428 case Builtin::BI__builtin_acos:
429 case Builtin::BI__builtin_acosf:
430 case Builtin::BI__builtin_acosf16:
431 case Builtin::BI__builtin_acosl:
432 case Builtin::BI__builtin_acosf128:
433 case Builtin::BI__builtin_elementwise_acos:
435 case Builtin::BIasin:
436 case Builtin::BIasinf:
437 case Builtin::BIasinl:
438 case Builtin::BI__builtin_asin:
439 case Builtin::BI__builtin_asinf:
440 case Builtin::BI__builtin_asinf16:
441 case Builtin::BI__builtin_asinl:
442 case Builtin::BI__builtin_asinf128:
443 case Builtin::BI__builtin_elementwise_asin:
445 case Builtin::BIatan:
446 case Builtin::BIatanf:
447 case Builtin::BIatanl:
448 case Builtin::BI__builtin_atan:
449 case Builtin::BI__builtin_atanf:
450 case Builtin::BI__builtin_atanf16:
451 case Builtin::BI__builtin_atanl:
452 case Builtin::BI__builtin_atanf128:
453 case Builtin::BI__builtin_elementwise_atan:
455 case Builtin::BIatan2:
456 case Builtin::BIatan2f:
457 case Builtin::BIatan2l:
458 case Builtin::BI__builtin_atan2:
459 case Builtin::BI__builtin_atan2f:
460 case Builtin::BI__builtin_atan2f16:
461 case Builtin::BI__builtin_atan2l:
462 case Builtin::BI__builtin_atan2f128:
463 case Builtin::BI__builtin_elementwise_atan2:
464 return RValue::get(
466 case Builtin::BIceil:
467 case Builtin::BIceilf:
468 case Builtin::BIceill:
469 case Builtin::BI__builtin_ceil:
470 case Builtin::BI__builtin_ceilf:
471 case Builtin::BI__builtin_ceilf16:
472 case Builtin::BI__builtin_ceill:
473 case Builtin::BI__builtin_ceilf128:
475 case Builtin::BI__builtin_elementwise_ceil:
476 return RValue::getIgnored();
477 case Builtin::BIcopysign:
478 case Builtin::BIcopysignf:
479 case Builtin::BIcopysignl:
480 case Builtin::BI__builtin_copysign:
481 case Builtin::BI__builtin_copysignf:
482 case Builtin::BI__builtin_copysignf16:
483 case Builtin::BI__builtin_copysignl:
484 case Builtin::BI__builtin_copysignf128:
486 case Builtin::BIcos:
487 case Builtin::BIcosf:
488 case Builtin::BIcosl:
489 case Builtin::BI__builtin_cos:
490 case Builtin::BI__builtin_cosf:
491 case Builtin::BI__builtin_cosf16:
492 case Builtin::BI__builtin_cosl:
493 case Builtin::BI__builtin_cosf128:
495 case Builtin::BI__builtin_elementwise_cos:
496 case Builtin::BIcosh:
497 case Builtin::BIcoshf:
498 case Builtin::BIcoshl:
499 case Builtin::BI__builtin_cosh:
500 case Builtin::BI__builtin_coshf:
501 case Builtin::BI__builtin_coshf16:
502 case Builtin::BI__builtin_coshl:
503 case Builtin::BI__builtin_coshf128:
504 case Builtin::BI__builtin_elementwise_cosh:
505 return RValue::getIgnored();
506 case Builtin::BIexp:
507 case Builtin::BIexpf:
508 case Builtin::BIexpl:
509 case Builtin::BI__builtin_exp:
510 case Builtin::BI__builtin_expf:
511 case Builtin::BI__builtin_expf16:
512 case Builtin::BI__builtin_expl:
513 case Builtin::BI__builtin_expf128:
515 case Builtin::BI__builtin_elementwise_exp:
516 return RValue::getIgnored();
517 case Builtin::BIexp2:
518 case Builtin::BIexp2f:
519 case Builtin::BIexp2l:
520 case Builtin::BI__builtin_exp2:
521 case Builtin::BI__builtin_exp2f:
522 case Builtin::BI__builtin_exp2f16:
523 case Builtin::BI__builtin_exp2l:
524 case Builtin::BI__builtin_exp2f128:
526 case Builtin::BI__builtin_elementwise_exp2:
527 case Builtin::BI__builtin_exp10:
528 case Builtin::BI__builtin_exp10f:
529 case Builtin::BI__builtin_exp10f16:
530 case Builtin::BI__builtin_exp10l:
531 case Builtin::BI__builtin_exp10f128:
532 case Builtin::BI__builtin_elementwise_exp10:
533 return RValue::getIgnored();
534 case Builtin::BIfabs:
535 case Builtin::BIfabsf:
536 case Builtin::BIfabsl:
537 case Builtin::BI__builtin_fabs:
538 case Builtin::BI__builtin_fabsf:
539 case Builtin::BI__builtin_fabsf16:
540 case Builtin::BI__builtin_fabsl:
541 case Builtin::BI__builtin_fabsf128:
543 case Builtin::BIfloor:
544 case Builtin::BIfloorf:
545 case Builtin::BIfloorl:
546 case Builtin::BI__builtin_floor:
547 case Builtin::BI__builtin_floorf:
548 case Builtin::BI__builtin_floorf16:
549 case Builtin::BI__builtin_floorl:
550 case Builtin::BI__builtin_floorf128:
552 case Builtin::BI__builtin_elementwise_floor:
553 case Builtin::BIfma:
554 case Builtin::BIfmaf:
555 case Builtin::BIfmal:
556 case Builtin::BI__builtin_fma:
557 case Builtin::BI__builtin_fmaf:
558 case Builtin::BI__builtin_fmaf16:
559 case Builtin::BI__builtin_fmal:
560 case Builtin::BI__builtin_fmaf128:
561 case Builtin::BI__builtin_elementwise_fma:
562 return RValue::getIgnored();
563 case Builtin::BIfmax:
564 case Builtin::BIfmaxf:
565 case Builtin::BIfmaxl:
566 case Builtin::BI__builtin_fmax:
567 case Builtin::BI__builtin_fmaxf:
568 case Builtin::BI__builtin_fmaxf16:
569 case Builtin::BI__builtin_fmaxl:
570 case Builtin::BI__builtin_fmaxf128:
571 return RValue::get(
573 case Builtin::BIfmin:
574 case Builtin::BIfminf:
575 case Builtin::BIfminl:
576 case Builtin::BI__builtin_fmin:
577 case Builtin::BI__builtin_fminf:
578 case Builtin::BI__builtin_fminf16:
579 case Builtin::BI__builtin_fminl:
580 case Builtin::BI__builtin_fminf128:
581 return RValue::get(
583 case Builtin::BIfmaximum_num:
584 case Builtin::BIfmaximum_numf:
585 case Builtin::BIfmaximum_numl:
586 case Builtin::BI__builtin_fmaximum_num:
587 case Builtin::BI__builtin_fmaximum_numf:
588 case Builtin::BI__builtin_fmaximum_numf16:
589 case Builtin::BI__builtin_fmaximum_numl:
590 case Builtin::BI__builtin_fmaximum_numf128:
591 case Builtin::BIfminimum_num:
592 case Builtin::BIfminimum_numf:
593 case Builtin::BIfminimum_numl:
594 case Builtin::BI__builtin_fminimum_num:
595 case Builtin::BI__builtin_fminimum_numf:
596 case Builtin::BI__builtin_fminimum_numf16:
597 case Builtin::BI__builtin_fminimum_numl:
598 case Builtin::BI__builtin_fminimum_numf128:
599 return RValue::getIgnored();
600 case Builtin::BIfmod:
601 case Builtin::BIfmodf:
602 case Builtin::BIfmodl:
603 case Builtin::BI__builtin_fmod:
604 case Builtin::BI__builtin_fmodf:
605 case Builtin::BI__builtin_fmodf16:
606 case Builtin::BI__builtin_fmodl:
607 case Builtin::BI__builtin_fmodf128:
608 case Builtin::BI__builtin_elementwise_fmod:
609 return RValue::get(
611 case Builtin::BIlog:
612 case Builtin::BIlogf:
613 case Builtin::BIlogl:
614 case Builtin::BI__builtin_log:
615 case Builtin::BI__builtin_logf:
616 case Builtin::BI__builtin_logf16:
617 case Builtin::BI__builtin_logl:
618 case Builtin::BI__builtin_logf128:
619 case Builtin::BI__builtin_elementwise_log:
621 case Builtin::BIlog10:
622 case Builtin::BIlog10f:
623 case Builtin::BIlog10l:
624 case Builtin::BI__builtin_log10:
625 case Builtin::BI__builtin_log10f:
626 case Builtin::BI__builtin_log10f16:
627 case Builtin::BI__builtin_log10l:
628 case Builtin::BI__builtin_log10f128:
629 case Builtin::BI__builtin_elementwise_log10:
631 case Builtin::BIlog2:
632 case Builtin::BIlog2f:
633 case Builtin::BIlog2l:
634 case Builtin::BI__builtin_log2:
635 case Builtin::BI__builtin_log2f:
636 case Builtin::BI__builtin_log2f16:
637 case Builtin::BI__builtin_log2l:
638 case Builtin::BI__builtin_log2f128:
639 case Builtin::BI__builtin_elementwise_log2:
641 case Builtin::BInearbyint:
642 case Builtin::BInearbyintf:
643 case Builtin::BInearbyintl:
644 case Builtin::BI__builtin_nearbyint:
645 case Builtin::BI__builtin_nearbyintf:
646 case Builtin::BI__builtin_nearbyintl:
647 case Builtin::BI__builtin_nearbyintf128:
648 case Builtin::BI__builtin_elementwise_nearbyint:
650 case Builtin::BIpow:
651 case Builtin::BIpowf:
652 case Builtin::BIpowl:
653 case Builtin::BI__builtin_pow:
654 case Builtin::BI__builtin_powf:
655 case Builtin::BI__builtin_powf16:
656 case Builtin::BI__builtin_powl:
657 case Builtin::BI__builtin_powf128:
658 return RValue::get(
660 case Builtin::BI__builtin_elementwise_pow:
661 return RValue::getIgnored();
662 case Builtin::BIrint:
663 case Builtin::BIrintf:
664 case Builtin::BIrintl:
665 case Builtin::BI__builtin_rint:
666 case Builtin::BI__builtin_rintf:
667 case Builtin::BI__builtin_rintf16:
668 case Builtin::BI__builtin_rintl:
669 case Builtin::BI__builtin_rintf128:
670 case Builtin::BI__builtin_elementwise_rint:
672 case Builtin::BIround:
673 case Builtin::BIroundf:
674 case Builtin::BIroundl:
675 case Builtin::BI__builtin_round:
676 case Builtin::BI__builtin_roundf:
677 case Builtin::BI__builtin_roundf16:
678 case Builtin::BI__builtin_roundl:
679 case Builtin::BI__builtin_roundf128:
680 case Builtin::BI__builtin_elementwise_round:
682 case Builtin::BIroundeven:
683 case Builtin::BIroundevenf:
684 case Builtin::BIroundevenl:
685 case Builtin::BI__builtin_roundeven:
686 case Builtin::BI__builtin_roundevenf:
687 case Builtin::BI__builtin_roundevenf16:
688 case Builtin::BI__builtin_roundevenl:
689 case Builtin::BI__builtin_roundevenf128:
690 case Builtin::BI__builtin_elementwise_roundeven:
692 case Builtin::BIsin:
693 case Builtin::BIsinf:
694 case Builtin::BIsinl:
695 case Builtin::BI__builtin_sin:
696 case Builtin::BI__builtin_sinf:
697 case Builtin::BI__builtin_sinf16:
698 case Builtin::BI__builtin_sinl:
699 case Builtin::BI__builtin_sinf128:
700 case Builtin::BI__builtin_elementwise_sin:
702 case Builtin::BIsinh:
703 case Builtin::BIsinhf:
704 case Builtin::BIsinhl:
705 case Builtin::BI__builtin_sinh:
706 case Builtin::BI__builtin_sinhf:
707 case Builtin::BI__builtin_sinhf16:
708 case Builtin::BI__builtin_sinhl:
709 case Builtin::BI__builtin_sinhf128:
710 case Builtin::BI__builtin_elementwise_sinh:
711 case Builtin::BI__builtin_sincospi:
712 case Builtin::BI__builtin_sincospif:
713 case Builtin::BI__builtin_sincospil:
714 case Builtin::BIsincos:
715 case Builtin::BIsincosf:
716 case Builtin::BIsincosl:
717 case Builtin::BI__builtin_sincos:
718 case Builtin::BI__builtin_sincosf:
719 case Builtin::BI__builtin_sincosf16:
720 case Builtin::BI__builtin_sincosl:
721 case Builtin::BI__builtin_sincosf128:
722 return RValue::getIgnored();
723 case Builtin::BIsqrt:
724 case Builtin::BIsqrtf:
725 case Builtin::BIsqrtl:
726 case Builtin::BI__builtin_sqrt:
727 case Builtin::BI__builtin_sqrtf:
728 case Builtin::BI__builtin_sqrtf16:
729 case Builtin::BI__builtin_sqrtl:
730 case Builtin::BI__builtin_sqrtf128:
731 case Builtin::BI__builtin_elementwise_sqrt:
733 case Builtin::BItan:
734 case Builtin::BItanf:
735 case Builtin::BItanl:
736 case Builtin::BI__builtin_tan:
737 case Builtin::BI__builtin_tanf:
738 case Builtin::BI__builtin_tanf16:
739 case Builtin::BI__builtin_tanl:
740 case Builtin::BI__builtin_tanf128:
741 case Builtin::BI__builtin_elementwise_tan:
743 case Builtin::BItanh:
744 case Builtin::BItanhf:
745 case Builtin::BItanhl:
746 case Builtin::BI__builtin_tanh:
747 case Builtin::BI__builtin_tanhf:
748 case Builtin::BI__builtin_tanhf16:
749 case Builtin::BI__builtin_tanhl:
750 case Builtin::BI__builtin_tanhf128:
751 case Builtin::BI__builtin_elementwise_tanh:
752 return RValue::getIgnored();
753 case Builtin::BItrunc:
754 case Builtin::BItruncf:
755 case Builtin::BItruncl:
756 case Builtin::BI__builtin_trunc:
757 case Builtin::BI__builtin_truncf:
758 case Builtin::BI__builtin_truncf16:
759 case Builtin::BI__builtin_truncl:
760 case Builtin::BI__builtin_truncf128:
761 case Builtin::BI__builtin_elementwise_trunc:
763 case Builtin::BIlround:
764 case Builtin::BIlroundf:
765 case Builtin::BIlroundl:
766 case Builtin::BI__builtin_lround:
767 case Builtin::BI__builtin_lroundf:
768 case Builtin::BI__builtin_lroundl:
769 case Builtin::BI__builtin_lroundf128:
771 case Builtin::BIllround:
772 case Builtin::BIllroundf:
773 case Builtin::BIllroundl:
774 case Builtin::BI__builtin_llround:
775 case Builtin::BI__builtin_llroundf:
776 case Builtin::BI__builtin_llroundl:
777 case Builtin::BI__builtin_llroundf128:
779 case Builtin::BIlrint:
780 case Builtin::BIlrintf:
781 case Builtin::BIlrintl:
782 case Builtin::BI__builtin_lrint:
783 case Builtin::BI__builtin_lrintf:
784 case Builtin::BI__builtin_lrintl:
785 case Builtin::BI__builtin_lrintf128:
787 case Builtin::BIllrint:
788 case Builtin::BIllrintf:
789 case Builtin::BIllrintl:
790 case Builtin::BI__builtin_llrint:
791 case Builtin::BI__builtin_llrintf:
792 case Builtin::BI__builtin_llrintl:
793 case Builtin::BI__builtin_llrintf128:
795 case Builtin::BI__builtin_ldexp:
796 case Builtin::BI__builtin_ldexpf:
797 case Builtin::BI__builtin_ldexpl:
798 case Builtin::BI__builtin_ldexpf16:
799 case Builtin::BI__builtin_ldexpf128:
800 case Builtin::BI__builtin_elementwise_ldexp:
801 default:
802 break;
803 }
804
805 return RValue::getIgnored();
806}
807
808// FIXME: Remove cgf parameter when all descriptor kinds are implemented
809static mlir::Type
812 mlir::MLIRContext *context) {
813 using namespace llvm::Intrinsic;
814
815 IITDescriptor descriptor = infos.front();
816 infos = infos.slice(1);
817
818 switch (descriptor.Kind) {
819 case IITDescriptor::Void:
820 return cir::VoidType::get(context);
821 // If the intrinsic expects unsigned integers, the signedness is corrected in
822 // correctIntegerSignedness()
823 case IITDescriptor::Integer:
824 return cir::IntType::get(context, descriptor.Integer_Width,
825 /*isSigned=*/true);
826 case IITDescriptor::Vector: {
827 mlir::Type elementType = decodeFixedType(cgf, infos, context);
828 unsigned numElements = descriptor.Vector_Width.getFixedValue();
829 return cir::VectorType::get(elementType, numElements);
830 }
831 case IITDescriptor::Pointer: {
832 mlir::Builder builder(context);
833 auto addrSpace = cir::TargetAddressSpaceAttr::get(
834 context, descriptor.Pointer_AddressSpace);
835 return cir::PointerType::get(cir::VoidType::get(context), addrSpace);
836 }
837 default:
838 cgf.cgm.errorNYI("Unimplemented intrinsic type descriptor");
839 return cir::VoidType::get(context);
840 }
841}
842
843/// Helper function to correct integer signedness for intrinsic arguments and
844/// return type. IIT always returns signed integers, but the actual intrinsic
845/// may expect unsigned integers based on the AST FunctionDecl parameter types.
846static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType,
847 mlir::MLIRContext *context) {
848 auto intTy = dyn_cast<cir::IntType>(iitType);
849 if (!intTy)
850 return iitType;
851
852 if (astType->isUnsignedIntegerType())
853 return cir::IntType::get(context, intTy.getWidth(), /*isSigned=*/false);
854
855 return iitType;
856}
857
/// Cast \p argValue (a CIR pointer) to the pointer type \p expectedTy that an
/// intrinsic expects: an address-space mismatch is handled with an address
/// space cast, otherwise a plain bitcast suffices.
static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy,
                                   CIRGenBuilderTy &builder) {
  auto ptrType = mlir::cast<cir::PointerType>(argValue.getType());

  auto expectedPtrType = mlir::cast<cir::PointerType>(expectedTy);
  // Callers only reach this helper when a cast is actually required.
  assert(ptrType != expectedPtrType && "types should not match");

  if (ptrType.getAddrSpace() != expectedPtrType.getAddrSpace()) {
    // NOTE(review): the call this string literal belongs to appears truncated
    // in this copy of the file -- confirm against upstream.
                 "address space handling not yet implemented");
    auto newPtrType = cir::PointerType::get(ptrType.getPointee(),
                                            expectedPtrType.getAddrSpace());
    return builder.createAddrSpaceCast(argValue, newPtrType);
  }

  return builder.createBitcast(argValue, expectedTy);
}
875
/// Build the cir::FuncType for the LLVM intrinsic \p id by decoding its IIT
/// descriptor table. A trailing VarArg descriptor marks the function variadic,
/// and a void result maps to CIR's no-explicit-return-type convention.
static cir::FuncType getIntrinsicType(CIRGenFunction &cgf,
                                      mlir::MLIRContext *context,
                                      llvm::Intrinsic::ID id) {
  using namespace llvm::Intrinsic;

  // NOTE(review): the declaration of `table` (presumably a
  // SmallVector<IITDescriptor>) is not visible in this copy -- confirm
  // against upstream.
  getIntrinsicInfoTableEntries(id, table);

  ArrayRef<IITDescriptor> tableRef = table;
  // The leading descriptor(s) encode the result type.
  mlir::Type resultTy = decodeFixedType(cgf, tableRef, context);

  // NOTE(review): the declaration of `argTypes` (presumably a
  // SmallVector<mlir::Type>) is not visible in this copy -- confirm against
  // upstream.
  bool isVarArg = false;
  while (!tableRef.empty()) {
    llvm::Intrinsic::IITDescriptor::IITDescriptorKind kind =
        tableRef.front().Kind;
    if (kind == IITDescriptor::VarArg) {
      isVarArg = true;
      break; // VarArg is last
    }
    // decodeFixedType consumes one or more descriptors from tableRef per call.
    argTypes.push_back(decodeFixedType(cgf, tableRef, context));
  }

  // CIR convention: no explicit void return type
  if (isa<cir::VoidType>(resultTy))
    return cir::FuncType::get(context, argTypes, /*optionalReturnType=*/nullptr,
                              isVarArg);

  return cir::FuncType::get(context, argTypes, resultTy, isVarArg);
}
906
908 const CallExpr *e,
910 mlir::Location loc = getLoc(e->getSourceRange());
911
912 // See if we can constant fold this builtin. If so, don't emit it at all.
913 // TODO: Extend this handling to all builtin calls that we can constant-fold.
914 // Do not constant-fold immediate (target-specific) builtins; their ASTs can
915 // trigger the constant evaluator in cases it cannot safely handle.
916 // Skip EvaluateAsRValue for those.
917 Expr::EvalResult result;
918 if (e->isPRValue() && !getContext().BuiltinInfo.isImmediate(builtinID) &&
919 e->EvaluateAsRValue(result, cgm.getASTContext()) &&
920 !result.hasSideEffects()) {
921 if (result.Val.isInt()) {
922 QualType type = e->getType();
923 if (type->isBooleanType())
924 return RValue::get(
925 builder.getBool(result.Val.getInt().getBoolValue(), loc));
926 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
927 }
928 if (result.Val.isFloat()) {
929 // Note: we are using result type of CallExpr to determine the type of
930 // the constant. Classic codegen uses the result value to determine the
931 // type. We feel it should be Ok to use expression type because it is
932 // hard to imagine a builtin function evaluates to a value that
933 // over/underflows its own defined type.
934 mlir::Type type = convertType(e->getType());
935 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
936 }
937 }
938
939 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
940
942
943 // If the builtin has been declared explicitly with an assembler label,
944 // disable the specialized emitting below. Ideally we should communicate the
945 // rename in IR, or at least avoid generating the intrinsic calls that are
946 // likely to get lowered to the renamed library functions.
947 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
948
949 bool generateFPMathIntrinsics =
950 shouldCIREmitFPMathIntrinsic(*this, e, builtinID);
951
952 if (generateFPMathIntrinsics) {
953 // Try to match the builtinID with a floating point math builtin.
954 RValue rv = tryEmitFPMathIntrinsic(*this, e, builtinIDIfNoAsmLabel);
955
956 // Return the result directly if a math intrinsic was generated.
957 if (!rv.isIgnored()) {
958 return rv;
959 }
960 }
961
963
964 switch (builtinIDIfNoAsmLabel) {
965 default:
966 break;
967
968 // C stdarg builtins.
969 case Builtin::BI__builtin_stdarg_start:
970 case Builtin::BI__builtin_va_start:
971 case Builtin::BI__va_start: {
972 mlir::Value vaList = builtinID == Builtin::BI__va_start
973 ? emitScalarExpr(e->getArg(0))
975 emitVAStart(vaList);
976 return {};
977 }
978
979 case Builtin::BI__builtin_va_end:
981 return {};
982 case Builtin::BI__builtin_va_copy: {
983 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
984 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
985 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
986 return {};
987 }
988
989 case Builtin::BIabs:
990 case Builtin::BIlabs:
991 case Builtin::BIllabs:
992 case Builtin::BI__builtin_abs:
993 case Builtin::BI__builtin_labs:
994 case Builtin::BI__builtin_llabs: {
995 bool sanitizeOverflow = sanOpts.has(SanitizerKind::SignedIntegerOverflow);
996 mlir::Value arg = emitScalarExpr(e->getArg(0));
997 mlir::Value result;
998 switch (getLangOpts().getSignedOverflowBehavior()) {
1000 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1001 /*minIsPoison=*/false);
1002 break;
1004 if (!sanitizeOverflow) {
1005 result = cir::AbsOp::create(builder, loc, arg.getType(), arg,
1006 /*minIsPoison=*/true);
1007 break;
1008 }
1009 [[fallthrough]];
1011 cgm.errorNYI(e->getSourceRange(), "abs with overflow handling");
1012 return RValue::get(nullptr);
1013 }
1014 return RValue::get(result);
1015 }
1016
1017 case Builtin::BI__assume:
1018 case Builtin::BI__builtin_assume: {
1019 if (e->getArg(0)->HasSideEffects(getContext()))
1020 return RValue::get(nullptr);
1021
1022 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
1023 cir::AssumeOp::create(builder, loc, argValue);
1024 return RValue::get(nullptr);
1025 }
1026
1027 case Builtin::BI__builtin_assume_separate_storage: {
1028 mlir::Value value0 = emitScalarExpr(e->getArg(0));
1029 mlir::Value value1 = emitScalarExpr(e->getArg(1));
1030 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
1031 return RValue::get(nullptr);
1032 }
1033
1034 case Builtin::BI__builtin_assume_aligned: {
1035 const Expr *ptrExpr = e->getArg(0);
1036 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
1037 mlir::Value offsetValue =
1038 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
1039
1040 std::optional<llvm::APSInt> alignment =
1042 assert(alignment.has_value() &&
1043 "the second argument to __builtin_assume_aligned must be an "
1044 "integral constant expression");
1045
1046 mlir::Value result =
1047 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
1048 alignment->getSExtValue(), offsetValue);
1049 return RValue::get(result);
1050 }
1051
1052 case Builtin::BI__builtin_complex: {
1053 mlir::Value real = emitScalarExpr(e->getArg(0));
1054 mlir::Value imag = emitScalarExpr(e->getArg(1));
1055 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
1056 return RValue::getComplex(complex);
1057 }
1058
1059 case Builtin::BI__builtin_creal:
1060 case Builtin::BI__builtin_crealf:
1061 case Builtin::BI__builtin_creall:
1062 case Builtin::BIcreal:
1063 case Builtin::BIcrealf:
1064 case Builtin::BIcreall: {
1065 mlir::Value complex = emitComplexExpr(e->getArg(0));
1066 mlir::Value real = builder.createComplexReal(loc, complex);
1067 return RValue::get(real);
1068 }
1069
1070 case Builtin::BI__builtin_cimag:
1071 case Builtin::BI__builtin_cimagf:
1072 case Builtin::BI__builtin_cimagl:
1073 case Builtin::BIcimag:
1074 case Builtin::BIcimagf:
1075 case Builtin::BIcimagl: {
1076 mlir::Value complex = emitComplexExpr(e->getArg(0));
1077 mlir::Value imag = builder.createComplexImag(loc, complex);
1078 return RValue::get(imag);
1079 }
1080
1081 case Builtin::BI__builtin_conj:
1082 case Builtin::BI__builtin_conjf:
1083 case Builtin::BI__builtin_conjl:
1084 case Builtin::BIconj:
1085 case Builtin::BIconjf:
1086 case Builtin::BIconjl: {
1087 mlir::Value complex = emitComplexExpr(e->getArg(0));
1088 mlir::Value conj = builder.createUnaryOp(getLoc(e->getExprLoc()),
1089 cir::UnaryOpKind::Not, complex);
1090 return RValue::getComplex(conj);
1091 }
1092
1093 case Builtin::BI__builtin_clrsb:
1094 case Builtin::BI__builtin_clrsbl:
1095 case Builtin::BI__builtin_clrsbll:
1096 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
1097
1098 case Builtin::BI__builtin_ctzs:
1099 case Builtin::BI__builtin_ctz:
1100 case Builtin::BI__builtin_ctzl:
1101 case Builtin::BI__builtin_ctzll:
1102 case Builtin::BI__builtin_ctzg:
1104 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
1105
1106 case Builtin::BI__builtin_clzs:
1107 case Builtin::BI__builtin_clz:
1108 case Builtin::BI__builtin_clzl:
1109 case Builtin::BI__builtin_clzll:
1110 case Builtin::BI__builtin_clzg:
1112 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
1113
1114 case Builtin::BI__builtin_ffs:
1115 case Builtin::BI__builtin_ffsl:
1116 case Builtin::BI__builtin_ffsll:
1117 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
1118
1119 case Builtin::BI__builtin_parity:
1120 case Builtin::BI__builtin_parityl:
1121 case Builtin::BI__builtin_parityll:
1122 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
1123
1124 case Builtin::BI__lzcnt16:
1125 case Builtin::BI__lzcnt:
1126 case Builtin::BI__lzcnt64:
1128 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
1129
1130 case Builtin::BI__popcnt16:
1131 case Builtin::BI__popcnt:
1132 case Builtin::BI__popcnt64:
1133 case Builtin::BI__builtin_popcount:
1134 case Builtin::BI__builtin_popcountl:
1135 case Builtin::BI__builtin_popcountll:
1136 case Builtin::BI__builtin_popcountg:
1137 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
1138
1139 // Always return the argument of __builtin_unpredictable. LLVM does not
1140 // have an intrinsic corresponding to this builtin. Metadata for this
1141 // builtin should be added directly to instructions such as branches or
1142 // switches that use it.
1143 case Builtin::BI__builtin_unpredictable: {
1144 return RValue::get(emitScalarExpr(e->getArg(0)));
1145 }
1146
1147 case Builtin::BI__builtin_expect:
1148 case Builtin::BI__builtin_expect_with_probability: {
1149 mlir::Value argValue = emitScalarExpr(e->getArg(0));
1150 if (cgm.getCodeGenOpts().OptimizationLevel == 0)
1151 return RValue::get(argValue);
1152
1153 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
1154
1155 mlir::FloatAttr probAttr;
1156 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
1157 llvm::APFloat probability(0.0);
1158 const Expr *probArg = e->getArg(2);
1159 [[maybe_unused]] bool evalSucceeded =
1160 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
1161 assert(evalSucceeded &&
1162 "probability should be able to evaluate as float");
1163 bool loseInfo = false; // ignored
1164 probability.convert(llvm::APFloat::IEEEdouble(),
1165 llvm::RoundingMode::Dynamic, &loseInfo);
1166 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
1167 probability);
1168 }
1169
1170 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
1171 argValue, expectedValue, probAttr);
1172 return RValue::get(result);
1173 }
1174
1175 case Builtin::BI__builtin_bswap16:
1176 case Builtin::BI__builtin_bswap32:
1177 case Builtin::BI__builtin_bswap64:
1178 case Builtin::BI_byteswap_ushort:
1179 case Builtin::BI_byteswap_ulong:
1180 case Builtin::BI_byteswap_uint64: {
1181 mlir::Value arg = emitScalarExpr(e->getArg(0));
1182 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
1183 }
1184
1185 case Builtin::BI__builtin_bitreverse8:
1186 case Builtin::BI__builtin_bitreverse16:
1187 case Builtin::BI__builtin_bitreverse32:
1188 case Builtin::BI__builtin_bitreverse64: {
1189 mlir::Value arg = emitScalarExpr(e->getArg(0));
1190 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
1191 }
1192
1193 case Builtin::BI__builtin_rotateleft8:
1194 case Builtin::BI__builtin_rotateleft16:
1195 case Builtin::BI__builtin_rotateleft32:
1196 case Builtin::BI__builtin_rotateleft64:
1197 return emitRotate(e, /*isRotateLeft=*/true);
1198
1199 case Builtin::BI__builtin_rotateright8:
1200 case Builtin::BI__builtin_rotateright16:
1201 case Builtin::BI__builtin_rotateright32:
1202 case Builtin::BI__builtin_rotateright64:
1203 return emitRotate(e, /*isRotateLeft=*/false);
1204
1205 case Builtin::BI__builtin_coro_id:
1206 case Builtin::BI__builtin_coro_promise:
1207 case Builtin::BI__builtin_coro_resume:
1208 case Builtin::BI__builtin_coro_noop:
1209 case Builtin::BI__builtin_coro_destroy:
1210 case Builtin::BI__builtin_coro_done:
1211 case Builtin::BI__builtin_coro_alloc:
1212 case Builtin::BI__builtin_coro_begin:
1213 case Builtin::BI__builtin_coro_end:
1214 case Builtin::BI__builtin_coro_suspend:
1215 case Builtin::BI__builtin_coro_align:
1216 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
1217 return getUndefRValue(e->getType());
1218
1219 case Builtin::BI__builtin_coro_frame: {
1220 return emitCoroutineFrame();
1221 }
1222 case Builtin::BI__builtin_coro_free:
1223 case Builtin::BI__builtin_coro_size: {
1224 GlobalDecl gd{fd};
1225 mlir::Type ty = cgm.getTypes().getFunctionType(
1226 cgm.getTypes().arrangeGlobalDeclaration(gd));
1227 const auto *nd = cast<NamedDecl>(gd.getDecl());
1228 cir::FuncOp fnOp =
1229 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
1230 fnOp.setBuiltin(true);
1231 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
1232 returnValue);
1233 }
1234
1235 case Builtin::BI__builtin_constant_p: {
1236 mlir::Type resultType = convertType(e->getType());
1237
1238 const Expr *arg = e->getArg(0);
1239 QualType argType = arg->getType();
1240 // FIXME: The allowance for Obj-C pointers and block pointers is historical
1241 // and likely a mistake.
1242 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
1243 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
1244 // Per the GCC documentation, only numeric constants are recognized after
1245 // inlining.
1246 return RValue::get(
1247 builder.getConstInt(getLoc(e->getSourceRange()),
1248 mlir::cast<cir::IntType>(resultType), 0));
1249 }
1250
1251 if (arg->HasSideEffects(getContext())) {
1252 // The argument is unevaluated, so be conservative if it might have
1253 // side-effects.
1254 return RValue::get(
1255 builder.getConstInt(getLoc(e->getSourceRange()),
1256 mlir::cast<cir::IntType>(resultType), 0));
1257 }
1258
1259 mlir::Value argValue = emitScalarExpr(arg);
1260 if (argType->isObjCObjectPointerType()) {
1261 cgm.errorNYI(e->getSourceRange(),
1262 "__builtin_constant_p: Obj-C object pointer");
1263 return {};
1264 }
1265 argValue = builder.createBitcast(argValue, convertType(argType));
1266
1267 mlir::Value result = cir::IsConstantOp::create(
1268 builder, getLoc(e->getSourceRange()), argValue);
1269 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
1270 result = builder.createBoolToInt(result, resultType);
1271 return RValue::get(result);
1272 }
1273 case Builtin::BI__builtin_dynamic_object_size:
1274 case Builtin::BI__builtin_object_size: {
1275 unsigned type =
1276 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
1277 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
1278
1279 // We pass this builtin onto the optimizer so that it can figure out the
1280 // object size in more complex cases.
1281 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
1282 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
1283 /*EmittedE=*/nullptr, isDynamic));
1284 }
1285
1286 case Builtin::BI__builtin_prefetch: {
1287 auto evaluateOperandAsInt = [&](const Expr *arg) {
1288 Expr::EvalResult res;
1289 [[maybe_unused]] bool evalSucceed =
1290 arg->EvaluateAsInt(res, cgm.getASTContext());
1291 assert(evalSucceed && "expression should be able to evaluate as int");
1292 return res.Val.getInt().getZExtValue();
1293 };
1294
1295 bool isWrite = false;
1296 if (e->getNumArgs() > 1)
1297 isWrite = evaluateOperandAsInt(e->getArg(1));
1298
1299 int locality = 3;
1300 if (e->getNumArgs() > 2)
1301 locality = evaluateOperandAsInt(e->getArg(2));
1302
1303 mlir::Value address = emitScalarExpr(e->getArg(0));
1304 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
1305 return RValue::get(nullptr);
1306 }
1307 case Builtin::BI__builtin_readcyclecounter:
1308 case Builtin::BI__builtin_readsteadycounter:
1309 return errorBuiltinNYI(*this, e, builtinID);
1310 case Builtin::BI__builtin___clear_cache: {
1311 mlir::Value begin =
1312 builder.createPtrBitcast(emitScalarExpr(e->getArg(0)), cgm.voidTy);
1313 mlir::Value end =
1314 builder.createPtrBitcast(emitScalarExpr(e->getArg(1)), cgm.voidTy);
1315 cir::ClearCacheOp::create(builder, getLoc(e->getSourceRange()), begin, end);
1316 return RValue::get(nullptr);
1317 }
1318 case Builtin::BI__builtin_trap:
1319 emitTrap(loc, /*createNewBlock=*/true);
1320 return RValue::getIgnored();
1321 case Builtin::BI__builtin_verbose_trap:
1322 case Builtin::BI__debugbreak:
1323 return errorBuiltinNYI(*this, e, builtinID);
1324 case Builtin::BI__builtin_unreachable:
1325 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
1326 return RValue::getIgnored();
1327 case Builtin::BI__builtin_powi:
1328 case Builtin::BI__builtin_powif:
1329 case Builtin::BI__builtin_powil:
1330 case Builtin::BI__builtin_frexpl:
1331 case Builtin::BI__builtin_frexp:
1332 case Builtin::BI__builtin_frexpf:
1333 case Builtin::BI__builtin_frexpf128:
1334 case Builtin::BI__builtin_frexpf16:
1335 case Builtin::BImodf:
1336 case Builtin::BImodff:
1337 case Builtin::BImodfl:
1338 case Builtin::BI__builtin_modf:
1339 case Builtin::BI__builtin_modff:
1340 case Builtin::BI__builtin_modfl:
1341 case Builtin::BI__builtin_isgreater:
1342 case Builtin::BI__builtin_isgreaterequal:
1343 case Builtin::BI__builtin_isless:
1344 case Builtin::BI__builtin_islessequal:
1345 case Builtin::BI__builtin_islessgreater:
1346 case Builtin::BI__builtin_isunordered:
1347 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
1348 //
1349 // The `__builtin_isfpclass()` builtin is a generalization of functions
1350 // isnan, isinf, isfinite and some others defined by the C standard. It tests
1351 // if the floating-point value, specified by the first argument, falls into
1352 // any of data classes, specified by the second argument.
1353 case Builtin::BI__builtin_isnan: {
1354 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1355 mlir::Value v = emitScalarExpr(e->getArg(0));
1357 mlir::Location loc = getLoc(e->getBeginLoc());
1358 return RValue::get(builder.createBoolToInt(
1359 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
1360 convertType(e->getType())));
1361 }
1362
1363 case Builtin::BI__builtin_issignaling: {
1364 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1365 mlir::Value v = emitScalarExpr(e->getArg(0));
1366 mlir::Location loc = getLoc(e->getBeginLoc());
1367 return RValue::get(builder.createBoolToInt(
1368 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
1369 convertType(e->getType())));
1370 }
1371
1372 case Builtin::BI__builtin_isinf: {
1373 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1374 mlir::Value v = emitScalarExpr(e->getArg(0));
1376 mlir::Location loc = getLoc(e->getBeginLoc());
1377 return RValue::get(builder.createBoolToInt(
1378 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
1379 convertType(e->getType())));
1380 }
1381 case Builtin::BIfinite:
1382 case Builtin::BI__finite:
1383 case Builtin::BIfinitef:
1384 case Builtin::BI__finitef:
1385 case Builtin::BIfinitel:
1386 case Builtin::BI__finitel:
1387 case Builtin::BI__builtin_isfinite: {
1388 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1389 mlir::Value v = emitScalarExpr(e->getArg(0));
1391 mlir::Location loc = getLoc(e->getBeginLoc());
1392 return RValue::get(builder.createBoolToInt(
1393 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
1394 convertType(e->getType())));
1395 }
1396
1397 case Builtin::BI__builtin_isnormal: {
1398 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1399 mlir::Value v = emitScalarExpr(e->getArg(0));
1400 mlir::Location loc = getLoc(e->getBeginLoc());
1401 return RValue::get(builder.createBoolToInt(
1402 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
1403 convertType(e->getType())));
1404 }
1405
1406 case Builtin::BI__builtin_issubnormal: {
1407 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1408 mlir::Value v = emitScalarExpr(e->getArg(0));
1409 mlir::Location loc = getLoc(e->getBeginLoc());
1410 return RValue::get(builder.createBoolToInt(
1411 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
1412 convertType(e->getType())));
1413 }
1414
1415 case Builtin::BI__builtin_iszero: {
1416 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1417 mlir::Value v = emitScalarExpr(e->getArg(0));
1418 mlir::Location loc = getLoc(e->getBeginLoc());
1419 return RValue::get(builder.createBoolToInt(
1420 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
1421 convertType(e->getType())));
1422 }
1423 case Builtin::BI__builtin_isfpclass: {
1424 Expr::EvalResult result;
1425 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
1426 break;
1427
1428 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1429 mlir::Value v = emitScalarExpr(e->getArg(0));
1430 uint64_t test = result.Val.getInt().getLimitedValue();
1431 mlir::Location loc = getLoc(e->getBeginLoc());
1432 //
1433 return RValue::get(builder.createBoolToInt(
1434 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
1435 convertType(e->getType())));
1436 }
1437 case Builtin::BI__builtin_nondeterministic_value:
1438 return errorBuiltinNYI(*this, e, builtinID);
1439 case Builtin::BI__builtin_elementwise_abs: {
1440 mlir::Type cirTy = convertType(e->getArg(0)->getType());
1441 bool isIntTy = cir::isIntOrVectorOfIntType(cirTy);
1442 if (!isIntTy)
1443 return emitUnaryFPBuiltin<cir::FAbsOp>(*this, *e);
1444 mlir::Value arg = emitScalarExpr(e->getArg(0));
1445 mlir::Value result = cir::AbsOp::create(builder, getLoc(e->getExprLoc()),
1446 arg.getType(), arg, false);
1447 return RValue::get(result);
1448 }
1449 case Builtin::BI__builtin_elementwise_acos:
1451 case Builtin::BI__builtin_elementwise_asin:
1453 case Builtin::BI__builtin_elementwise_atan:
1455 case Builtin::BI__builtin_elementwise_atan2:
1456 return RValue::get(
1458 case Builtin::BI__builtin_elementwise_exp:
1460 case Builtin::BI__builtin_elementwise_exp2:
1462 case Builtin::BI__builtin_elementwise_log:
1464 case Builtin::BI__builtin_elementwise_log2:
1466 case Builtin::BI__builtin_elementwise_log10:
1468 case Builtin::BI__builtin_elementwise_cos:
1470 case Builtin::BI__builtin_elementwise_floor:
1472 case Builtin::BI__builtin_elementwise_round:
1474 case Builtin::BI__builtin_elementwise_rint:
1476 case Builtin::BI__builtin_elementwise_nearbyint:
1478 case Builtin::BI__builtin_elementwise_sin:
1480 case Builtin::BI__builtin_elementwise_sqrt:
1482 case Builtin::BI__builtin_elementwise_tan:
1484 case Builtin::BI__builtin_elementwise_trunc:
1486 case Builtin::BI__builtin_elementwise_fmod:
1487 return RValue::get(
1489 case Builtin::BI__builtin_elementwise_ceil:
1490 case Builtin::BI__builtin_elementwise_exp10:
1491 case Builtin::BI__builtin_elementwise_ldexp:
1492 case Builtin::BI__builtin_elementwise_pow:
1493 case Builtin::BI__builtin_elementwise_bitreverse:
1494 case Builtin::BI__builtin_elementwise_cosh:
1495 case Builtin::BI__builtin_elementwise_popcount:
1496 case Builtin::BI__builtin_elementwise_roundeven:
1497 case Builtin::BI__builtin_elementwise_sinh:
1498 case Builtin::BI__builtin_elementwise_tanh:
1499 case Builtin::BI__builtin_elementwise_canonicalize:
1500 case Builtin::BI__builtin_elementwise_copysign:
1501 case Builtin::BI__builtin_elementwise_fma:
1502 return errorBuiltinNYI(*this, e, builtinID);
1503 case Builtin::BI__builtin_elementwise_fshl: {
1504 mlir::Location loc = getLoc(e->getExprLoc());
1505 mlir::Value a = emitScalarExpr(e->getArg(0));
1506 mlir::Value b = emitScalarExpr(e->getArg(1));
1507 mlir::Value c = emitScalarExpr(e->getArg(2));
1508 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshl", a.getType(),
1509 mlir::ValueRange{a, b, c}));
1510 }
1511 case Builtin::BI__builtin_elementwise_fshr: {
1512 mlir::Location loc = getLoc(e->getExprLoc());
1513 mlir::Value a = emitScalarExpr(e->getArg(0));
1514 mlir::Value b = emitScalarExpr(e->getArg(1));
1515 mlir::Value c = emitScalarExpr(e->getArg(2));
1516 return RValue::get(builder.emitIntrinsicCallOp(loc, "fshr", a.getType(),
1517 mlir::ValueRange{a, b, c}));
1518 }
1519 case Builtin::BI__builtin_elementwise_add_sat:
1520 case Builtin::BI__builtin_elementwise_sub_sat:
1521 case Builtin::BI__builtin_elementwise_max:
1522 case Builtin::BI__builtin_elementwise_min:
1523 case Builtin::BI__builtin_elementwise_maxnum:
1524 case Builtin::BI__builtin_elementwise_minnum:
1525 case Builtin::BI__builtin_elementwise_maximum:
1526 case Builtin::BI__builtin_elementwise_minimum:
1527 case Builtin::BI__builtin_elementwise_maximumnum:
1528 case Builtin::BI__builtin_elementwise_minimumnum:
1529 case Builtin::BI__builtin_reduce_max:
1530 case Builtin::BI__builtin_reduce_min:
1531 case Builtin::BI__builtin_reduce_add:
1532 case Builtin::BI__builtin_reduce_mul:
1533 case Builtin::BI__builtin_reduce_xor:
1534 case Builtin::BI__builtin_reduce_or:
1535 case Builtin::BI__builtin_reduce_and:
1536 case Builtin::BI__builtin_reduce_assoc_fadd:
1537 case Builtin::BI__builtin_reduce_in_order_fadd:
1538 case Builtin::BI__builtin_reduce_maximum:
1539 case Builtin::BI__builtin_reduce_minimum:
1540 case Builtin::BI__builtin_matrix_transpose:
1541 case Builtin::BI__builtin_matrix_column_major_load:
1542 case Builtin::BI__builtin_matrix_column_major_store:
1543 case Builtin::BI__builtin_masked_load:
1544 case Builtin::BI__builtin_masked_expand_load:
1545 case Builtin::BI__builtin_masked_gather:
1546 case Builtin::BI__builtin_masked_store:
1547 case Builtin::BI__builtin_masked_compress_store:
1548 case Builtin::BI__builtin_masked_scatter:
1549 return errorBuiltinNYI(*this, e, builtinID);
1550 case Builtin::BI__builtin_isinf_sign: {
1551 CIRGenFunction::CIRGenFPOptionsRAII FPOptsRAII(*this, e);
1552 mlir::Location loc = getLoc(e->getBeginLoc());
1553 mlir::Value arg = emitScalarExpr(e->getArg(0));
1554 mlir::Value isInf =
1555 builder.createIsFPClass(loc, arg, cir::FPClassTest::Infinity);
1556 mlir::Value isNeg = emitSignBit(loc, *this, arg);
1557 mlir::Type intTy = convertType(e->getType());
1558 cir::ConstantOp zero = builder.getNullValue(intTy, loc);
1559 cir::ConstantOp one = builder.getConstant(loc, cir::IntAttr::get(intTy, 1));
1560 cir::ConstantOp negativeOne =
1561 builder.getConstant(loc, cir::IntAttr::get(intTy, -1));
1562 mlir::Value signResult = builder.createSelect(loc, isNeg, negativeOne, one);
1563 mlir::Value result = builder.createSelect(loc, isInf, signResult, zero);
1564 return RValue::get(result);
1565 }
1566 case Builtin::BI__builtin_flt_rounds:
1567 case Builtin::BI__builtin_set_flt_rounds:
1568 case Builtin::BI__builtin_fpclassify:
1569 return errorBuiltinNYI(*this, e, builtinID);
1570 case Builtin::BIalloca:
1571 case Builtin::BI_alloca:
1572 case Builtin::BI__builtin_alloca_uninitialized:
1573 case Builtin::BI__builtin_alloca:
1574 return emitBuiltinAlloca(*this, e, builtinID);
1575 case Builtin::BI__builtin_alloca_with_align_uninitialized:
1576 case Builtin::BI__builtin_alloca_with_align:
1577 case Builtin::BI__builtin_infer_alloc_token:
1578 return errorBuiltinNYI(*this, e, builtinID);
1579 case Builtin::BIbzero:
1580 case Builtin::BI__builtin_bzero: {
1581 mlir::Location loc = getLoc(e->getSourceRange());
1582 Address destPtr = emitPointerWithAlignment(e->getArg(0));
1583 Address destPtrCast = destPtr.withElementType(builder, cgm.voidTy);
1584 mlir::Value size = emitScalarExpr(e->getArg(1));
1585 mlir::Value zero = builder.getNullValue(builder.getUInt8Ty(), loc);
1587 builder.createMemSet(loc, destPtrCast, zero, size);
1589 return RValue::getIgnored();
1590 }
1591 case Builtin::BIbcopy:
1592 case Builtin::BI__builtin_bcopy:
1593 return errorBuiltinNYI(*this, e, builtinID);
1594 case Builtin::BI__builtin_char_memchr:
1595 case Builtin::BI__builtin_memchr: {
1596 Address srcPtr = emitPointerWithAlignment(e->getArg(0));
1597 mlir::Value src =
1598 builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy());
1599 mlir::Value pattern = emitScalarExpr(e->getArg(1));
1600 mlir::Value len = emitScalarExpr(e->getArg(2));
1601 mlir::Value res = cir::MemChrOp::create(builder, getLoc(e->getExprLoc()),
1602 src, pattern, len);
1603 return RValue::get(res);
1604 }
1605 case Builtin::BImemcpy:
1606 case Builtin::BI__builtin_memcpy:
1607 case Builtin::BImempcpy:
1608 case Builtin::BI__builtin_mempcpy:
1609 case Builtin::BI__builtin_memcpy_inline:
1610 case Builtin::BI__builtin___memcpy_chk:
1611 case Builtin::BI__builtin_objc_memmove_collectable:
1612 case Builtin::BI__builtin___memmove_chk:
1613 case Builtin::BI__builtin_trivially_relocate:
1614 case Builtin::BImemmove:
1615 case Builtin::BI__builtin_memmove:
1616 case Builtin::BImemset:
1617 case Builtin::BI__builtin_memset:
1618 case Builtin::BI__builtin_memset_inline:
1619 case Builtin::BI__builtin___memset_chk:
1620 case Builtin::BI__builtin_wmemchr:
1621 case Builtin::BI__builtin_wmemcmp:
1622 break; // Handled as library calls below.
1623 case Builtin::BI__builtin_dwarf_cfa:
1624 return errorBuiltinNYI(*this, e, builtinID);
1625 case Builtin::BI__builtin_return_address: {
1626 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1627 return RValue::get(cir::ReturnAddrOp::create(
1628 builder, getLoc(e->getExprLoc()),
1629 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
1630 }
1631 case Builtin::BI_ReturnAddress: {
1632 return RValue::get(cir::ReturnAddrOp::create(
1633 builder, getLoc(e->getExprLoc()),
1634 builder.getConstInt(loc, builder.getUInt32Ty(), 0)));
1635 }
1636 case Builtin::BI__builtin_frame_address: {
1637 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1638 mlir::Location loc = getLoc(e->getExprLoc());
1639 mlir::Value addr = cir::FrameAddrOp::create(
1640 builder, loc, allocaInt8PtrTy,
1641 builder.getConstAPInt(loc, builder.getUInt32Ty(), level));
1642 return RValue::get(
1643 builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy));
1644 }
1645 case Builtin::BI__builtin_extract_return_addr:
1646 case Builtin::BI__builtin_frob_return_addr:
1647 case Builtin::BI__builtin_dwarf_sp_column:
1648 case Builtin::BI__builtin_init_dwarf_reg_size_table:
1649 case Builtin::BI__builtin_eh_return:
1650 case Builtin::BI__builtin_unwind_init:
1651 case Builtin::BI__builtin_extend_pointer:
1652 return errorBuiltinNYI(*this, e, builtinID);
1653 case Builtin::BI__builtin_setjmp: {
1655 mlir::Location loc = getLoc(e->getExprLoc());
1656
1657 cir::PointerType voidPtrTy = builder.getVoidPtrTy();
1658 cir::PointerType ppTy = builder.getPointerTo(voidPtrTy);
1659 Address castBuf = buf.withElementType(builder, voidPtrTy);
1660
1662 if (getTarget().getTriple().isSystemZ()) {
1663 cgm.errorNYI(e->getExprLoc(), "setjmp on SystemZ");
1664 return {};
1665 }
1666
1667 mlir::Value frameAddress =
1668 cir::FrameAddrOp::create(builder, loc, voidPtrTy,
1669 mlir::ValueRange{builder.getUInt32(0, loc)})
1670 .getResult();
1671
1672 builder.createStore(loc, frameAddress, castBuf);
1673
1674 mlir::Value stacksave =
1675 cir::StackSaveOp::create(builder, loc, voidPtrTy).getResult();
1676 cir::PtrStrideOp stackSaveSlot = cir::PtrStrideOp::create(
1677 builder, loc, ppTy, castBuf.getPointer(), builder.getSInt32(2, loc));
1678 llvm::TypeSize voidPtrTySize =
1679 cgm.getDataLayout().getTypeAllocSize(voidPtrTy);
1680 CharUnits slotAlign = castBuf.getAlignment().alignmentAtOffset(
1681 CharUnits().fromQuantity(2 * voidPtrTySize));
1682 Address slotAddr = Address(stackSaveSlot, voidPtrTy, slotAlign);
1683 builder.createStore(loc, stacksave, slotAddr);
1684 auto op = cir::EhSetjmpOp::create(builder, loc, castBuf.getPointer());
1685 return RValue::get(op);
1686 }
1687 case Builtin::BI__builtin_longjmp: {
1688 mlir::Value buf = emitScalarExpr(e->getArg(0));
1689 mlir::Location loc = getLoc(e->getExprLoc());
1690
1691 cir::EhLongjmpOp::create(builder, loc, buf);
1692 cir::UnreachableOp::create(builder, loc);
1693 return RValue::get(nullptr);
1694 }
1695 case Builtin::BI__builtin_launder:
1696 case Builtin::BI__sync_fetch_and_add:
1697 case Builtin::BI__sync_fetch_and_sub:
1698 case Builtin::BI__sync_fetch_and_or:
1699 case Builtin::BI__sync_fetch_and_and:
1700 case Builtin::BI__sync_fetch_and_xor:
1701 case Builtin::BI__sync_fetch_and_nand:
1702 case Builtin::BI__sync_add_and_fetch:
1703 case Builtin::BI__sync_sub_and_fetch:
1704 case Builtin::BI__sync_and_and_fetch:
1705 case Builtin::BI__sync_or_and_fetch:
1706 case Builtin::BI__sync_xor_and_fetch:
1707 case Builtin::BI__sync_nand_and_fetch:
1708 case Builtin::BI__sync_val_compare_and_swap:
1709 case Builtin::BI__sync_bool_compare_and_swap:
1710 case Builtin::BI__sync_lock_test_and_set:
1711 case Builtin::BI__sync_lock_release:
1712 case Builtin::BI__sync_swap:
1713 case Builtin::BI__sync_fetch_and_add_1:
1714 case Builtin::BI__sync_fetch_and_add_2:
1715 case Builtin::BI__sync_fetch_and_add_4:
1716 case Builtin::BI__sync_fetch_and_add_8:
1717 case Builtin::BI__sync_fetch_and_add_16:
1718 case Builtin::BI__sync_fetch_and_sub_1:
1719 case Builtin::BI__sync_fetch_and_sub_2:
1720 case Builtin::BI__sync_fetch_and_sub_4:
1721 case Builtin::BI__sync_fetch_and_sub_8:
1722 case Builtin::BI__sync_fetch_and_sub_16:
1723 case Builtin::BI__sync_fetch_and_or_1:
1724 case Builtin::BI__sync_fetch_and_or_2:
1725 case Builtin::BI__sync_fetch_and_or_4:
1726 case Builtin::BI__sync_fetch_and_or_8:
1727 case Builtin::BI__sync_fetch_and_or_16:
1728 case Builtin::BI__sync_fetch_and_and_1:
1729 case Builtin::BI__sync_fetch_and_and_2:
1730 case Builtin::BI__sync_fetch_and_and_4:
1731 case Builtin::BI__sync_fetch_and_and_8:
1732 case Builtin::BI__sync_fetch_and_and_16:
1733 case Builtin::BI__sync_fetch_and_xor_1:
1734 case Builtin::BI__sync_fetch_and_xor_2:
1735 case Builtin::BI__sync_fetch_and_xor_4:
1736 case Builtin::BI__sync_fetch_and_xor_8:
1737 case Builtin::BI__sync_fetch_and_xor_16:
1738 case Builtin::BI__sync_fetch_and_nand_1:
1739 case Builtin::BI__sync_fetch_and_nand_2:
1740 case Builtin::BI__sync_fetch_and_nand_4:
1741 case Builtin::BI__sync_fetch_and_nand_8:
1742 case Builtin::BI__sync_fetch_and_nand_16:
1743 case Builtin::BI__sync_fetch_and_min:
1744 case Builtin::BI__sync_fetch_and_max:
1745 case Builtin::BI__sync_fetch_and_umin:
1746 case Builtin::BI__sync_fetch_and_umax:
1747 return errorBuiltinNYI(*this, e, builtinID);
1748 return getUndefRValue(e->getType());
1749 case Builtin::BI__sync_add_and_fetch_1:
1750 case Builtin::BI__sync_add_and_fetch_2:
1751 case Builtin::BI__sync_add_and_fetch_4:
1752 case Builtin::BI__sync_add_and_fetch_8:
1753 case Builtin::BI__sync_add_and_fetch_16:
1754 return emitBinaryAtomicPost<cir::AddOp>(*this, cir::AtomicFetchKind::Add,
1755 e);
1756 case Builtin::BI__sync_sub_and_fetch_1:
1757 case Builtin::BI__sync_sub_and_fetch_2:
1758 case Builtin::BI__sync_sub_and_fetch_4:
1759 case Builtin::BI__sync_sub_and_fetch_8:
1760 case Builtin::BI__sync_sub_and_fetch_16:
1761 return emitBinaryAtomicPost<cir::SubOp>(*this, cir::AtomicFetchKind::Sub,
1762 e);
1763 case Builtin::BI__sync_and_and_fetch_1:
1764 case Builtin::BI__sync_and_and_fetch_2:
1765 case Builtin::BI__sync_and_and_fetch_4:
1766 case Builtin::BI__sync_and_and_fetch_8:
1767 case Builtin::BI__sync_and_and_fetch_16:
1768 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::And,
1769 e);
1770 case Builtin::BI__sync_or_and_fetch_1:
1771 case Builtin::BI__sync_or_and_fetch_2:
1772 case Builtin::BI__sync_or_and_fetch_4:
1773 case Builtin::BI__sync_or_and_fetch_8:
1774 case Builtin::BI__sync_or_and_fetch_16:
1775 return emitBinaryAtomicPost<cir::OrOp>(*this, cir::AtomicFetchKind::Or, e);
1776 case Builtin::BI__sync_xor_and_fetch_1:
1777 case Builtin::BI__sync_xor_and_fetch_2:
1778 case Builtin::BI__sync_xor_and_fetch_4:
1779 case Builtin::BI__sync_xor_and_fetch_8:
1780 case Builtin::BI__sync_xor_and_fetch_16:
1781 return emitBinaryAtomicPost<cir::XorOp>(*this, cir::AtomicFetchKind::Xor,
1782 e);
1783 case Builtin::BI__sync_nand_and_fetch_1:
1784 case Builtin::BI__sync_nand_and_fetch_2:
1785 case Builtin::BI__sync_nand_and_fetch_4:
1786 case Builtin::BI__sync_nand_and_fetch_8:
1787 case Builtin::BI__sync_nand_and_fetch_16:
1788 return emitBinaryAtomicPost<cir::AndOp>(*this, cir::AtomicFetchKind::Nand,
1789 e, /*invert=*/true);
1790 case Builtin::BI__sync_val_compare_and_swap_1:
1791 case Builtin::BI__sync_val_compare_and_swap_2:
1792 case Builtin::BI__sync_val_compare_and_swap_4:
1793 case Builtin::BI__sync_val_compare_and_swap_8:
1794 case Builtin::BI__sync_val_compare_and_swap_16:
1795 case Builtin::BI__sync_bool_compare_and_swap_1:
1796 case Builtin::BI__sync_bool_compare_and_swap_2:
1797 case Builtin::BI__sync_bool_compare_and_swap_4:
1798 case Builtin::BI__sync_bool_compare_and_swap_8:
1799 case Builtin::BI__sync_bool_compare_and_swap_16:
1800 case Builtin::BI__sync_swap_1:
1801 case Builtin::BI__sync_swap_2:
1802 case Builtin::BI__sync_swap_4:
1803 case Builtin::BI__sync_swap_8:
1804 case Builtin::BI__sync_swap_16:
1805 case Builtin::BI__sync_lock_test_and_set_1:
1806 case Builtin::BI__sync_lock_test_and_set_2:
1807 case Builtin::BI__sync_lock_test_and_set_4:
1808 case Builtin::BI__sync_lock_test_and_set_8:
1809 case Builtin::BI__sync_lock_test_and_set_16:
1810 case Builtin::BI__sync_lock_release_1:
1811 case Builtin::BI__sync_lock_release_2:
1812 case Builtin::BI__sync_lock_release_4:
1813 case Builtin::BI__sync_lock_release_8:
1814 case Builtin::BI__sync_lock_release_16:
1815 case Builtin::BI__sync_synchronize:
1816 case Builtin::BI__builtin_nontemporal_load:
1817 case Builtin::BI__builtin_nontemporal_store:
1818 case Builtin::BI__c11_atomic_is_lock_free:
1819 case Builtin::BI__atomic_is_lock_free:
1820 case Builtin::BI__atomic_test_and_set:
1821 case Builtin::BI__atomic_clear:
1822 return errorBuiltinNYI(*this, e, builtinID);
1823 case Builtin::BI__atomic_thread_fence:
1824 case Builtin::BI__c11_atomic_thread_fence: {
1825 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
1826 return RValue::get(nullptr);
1827 }
1828 case Builtin::BI__atomic_signal_fence:
1829 case Builtin::BI__c11_atomic_signal_fence: {
1830 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
1831 return RValue::get(nullptr);
1832 }
1833 case Builtin::BI__scoped_atomic_thread_fence:
1834 case Builtin::BI__builtin_signbit:
1835 case Builtin::BI__builtin_signbitf:
1836 case Builtin::BI__builtin_signbitl:
1837 case Builtin::BI__warn_memset_zero_len:
1838 case Builtin::BI__annotation:
1839 case Builtin::BI__builtin_annotation:
1840 case Builtin::BI__builtin_addcb:
1841 case Builtin::BI__builtin_addcs:
1842 case Builtin::BI__builtin_addc:
1843 case Builtin::BI__builtin_addcl:
1844 case Builtin::BI__builtin_addcll:
1845 case Builtin::BI__builtin_subcb:
1846 case Builtin::BI__builtin_subcs:
1847 case Builtin::BI__builtin_subc:
1848 case Builtin::BI__builtin_subcl:
1849 case Builtin::BI__builtin_subcll:
1850 return errorBuiltinNYI(*this, e, builtinID);
1851
1852 case Builtin::BI__builtin_add_overflow:
1853 case Builtin::BI__builtin_sub_overflow:
1854 case Builtin::BI__builtin_mul_overflow: {
1855 const clang::Expr *leftArg = e->getArg(0);
1856 const clang::Expr *rightArg = e->getArg(1);
1857 const clang::Expr *resultArg = e->getArg(2);
1858
1859 clang::QualType resultQTy =
1860 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1861
1862 WidthAndSignedness leftInfo =
1863 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
1864 WidthAndSignedness rightInfo =
1865 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
1866 WidthAndSignedness resultInfo =
1867 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
1868
1869 // Note we compute the encompassing type with the consideration to the
1870 // result type, so later in LLVM lowering we don't get redundant integral
1871 // extension casts.
1872 WidthAndSignedness encompassingInfo =
1873 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
1874
1875 auto encompassingCIRTy = cir::IntType::get(
1876 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
1877 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1878
1879 mlir::Value left = emitScalarExpr(leftArg);
1880 mlir::Value right = emitScalarExpr(rightArg);
1881 Address resultPtr = emitPointerWithAlignment(resultArg);
1882
1883 // Extend each operand to the encompassing type, if necessary.
1884 if (left.getType() != encompassingCIRTy)
1885 left =
1886 builder.createCast(cir::CastKind::integral, left, encompassingCIRTy);
1887 if (right.getType() != encompassingCIRTy)
1888 right =
1889 builder.createCast(cir::CastKind::integral, right, encompassingCIRTy);
1890
1891 // Perform the operation on the extended values.
1892 cir::BinOpOverflowKind opKind;
1893 switch (builtinID) {
1894 default:
1895 llvm_unreachable("Unknown overflow builtin id.");
1896 case Builtin::BI__builtin_add_overflow:
1897 opKind = cir::BinOpOverflowKind::Add;
1898 break;
1899 case Builtin::BI__builtin_sub_overflow:
1900 opKind = cir::BinOpOverflowKind::Sub;
1901 break;
1902 case Builtin::BI__builtin_mul_overflow:
1903 opKind = cir::BinOpOverflowKind::Mul;
1904 break;
1905 }
1906
1907 mlir::Location loc = getLoc(e->getSourceRange());
1908 auto arithOp = cir::BinOpOverflowOp::create(builder, loc, resultCIRTy,
1909 opKind, left, right);
1910
1911 // Here is a slight difference from the original clang CodeGen:
1912 // - In the original clang CodeGen, the checked arithmetic result is
1913 // first computed as a value of the encompassing type, and then it is
1914 // truncated to the actual result type with a second overflow checking.
1915 // - In CIRGen, the checked arithmetic operation directly produce the
1916 // checked arithmetic result in its expected type.
1917 //
1918 // So we don't need a truncation and a second overflow checking here.
1919
1920 // Finally, store the result using the pointer.
1921 bool isVolatile =
1922 resultArg->getType()->getPointeeType().isVolatileQualified();
1923 builder.createStore(loc, arithOp.getResult(), resultPtr, isVolatile);
1924
1925 return RValue::get(arithOp.getOverflow());
1926 }
1927
1928 case Builtin::BI__builtin_uadd_overflow:
1929 case Builtin::BI__builtin_uaddl_overflow:
1930 case Builtin::BI__builtin_uaddll_overflow:
1931 case Builtin::BI__builtin_usub_overflow:
1932 case Builtin::BI__builtin_usubl_overflow:
1933 case Builtin::BI__builtin_usubll_overflow:
1934 case Builtin::BI__builtin_umul_overflow:
1935 case Builtin::BI__builtin_umull_overflow:
1936 case Builtin::BI__builtin_umulll_overflow:
1937 case Builtin::BI__builtin_sadd_overflow:
1938 case Builtin::BI__builtin_saddl_overflow:
1939 case Builtin::BI__builtin_saddll_overflow:
1940 case Builtin::BI__builtin_ssub_overflow:
1941 case Builtin::BI__builtin_ssubl_overflow:
1942 case Builtin::BI__builtin_ssubll_overflow:
1943 case Builtin::BI__builtin_smul_overflow:
1944 case Builtin::BI__builtin_smull_overflow:
1945 case Builtin::BI__builtin_smulll_overflow: {
1946 // Scalarize our inputs.
1947 mlir::Value x = emitScalarExpr(e->getArg(0));
1948 mlir::Value y = emitScalarExpr(e->getArg(1));
1949
1950 const clang::Expr *resultArg = e->getArg(2);
1951 Address resultPtr = emitPointerWithAlignment(resultArg);
1952
1953 // Decide which of the arithmetic operation we are lowering to:
1954 cir::BinOpOverflowKind arithKind;
1955 switch (builtinID) {
1956 default:
1957 llvm_unreachable("Unknown overflow builtin id.");
1958 case Builtin::BI__builtin_uadd_overflow:
1959 case Builtin::BI__builtin_uaddl_overflow:
1960 case Builtin::BI__builtin_uaddll_overflow:
1961 case Builtin::BI__builtin_sadd_overflow:
1962 case Builtin::BI__builtin_saddl_overflow:
1963 case Builtin::BI__builtin_saddll_overflow:
1964 arithKind = cir::BinOpOverflowKind::Add;
1965 break;
1966 case Builtin::BI__builtin_usub_overflow:
1967 case Builtin::BI__builtin_usubl_overflow:
1968 case Builtin::BI__builtin_usubll_overflow:
1969 case Builtin::BI__builtin_ssub_overflow:
1970 case Builtin::BI__builtin_ssubl_overflow:
1971 case Builtin::BI__builtin_ssubll_overflow:
1972 arithKind = cir::BinOpOverflowKind::Sub;
1973 break;
1974 case Builtin::BI__builtin_umul_overflow:
1975 case Builtin::BI__builtin_umull_overflow:
1976 case Builtin::BI__builtin_umulll_overflow:
1977 case Builtin::BI__builtin_smul_overflow:
1978 case Builtin::BI__builtin_smull_overflow:
1979 case Builtin::BI__builtin_smulll_overflow:
1980 arithKind = cir::BinOpOverflowKind::Mul;
1981 break;
1982 }
1983
1984 clang::QualType resultQTy =
1985 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1986 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1987
1988 mlir::Location loc = getLoc(e->getSourceRange());
1989 cir::BinOpOverflowOp arithOp = cir::BinOpOverflowOp::create(
1990 builder, loc, resultCIRTy, arithKind, x, y);
1991
1992 bool isVolatile =
1993 resultArg->getType()->getPointeeType().isVolatileQualified();
1994 builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
1995 resultPtr, isVolatile);
1996
1997 return RValue::get(arithOp.getOverflow());
1998 }
1999
2000 case Builtin::BIaddressof:
2001 case Builtin::BI__addressof:
2002 case Builtin::BI__builtin_addressof:
2003 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2004 case Builtin::BI__builtin_function_start:
2005 return errorBuiltinNYI(*this, e, builtinID);
2006 case Builtin::BI__builtin_operator_new:
2008 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
2009 case Builtin::BI__builtin_operator_delete:
2011 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
2012 return RValue::get(nullptr);
2013 case Builtin::BI__builtin_is_aligned:
2014 case Builtin::BI__builtin_align_up:
2015 case Builtin::BI__builtin_align_down:
2016 case Builtin::BI__noop:
2017 case Builtin::BI__builtin_call_with_static_chain:
2018 case Builtin::BI_InterlockedExchange8:
2019 case Builtin::BI_InterlockedExchange16:
2020 case Builtin::BI_InterlockedExchange:
2021 case Builtin::BI_InterlockedExchangePointer:
2022 case Builtin::BI_InterlockedCompareExchangePointer:
2023 case Builtin::BI_InterlockedCompareExchangePointer_nf:
2024 case Builtin::BI_InterlockedCompareExchange8:
2025 case Builtin::BI_InterlockedCompareExchange16:
2026 case Builtin::BI_InterlockedCompareExchange:
2027 case Builtin::BI_InterlockedCompareExchange64:
2028 case Builtin::BI_InterlockedIncrement16:
2029 case Builtin::BI_InterlockedIncrement:
2030 case Builtin::BI_InterlockedDecrement16:
2031 case Builtin::BI_InterlockedDecrement:
2032 case Builtin::BI_InterlockedAnd8:
2033 case Builtin::BI_InterlockedAnd16:
2034 case Builtin::BI_InterlockedAnd:
2035 case Builtin::BI_InterlockedExchangeAdd8:
2036 case Builtin::BI_InterlockedExchangeAdd16:
2037 case Builtin::BI_InterlockedExchangeAdd:
2038 case Builtin::BI_InterlockedExchangeSub8:
2039 case Builtin::BI_InterlockedExchangeSub16:
2040 case Builtin::BI_InterlockedExchangeSub:
2041 case Builtin::BI_InterlockedOr8:
2042 case Builtin::BI_InterlockedOr16:
2043 case Builtin::BI_InterlockedOr:
2044 case Builtin::BI_InterlockedXor8:
2045 case Builtin::BI_InterlockedXor16:
2046 case Builtin::BI_InterlockedXor:
2047 case Builtin::BI_bittest64:
2048 case Builtin::BI_bittest:
2049 case Builtin::BI_bittestandcomplement64:
2050 case Builtin::BI_bittestandcomplement:
2051 case Builtin::BI_bittestandreset64:
2052 case Builtin::BI_bittestandreset:
2053 case Builtin::BI_bittestandset64:
2054 case Builtin::BI_bittestandset:
2055 case Builtin::BI_interlockedbittestandreset:
2056 case Builtin::BI_interlockedbittestandreset64:
2057 case Builtin::BI_interlockedbittestandreset64_acq:
2058 case Builtin::BI_interlockedbittestandreset64_rel:
2059 case Builtin::BI_interlockedbittestandreset64_nf:
2060 case Builtin::BI_interlockedbittestandset64:
2061 case Builtin::BI_interlockedbittestandset64_acq:
2062 case Builtin::BI_interlockedbittestandset64_rel:
2063 case Builtin::BI_interlockedbittestandset64_nf:
2064 case Builtin::BI_interlockedbittestandset:
2065 case Builtin::BI_interlockedbittestandset_acq:
2066 case Builtin::BI_interlockedbittestandset_rel:
2067 case Builtin::BI_interlockedbittestandset_nf:
2068 case Builtin::BI_interlockedbittestandreset_acq:
2069 case Builtin::BI_interlockedbittestandreset_rel:
2070 case Builtin::BI_interlockedbittestandreset_nf:
2071 case Builtin::BI__iso_volatile_load8:
2072 case Builtin::BI__iso_volatile_load16:
2073 case Builtin::BI__iso_volatile_load32:
2074 case Builtin::BI__iso_volatile_load64:
2075 case Builtin::BI__iso_volatile_store8:
2076 case Builtin::BI__iso_volatile_store16:
2077 case Builtin::BI__iso_volatile_store32:
2078 case Builtin::BI__iso_volatile_store64:
2079 case Builtin::BI__builtin_ptrauth_sign_constant:
2080 case Builtin::BI__builtin_ptrauth_auth:
2081 case Builtin::BI__builtin_ptrauth_auth_and_resign:
2082 case Builtin::BI__builtin_ptrauth_blend_discriminator:
2083 case Builtin::BI__builtin_ptrauth_sign_generic_data:
2084 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
2085 case Builtin::BI__builtin_ptrauth_strip:
2086 case Builtin::BI__builtin_get_vtable_pointer:
2087 case Builtin::BI__exception_code:
2088 case Builtin::BI_exception_code:
2089 case Builtin::BI__exception_info:
2090 case Builtin::BI_exception_info:
2091 case Builtin::BI__abnormal_termination:
2092 case Builtin::BI_abnormal_termination:
2093 case Builtin::BI_setjmpex:
2094 case Builtin::BI_setjmp:
2095 return errorBuiltinNYI(*this, e, builtinID);
2096 case Builtin::BImove:
2097 case Builtin::BImove_if_noexcept:
2098 case Builtin::BIforward:
2099 case Builtin::BIforward_like:
2100 case Builtin::BIas_const:
2101 return RValue::get(emitLValue(e->getArg(0)).getPointer());
2102 case Builtin::BI__GetExceptionInfo:
2103 case Builtin::BI__fastfail:
2104 case Builtin::BIread_pipe:
2105 case Builtin::BIwrite_pipe:
2106 case Builtin::BIreserve_read_pipe:
2107 case Builtin::BIreserve_write_pipe:
2108 case Builtin::BIwork_group_reserve_read_pipe:
2109 case Builtin::BIwork_group_reserve_write_pipe:
2110 case Builtin::BIsub_group_reserve_read_pipe:
2111 case Builtin::BIsub_group_reserve_write_pipe:
2112 case Builtin::BIcommit_read_pipe:
2113 case Builtin::BIcommit_write_pipe:
2114 case Builtin::BIwork_group_commit_read_pipe:
2115 case Builtin::BIwork_group_commit_write_pipe:
2116 case Builtin::BIsub_group_commit_read_pipe:
2117 case Builtin::BIsub_group_commit_write_pipe:
2118 case Builtin::BIget_pipe_num_packets:
2119 case Builtin::BIget_pipe_max_packets:
2120 case Builtin::BIto_global:
2121 case Builtin::BIto_local:
2122 case Builtin::BIto_private:
2123 case Builtin::BIenqueue_kernel:
2124 case Builtin::BIget_kernel_work_group_size:
2125 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
2126 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
2127 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
2128 case Builtin::BI__builtin_store_half:
2129 case Builtin::BI__builtin_store_halff:
2130 case Builtin::BI__builtin_load_half:
2131 case Builtin::BI__builtin_load_halff:
2132 return errorBuiltinNYI(*this, e, builtinID);
2133 case Builtin::BI__builtin_printf:
2134 case Builtin::BIprintf:
2135 break;
2136 case Builtin::BI__builtin_canonicalize:
2137 case Builtin::BI__builtin_canonicalizef:
2138 case Builtin::BI__builtin_canonicalizef16:
2139 case Builtin::BI__builtin_canonicalizel:
2140 case Builtin::BI__builtin_thread_pointer:
2141 case Builtin::BI__builtin_os_log_format:
2142 case Builtin::BI__xray_customevent:
2143 case Builtin::BI__xray_typedevent:
2144 case Builtin::BI__builtin_ms_va_start:
2145 case Builtin::BI__builtin_ms_va_end:
2146 case Builtin::BI__builtin_ms_va_copy:
2147 case Builtin::BI__builtin_get_device_side_mangled_name:
2148 return errorBuiltinNYI(*this, e, builtinID);
2149 }
2150
2151 // If this is an alias for a lib function (e.g. __builtin_sin), emit
2152 // the call using the normal call path, but using the unmangled
2153 // version of the function name.
2154 if (getContext().BuiltinInfo.isLibFunction(builtinID))
2155 return emitLibraryCall(*this, fd, e,
2156 cgm.getBuiltinLibFunction(fd, builtinID));
2157
2158 // If this is a predefined lib function (e.g. malloc), emit the call
2159 // using exactly the normal call path.
2160 if (getContext().BuiltinInfo.isPredefinedLibFunction(builtinID))
2161 return emitLibraryCall(*this, fd, e,
2162 emitScalarExpr(e->getCallee()).getDefiningOp());
2163
2164 // See if we have a target specific intrinsic.
2165 std::string name = getContext().BuiltinInfo.getName(builtinID);
2166 Intrinsic::ID intrinsicID = Intrinsic::not_intrinsic;
2167 StringRef prefix =
2168 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
2169 if (!prefix.empty()) {
2170 intrinsicID = Intrinsic::getIntrinsicForClangBuiltin(prefix.data(), name);
2171 // NOTE we don't need to perform a compatibility flag check here since the
2172 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
2173 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
2174 if (intrinsicID == Intrinsic::not_intrinsic)
2175 intrinsicID = Intrinsic::getIntrinsicForMSBuiltin(prefix.data(), name);
2176 }
2177
2178 if (intrinsicID != Intrinsic::not_intrinsic) {
2179 unsigned iceArguments = 0;
2181 getContext().GetBuiltinType(builtinID, error, &iceArguments);
2182 assert(error == ASTContext::GE_None && "Should not codegen an error");
2183
2184 llvm::StringRef name = llvm::Intrinsic::getName(intrinsicID);
2185 // cir::LLVMIntrinsicCallOp expects intrinsic name to not have prefix
2186 // "llvm." For example, `llvm.nvvm.barrier0` should be passed as
2187 // `nvvm.barrier0`.
2188 assert(name.starts_with("llvm.") && "expected llvm. prefix");
2189 name = name.drop_front(/*strlen("llvm.")=*/5);
2190
2191 cir::FuncType intrinsicType =
2192 getIntrinsicType(*this, &getMLIRContext(), intrinsicID);
2193
2195 const FunctionDecl *fd = e->getDirectCallee();
2196 for (unsigned i = 0; i < e->getNumArgs(); i++) {
2197 mlir::Value argValue =
2198 emitScalarOrConstFoldImmArg(iceArguments, i, e->getArg(i));
2199 // If the intrinsic arg type is different from the builtin arg type
2200 // we need to do a bit cast.
2201 mlir::Type argType = argValue.getType();
2202 mlir::Type expectedTy = intrinsicType.getInput(i);
2203
2204 // Correct integer signedness based on AST parameter type
2205 mlir::Type correctedExpectedTy = expectedTy;
2206 if (fd && i < fd->getNumParams()) {
2207 correctedExpectedTy = correctIntegerSignedness(
2208 expectedTy, fd->getParamDecl(i)->getType(), &getMLIRContext());
2209 }
2210
2211 if (mlir::isa<cir::PointerType>(expectedTy)) {
2212 bool argIsPointer = mlir::isa<cir::PointerType>(argType);
2213 bool argIsVectorOfPointer = false;
2214 if (auto vecTy = dyn_cast<mlir::VectorType>(argType))
2215 argIsVectorOfPointer =
2216 mlir::isa<cir::PointerType>(vecTy.getElementType());
2217
2218 if (!argIsPointer && !argIsVectorOfPointer) {
2219 cgm.errorNYI(
2220 e->getSourceRange(),
2221 "intrinsic expects a pointer type (NYI for non-pointer)");
2222 return getUndefRValue(e->getType());
2223 }
2224
2225 // Pointer handling (address-space cast / bitcast fallback).
2226 if (argType != expectedTy)
2227 argValue = getCorrectedPtr(argValue, expectedTy, builder);
2228 } else {
2229 // Non-pointer expected type: if needed, bitcast to the corrected
2230 // expected type to match signedness/representation.
2231 if (argType != correctedExpectedTy)
2232 argValue = builder.createBitcast(argValue, correctedExpectedTy);
2233 }
2234
2235 args.push_back(argValue);
2236 }
2237
2238 // Correct return type signedness based on AST return type before creating
2239 // the call, avoiding unnecessary casts in the IR.
2240 mlir::Type correctedReturnType = intrinsicType.getReturnType();
2241 if (fd) {
2242 correctedReturnType =
2243 correctIntegerSignedness(intrinsicType.getReturnType(),
2244 fd->getReturnType(), &getMLIRContext());
2245 }
2246
2247 cir::LLVMIntrinsicCallOp intrinsicCall = cir::LLVMIntrinsicCallOp::create(
2248 builder, getLoc(e->getExprLoc()), builder.getStringAttr(name),
2249 correctedReturnType, args);
2250
2251 mlir::Value intrinsicRes = intrinsicCall.getResult();
2252
2253 if (isa<cir::VoidType>(correctedReturnType))
2254 return RValue::get(nullptr);
2255
2256 return RValue::get(intrinsicRes);
2257 }
2258
2259 // Some target-specific builtins can have aggregate return values, e.g.
2260 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
2261 // returnValue to be non-null, so that the target-specific emission code can
2262 // always just emit into it.
2264 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
2265 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2266 return getUndefRValue(e->getType());
2267 }
2268
2269 // Now see if we can emit a target-specific builtin.
2270 // FIXME: This is a temporary mechanism (double-optional semantics) that will
2271 // go away once everything is implemented:
2272 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
2273 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
2274 // but also didn't handle the builtin.
2275 if (std::optional<mlir::Value> rst =
2276 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
2277 mlir::Value v = rst.value();
2278 // CIR dialect operations may have no results, no values will be returned
2279 // even if it executes successfully.
2280 if (!v)
2281 return RValue::get(nullptr);
2282
2283 switch (evalKind) {
2284 case cir::TEK_Scalar:
2285 if (mlir::isa<cir::VoidType>(v.getType()))
2286 return RValue::get(nullptr);
2287 return RValue::get(v);
2288 case cir::TEK_Aggregate:
2289 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
2290 return getUndefRValue(e->getType());
2291 case cir::TEK_Complex:
2292 llvm_unreachable("No current target builtin returns complex");
2293 }
2294 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
2295 }
2296
2297 cgm.errorNYI(e->getSourceRange(),
2298 std::string("unimplemented builtin call: ") +
2299 getContext().BuiltinInfo.getName(builtinID));
2300 return getUndefRValue(e->getType());
2301}
2302
/// Dispatch a (possibly) target-specific builtin to the per-architecture
/// emitter for \p arch.
///
/// \returns std::nullopt when the builtin was not handled here (either the
/// architecture's emission is NYI, or the arch is unknown) so the caller can
/// report the diagnostic; otherwise the per-arch emitter's result is
/// forwarded unchanged.
static std::optional<mlir::Value>
emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID,
                          const CallExpr *e, ReturnValueSlot &returnValue,
                          llvm::Triple::ArchType arch) {
  // NOTE(review): the signature line above was reconstructed from the file's
  // declaration index (the extraction dropped it) -- confirm against upstream.

  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  // Returning nullptr here leads to the builtin being handled in
  // EmitStdParUnsupportedBuiltin.
  if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
      arch != cgf->getTarget().getTriple().getArch())
    return std::nullopt;

  switch (arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    // These are actually NYI, but that will be reported by emitBuiltinExpr.
    // At this point, we don't even know that the builtin is target-specific.
    return std::nullopt;
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    // These are actually NYI, but that will be reported by emitBuiltinExpr.
    // At this point, we don't even know that the builtin is target-specific.
    return std::nullopt;

  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return cgf->emitX86BuiltinExpr(builtinID, e);

  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
  case llvm::Triple::r600:
    // These are actually NYI, but that will be reported by emitBuiltinExpr.
    // At this point, we don't even know that the builtin is target-specific.
    return std::nullopt;
  case llvm::Triple::amdgcn:
    return cgf->emitAMDGPUBuiltinExpr(builtinID, e);
  case llvm::Triple::systemz:
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
  case llvm::Triple::hexagon:
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    // These are actually NYI, but that will be reported by emitBuiltinExpr.
    // At this point, we don't even know that the builtin is target-specific.
    return std::nullopt;
  default:
    return std::nullopt;
  }
}
2364
/// Entry point for target-specific builtin emission. If \p builtinID is an
/// aux-target builtin (e.g. host builtins seen while compiling for a device),
/// dispatch on the aux target's architecture; otherwise dispatch on the main
/// target's architecture.
std::optional<mlir::Value>
CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
                                      ReturnValueSlot &returnValue) {
  // NOTE(review): the signature above and the `return emitTargetArchBuiltinExpr(`
  // line below were reconstructed (extraction dropped them) -- confirm.
  if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return emitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
        returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
                                   getTarget().getTriple().getArch());
}
2378
/// Emit the \p idx-th call argument, constant-folding it when the builtin
/// requires an integer-constant expression (ICE) at that position.
///
/// \param iceArguments bitmask from ASTContext::GetBuiltinType: bit \p idx is
///        set when argument \p idx must be an ICE.
/// \param idx zero-based argument index.
/// \param argExpr the argument expression to emit.
/// \returns the emitted scalar value, or a cir constant-int when the argument
///          is required to be constant (so the intrinsic gets a ConstantInt).
mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(
    const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
  // NOTE(review): the signature line above was reconstructed -- confirm.
  mlir::Value arg = {};
  if ((iceArguments & (1 << idx)) == 0) {
    arg = emitScalarExpr(argExpr);
  } else {
    // If this is required to be a constant, constant fold it so that we
    // know that the generated intrinsic gets a ConstantInt.
    const std::optional<llvm::APSInt> result =
        argExpr->getIntegerConstantExpr(getContext());
    // TODO(review): the initializer call above was dropped by extraction and
    // reconstructed from context -- confirm against upstream.
    assert(result && "Expected argument to be a constant");
    arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
  }
  return arg;
}
2394
/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
/// for "fabsf".
///
/// \param fd the declaration of the builtin being referenced.
/// \param builtinID must satisfy BuiltinInfo.isLibFunction.
/// \returns a (possibly newly created) CIR function for the unmangled library
///          name, typed from \p fd.
cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
                                                unsigned builtinID) {
  // NOTE(review): the signature line above and two interior lines (the name
  // buffer declaration and, presumably, a MissingFeatures assertion) were
  // dropped by extraction and reconstructed -- confirm against upstream.
  assert(astContext.BuiltinInfo.isLibFunction(builtinID));

  // Get the name, skip over the __builtin_ prefix (if necessary). We may have
  // to build this up so provide a small stack buffer to handle the vast
  // majority of names.
  llvm::SmallString<64> name;

  // "__builtin_" is 10 characters, hence substr(10).
  name = astContext.BuiltinInfo.getName(builtinID).substr(10);

  GlobalDecl d(fd);
  mlir::Type type = convertType(fd->getType());
  return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
}
2413
/// Emit the boolean argument of __builtin_assume. When the Builtin sanitizer
/// is disabled this is just the boolean evaluation of the expression; with
/// the sanitizer enabled the checked path is not yet implemented, so an NYI
/// diagnostic is issued and a null value returned.
mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *e) {
  // NOTE(review): the signature line above was reconstructed (extraction
  // dropped it) -- confirm against upstream.
  mlir::Value argValue = evaluateExprAsBool(e);
  if (!sanOpts.has(SanitizerKind::Builtin))
    return argValue;

  // Sanitizer-instrumented argument checking is not implemented yet.
  cgm.errorNYI(e->getSourceRange(),
               "emitCheckedArgForAssume: sanitizers are NYI");
  return {};
}
2424
2425void CIRGenFunction::emitVAStart(mlir::Value vaList) {
2426 // LLVM codegen casts to *i8, no real gain on doing this for CIRGen this
2427 // early, defer to LLVM lowering.
2428 cir::VAStartOp::create(builder, vaList.getLoc(), vaList);
2429}
2430
2431void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
2432 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
2433}
2434
// FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
// default this lowers to llvm.va_arg which is incomplete and not ABI-compliant
// on most targets so cir.va_arg will need some ABI handling in LoweringPrepare.
/// Emit a va_arg expression as a cir.va_arg op over the va_list lvalue.
mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *ve) {
  // NOTE(review): the signature line above was reconstructed (extraction
  // dropped it) -- confirm against upstream.
  assert(!cir::MissingFeatures::msabi());
  assert(!cir::MissingFeatures::vlas());
  mlir::Location loc = cgm.getLoc(ve->getExprLoc());
  mlir::Type type = convertType(ve->getType());
  // The va_list operand is the pointer to the list object, not its value.
  mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
  return cir::VAArgOp::create(builder, loc, type, vaList);
}
2446
/// Emit __builtin_object_size / __builtin_dynamic_object_size as a
/// cir.objsize op (or a constant when the query cannot be lowered).
///
/// \param e the pointer expression whose object size is queried.
/// \param type the builtin's flag argument (0-3): bit 1 selects min vs max,
///        bit 0 selects the closest-subobject variants (unsupported in CIR).
/// \param resType integer type of the result.
/// \param emittedE optional pre-emitted value for \p e, so \p e's side
///        effects are not run a second time.
/// \param isDynamic true for the __builtin_dynamic_object_size flavor.
mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
                                                  cir::IntType resType,
                                                  mlir::Value emittedE,
                                                  bool isDynamic) {
  // NOTE(review): a content line was dropped by extraction here (likely an
  // assertion) -- confirm against upstream.

  // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate e for side-effects. In either case, just like original LLVM
  // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
  // The "unknown" answer is 0 for min-mode queries and -1 (all ones) for max.
  if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
    return builder.getConstInt(getLoc(e->getSourceRange()), resType,
                               (type & 2) ? 0 : -1);

  mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
  assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
         "Non-pointer passed to __builtin_object_size?");

  // NOTE(review): another dropped content line here -- confirm upstream.

  // Extract the min/max mode from type. CIR only supports type 0
  // (max, whole object) and type 2 (min, whole object), not type 1 or 3
  // (closest subobject variants).
  const bool min = ((type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
  auto op =
      cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
                             min, /*nullUnknown=*/true, isDynamic);
  return op.getResult();
}
2476
/// Try to fold an object-size query to a compile-time constant; when the
/// frontend cannot evaluate it, fall back to emitBuiltinObjectSize, which
/// emits a cir.objsize op. Parameters mirror emitBuiltinObjectSize.
mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize(
    const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
    bool isDynamic) {
  // NOTE(review): the signature line above and the evaluator call in the `if`
  // initializer were dropped by extraction and reconstructed -- confirm the
  // exact evaluation helper against upstream.
  if (std::optional<uint64_t> objectSize =
          e->tryEvaluateObjectSize(getContext(), type))
    return builder.getConstInt(getLoc(e->getSourceRange()), resType,
                               *objectSize);
  return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
}
static StringRef bytes(const std::vector< T, Allocator > &v)
Defines enum values for all the target-independent builtin functions.
static mlir::Value emitSignBit(mlir::Location loc, CIRGenFunction &cgf, mlir::Value val)
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Type decodeFixedType(CIRGenFunction &cgf, ArrayRef< llvm::Intrinsic::IITDescriptor > &infos, mlir::MLIRContext *context)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitBinaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Value makeBinaryAtomicValue(CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, mlir::Type *originalArgType, mlir::Value *emittedArgValue=nullptr, cir::MemOrder ordering=cir::MemOrder::SequentiallyConsistent)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e, bool invert=false)
static std::optional< mlir::Value > emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID, const CallExpr *e, ReturnValueSlot &returnValue, llvm::Triple::ArchType arch)
static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t, cir::IntType intType)
Emit the conversions required to turn the given value into an integer of the given size.
static mlir::Value getCorrectedPtr(mlir::Value argValue, mlir::Type expectedTy, CIRGenBuilderTy &builder)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, bool poisonZero=false)
static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr, cir::SyncScopeKind syncScope)
static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e)
static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static cir::FuncType getIntrinsicType(CIRGenFunction &cgf, mlir::MLIRContext *context, llvm::Intrinsic::ID id)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Type correctIntegerSignedness(mlir::Type iitType, QualType astType, mlir::MLIRContext *context)
Helper function to correct integer signedness for intrinsic arguments and return type.
static RValue emitUnaryMaybeConstrainedFPToIntBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t, mlir::Type resultType)
*collection of selector each with an associated kind and an ordered *collection of selectors A selector has a kind
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ int min(int __a, int __b)
__device__ __2f16 b
__device__ __2f16 float c
cir::SignBitOp createSignBit(mlir::Location loc, mlir::Value val)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy)
cir::PointerType getPointerTo(mlir::Type ty)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::PointerType getVoidPtrTy(clang::LangAS langAS=clang::LangAS::Default)
mlir::Value createAddrSpaceCast(mlir::Location loc, mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
APSInt & getInt()
Definition APValue.h:508
bool isFloat() const
Definition APValue.h:486
bool isInt() const
Definition APValue.h:485
APFloat & getFloat()
Definition APValue.h:522
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
unsigned getIntWidth(QualType T) const
CanQualType VoidPtrTy
Builtin::Context & BuiltinInfo
Definition ASTContext.h:799
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:916
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isLibFunction(unsigned ID) const
Return true if this is a builtin for a libc/libm function, with a "__builtin_" prefix (e....
Definition Builtins.h:309
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
mlir::Value getPointer() const
Definition Address.h:96
mlir::Type getElementType() const
Definition Address.h:123
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
clang::CharUnits getAlignment() const
Definition Address.h:136
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:87
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:110
cir::IntType getSIntNTy(int n)
cir::PointerType getUInt8PtrTy()
cir::IntType getUIntNTy(int n)
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
mlir::Type convertType(clang::QualType t)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
std::optional< mlir::Value > emitAMDGPUBuiltinExpr(unsigned builtinID, const CallExpr *expr)
Emit a call to an AMDGPU builtin function.
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_start)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
void emitVAStart(mlir::Value vaList)
Emits the start of a CIR variable-argument operation (cir.va_start)
mlir::MLIRContext & getMLIRContext()
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
clang::ASTContext & getContext() const
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
clang::ASTContext & getASTContext() const
mlir::Type convertType(clang::QualType type)
clang::DiagnosticsEngine & getDiags() const
cir::FuncOp getBuiltinLibFunction(const FunctionDecl *fd, unsigned builtinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
const llvm::Triple & getTriple() const
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
cir::FuncOp getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType, clang::GlobalDecl gd, bool forVTable, bool dontDefer=false, bool isThunk=false, ForDefinition_t isForDefinition=NotForDefinition, mlir::NamedAttrList extraAttrs={})
mlir::Value getPointer() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
bool isIgnored() const
Definition CIRGenValue.h:52
static RValue getIgnored()
Definition CIRGenValue.h:78
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:260
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
Represents difference between two FPOptions values.
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
QualType getReturnType() const
Definition Decl.h:2845
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8472
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8514
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:343
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:747
bool isBlockPointerType() const
Definition TypeBase.h:8645
bool isPointerType() const
Definition TypeBase.h:8625
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9113
bool isObjCObjectPointerType() const
Definition TypeBase.h:8804
bool isFloatingType() const
Definition Type.cpp:2341
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2284
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4960
const Expr * getSubExpr() const
Definition Expr.h:4976
QualType getType() const
Definition Decl.h:723
bool isMatchingAddressSpace(mlir::ptr::MemorySpaceAttrInterface cirAS, clang::LangAS as)
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
U cast(CodeGen::Address addr)
Definition Address.h:327
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
static bool builtinCheckKind()
static bool addressSpace()
static bool asmLabelAttr()
static bool msvcBuiltins()
static bool builtinCallF128()
static bool isPPC_FP128Ty()
static bool emitCheckedInBoundsGEP()
static bool fpConstraints()
static bool countedBySize()
static bool opCallImplicitObjectSizeArgs()
static bool fastMathFlags()
static bool builtinCall()
static bool generateDebugInfo()
cir::PointerType allocaInt8PtrTy
void* in alloca address space
mlir::ptr::MemorySpaceAttrInterface getCIRAllocaAddressSpace() const
cir::PointerType voidPtrTy
void* in address space 0
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
bool hasSideEffects() const
Return true if the evaluated expression has side effects.
Definition Expr.h:642
#define conj(__x)
Definition tgmath.h:1303