CIRGenBuiltin.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
29#include "llvm/Support/ErrorHandling.h"
30
31using namespace clang;
32using namespace clang::CIRGen;
33using namespace llvm;
34
35static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd,
36 const CallExpr *e, mlir::Operation *calleeValue) {
37 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
38 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
39}
40
41template <typename Op>
42static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
43 bool poisonZero = false) {
45
46 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
47 CIRGenBuilderTy &builder = cgf.getBuilder();
48
49 Op op;
50 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
51 std::is_same_v<Op, cir::BitCtzOp>)
52 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
53 else
54 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
55
56 mlir::Value result = op.getResult();
57 mlir::Type exprTy = cgf.convertType(e->getType());
58 if (exprTy != result.getType())
59 result = builder.createIntCast(result, exprTy);
60
61 return RValue::get(result);
62}
63
64/// Emit the conversions required to turn the given value into an
65/// integer of the given size.
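/// For example, a pointer operand of a __sync_* builtin is converted with a
/// ptr-to-int cast so the atomic operation can be performed on an integer of
/// pointer width.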
66static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
67 cir::IntType intType) {
68 v = cgf.emitToMemory(v, t);
69
70 if (mlir::isa<cir::PointerType>(v.getType()))
71 return cgf.getBuilder().createPtrToInt(v, intType);
72
73 assert(v.getType() == intType);
74 return v;
75}
76
77static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
78 mlir::Type resultType) {
79 v = cgf.emitFromMemory(v, t);
80
81 if (mlir::isa<cir::PointerType>(resultType))
82 return cgf.getBuilder().createIntToPtr(v, resultType);
83
84 assert(v.getType() == resultType);
85 return v;
86}
87
88static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e) {
89 ASTContext &astContext = cgf.getContext();
90 Address ptr = cgf.emitPointerWithAlignment(e->getArg(0));
91 unsigned bytes =
92 mlir::isa<cir::PointerType>(ptr.getElementType())
93 ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
96
97 unsigned align = ptr.getAlignment().getQuantity();
98 if (align % bytes != 0) {
99 DiagnosticsEngine &diags = cgf.cgm.getDiags();
100 diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
101 // Force address to be at least naturally-aligned.
103 }
104 return ptr;
105}
106
107/// Utility to insert an atomic fetch operation based on the given
108/// cir::AtomicFetchKind and the expression node.
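/// This is currently reached through emitBinaryAtomicPost for the
/// __sync_<op>_and_fetch builtins; the emitted cir.atomic.fetch returns the
/// value the memory location held before the update.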
109static mlir::Value makeBinaryAtomicValue(
110 CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
111 mlir::Type *originalArgType, mlir::Value *emittedArgValue = nullptr,
112 cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
113
114 QualType type = expr->getType();
115 QualType ptrType = expr->getArg(0)->getType();
116
117 assert(ptrType->isPointerType());
118 assert(
121 expr->getArg(1)->getType()));
122
123 Address destAddr = checkAtomicAlignment(cgf, expr);
124 CIRGenBuilderTy &builder = cgf.getBuilder();
125
126 mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
127 mlir::Type valueType = val.getType();
128 mlir::Value destValue = destAddr.emitRawPointer();
129
130 if (ptrType->getPointeeType()->isPointerType()) {
131 // Pointer to pointer
132 // `cir.atomic.fetch` expects a pointer to an integer type, so we cast
133 // ptr<ptr<T>> to ptr<intPtrSize>
134 cir::IntType ptrSizeInt =
135 builder.getSIntNTy(cgf.getContext().getTypeSize(ptrType));
136 destValue =
137 builder.createBitcast(destValue, builder.getPointerTo(ptrSizeInt));
138 val = emitToInt(cgf, val, type, ptrSizeInt);
139 } else {
140 // Pointer to integer type
141 cir::IntType intType =
142 type->isUnsignedIntegerType()
143 ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
144 : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
145 val = emitToInt(cgf, val, type, intType);
146 }
147
148 // These output arguments are needed for post atomic fetch operations
149 // that compute the result of the operation as the return value of the
150 // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
151 // memory location and returns the old value.
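// For example, __sync_add_and_fetch(p, v) is emitted as an atomic fetch-add
// returning the old value, followed by an ordinary add of `v` to reproduce
// the post-operation result.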
152 if (emittedArgValue) {
153 *emittedArgValue = val;
154 *originalArgType = valueType;
155 }
156
157 auto rmwi = cir::AtomicFetchOp::create(
158 builder, cgf.getLoc(expr->getSourceRange()), destValue, val, kind,
159 ordering, false, /* is volatile */
160 true); /* fetch first */
161 return rmwi->getResult(0);
162}
163
164static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
165 cir::AtomicFetchKind atomicOpkind,
166 const CallExpr *e, cir::BinOpKind binopKind,
167 bool invert = false) {
168 mlir::Value emittedArgValue;
169 mlir::Type originalArgType;
170 clang::QualType typ = e->getType();
171 mlir::Value result = makeBinaryAtomicValue(
172 cgf, atomicOpkind, e, &originalArgType, &emittedArgValue);
173 CIRGenBuilderTy &builder = cgf.getBuilder();
174 result = cir::BinOp::create(builder, result.getLoc(), binopKind, result,
175 emittedArgValue);
176
177 if (invert)
178 result = cir::UnaryOp::create(builder, result.getLoc(),
179 cir::UnaryOpKind::Not, result);
180
181 result = emitFromInt(cgf, result, typ, originalArgType);
182 return RValue::get(result);
183}
184
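/// Emit a cir.atomic.fence for the __atomic_thread_fence and
/// __atomic_signal_fence style builtins; the sync scope distinguishes the
/// thread-level and signal-level fences.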
185static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr,
186 cir::SyncScopeKind syncScope) {
187 CIRGenBuilderTy &builder = cgf.getBuilder();
188 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
189
190 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
191 cir::AtomicFenceOp::create(
192 builder, loc, memOrder,
193 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
194 };
195
196 cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
197 /*isLoad*/ false, /*isFence*/ true,
198 emitAtomicOpCallBackFn);
199}
200
201namespace {
202struct WidthAndSignedness {
203 unsigned width;
204 bool isSigned;
205};
206} // namespace
207
208static WidthAndSignedness
209getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
210 const clang::QualType type) {
211 assert(type->isIntegerType() && "Given type is not an integer.");
212 unsigned width = type->isBooleanType() ? 1
213 : type->isBitIntType() ? astContext.getIntWidth(type)
214 : astContext.getTypeInfo(type).Width;
215 bool isSigned = type->isSignedIntegerType();
216 return {width, isSigned};
217}
218
219// Given one or more integer types, this function produces an integer type that
220// encompasses them: any value in one of the given types could be expressed in
221// the encompassing type.
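// For example, encompassing a 32-bit unsigned type and a 32-bit signed type
// yields a 33-bit signed type: the signed result needs one extra bit to
// represent every unsigned 32-bit value.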
222static struct WidthAndSignedness
223EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
224 assert(types.size() > 0 && "Empty list of types.");
225
226 // If any of the given types is signed, we must return a signed type.
227 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
228
229 // The encompassing type must have a width greater than or equal to the width
230 // of the specified types. Additionally, if the encompassing type is signed,
231 // its width must be strictly greater than the width of any unsigned types
232 // given.
233 unsigned width = 0;
234 for (const auto &type : types)
235 width = std::max(width, type.width + (isSigned && !type.isSigned));
236
237 return {width, isSigned};
238}
239
240RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
241 mlir::Value input = emitScalarExpr(e->getArg(0));
242 mlir::Value amount = emitScalarExpr(e->getArg(1));
243
244 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
245 // and amount, but cir.rotate requires them to have the same type. Cast amount
246 // to the type of input when necessary.
248
249 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
250 amount, isRotateLeft);
251 return RValue::get(r);
252}
253
254template <class Operation>
255static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf,
256 const CallExpr &e) {
257 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
258
261
262 auto call =
263 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
264 return RValue::get(call->getResult(0));
265}
266
267template <class Operation>
268static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e) {
269 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
270 auto call =
271 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
272 return RValue::get(call->getResult(0));
273}
274
276 unsigned builtinID) {
277
278 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
279 cgf.cgm.errorNYI(
280 e->getSourceRange(),
281 std::string("unimplemented X86 library function builtin call: ") +
282 cgf.getContext().BuiltinInfo.getName(builtinID));
283 } else {
284 cgf.cgm.errorNYI(e->getSourceRange(),
285 std::string("unimplemented X86 builtin call: ") +
286 cgf.getContext().BuiltinInfo.getName(builtinID));
287 }
288
289 return cgf.getUndefRValue(e->getType());
290}
291
292static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e,
293 unsigned builtinID) {
294 assert(builtinID == Builtin::BI__builtin_alloca ||
295 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
296 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
297
298 // Get alloca size input
299 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
300
301 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
302 const TargetInfo &ti = cgf.getContext().getTargetInfo();
303 const CharUnits suitableAlignmentInBytes =
304 cgf.getContext().toCharUnitsFromBits(ti.getSuitableAlign());
305
306 // Emit the alloca op with type `u8 *` to match the semantics of
307 // `llvm.alloca`. We later bitcast the type to `void *` to match the
308 // semantics of C/C++
309 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
310 // pointer of type `void *`. This will require a change to the allocaOp
311 // verifier.
312 CIRGenBuilderTy &builder = cgf.getBuilder();
313 mlir::Value allocaAddr = builder.createAlloca(
314 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
315 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
316
317 // Initialize the allocated buffer if required.
318 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
319 // Initialize the alloca with the given size and alignment according to
320 // the lang opts. Only the trivial non-initialization is supported for
321 // now.
322
323 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
324 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
325 // Nothing to initialize.
326 break;
327 case LangOptions::TrivialAutoVarInitKind::Zero:
328 case LangOptions::TrivialAutoVarInitKind::Pattern:
329 cgf.cgm.errorNYI("trivial auto var init");
330 break;
331 }
332 }
333
334 // An alloca will always return a pointer to the alloca (stack) address
335 // space. This address space need not be the same as the AST / Language
336 // default (e.g. in C / C++ auto vars are in the generic address space). At
337 // the AST level this is handled within CreateTempAlloca et al., but for the
338 // builtin / dynamic alloca we have to handle it here.
339
343 cgf.cgm.errorNYI(e->getSourceRange(),
344 "Non-default address space for alloca");
345 }
346
347 // Bitcast the alloca to the expected type.
348 return RValue::get(builder.createBitcast(
349 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
350}
351
352static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e,
353 unsigned builtinID) {
354 std::optional<bool> errnoOverriden;
355 // errnoOverriden is true if math-errno is overridden via the
356 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
357 // which implies math-errno.
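// For example, with fast-math a sqrt() call may be emitted as an intrinsic,
// but when math-errno is in effect it has to remain a library call so that
// errno can be set.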
358 if (e->hasStoredFPFeatures()) {
359 FPOptionsOverride op = e->getFPFeatures();
360 if (op.hasMathErrnoOverride())
361 errnoOverriden = op.getMathErrnoOverride();
362 }
363 // True if '__attribute__((optnone))' is used. This attribute overrides
364 // fast-math, which implies math-errno.
365 bool optNone =
366 cgf.curFuncDecl && cgf.curFuncDecl->hasAttr<OptimizeNoneAttr>();
367 bool isOptimizationEnabled = cgf.cgm.getCodeGenOpts().OptimizationLevel != 0;
368 bool generateFPMathIntrinsics =
370 builtinID, cgf.cgm.getTriple(), errnoOverriden,
371 cgf.getLangOpts().MathErrno, optNone, isOptimizationEnabled);
372 return generateFPMathIntrinsics;
373}
374
375static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e,
376 unsigned builtinID) {
378 switch (builtinID) {
379 case Builtin::BIacos:
380 case Builtin::BIacosf:
381 case Builtin::BIacosl:
382 case Builtin::BI__builtin_acos:
383 case Builtin::BI__builtin_acosf:
384 case Builtin::BI__builtin_acosf16:
385 case Builtin::BI__builtin_acosl:
386 case Builtin::BI__builtin_acosf128:
387 case Builtin::BI__builtin_elementwise_acos:
388 case Builtin::BIasin:
389 case Builtin::BIasinf:
390 case Builtin::BIasinl:
391 case Builtin::BI__builtin_asin:
392 case Builtin::BI__builtin_asinf:
393 case Builtin::BI__builtin_asinf16:
394 case Builtin::BI__builtin_asinl:
395 case Builtin::BI__builtin_asinf128:
396 case Builtin::BI__builtin_elementwise_asin:
397 case Builtin::BIatan:
398 case Builtin::BIatanf:
399 case Builtin::BIatanl:
400 case Builtin::BI__builtin_atan:
401 case Builtin::BI__builtin_atanf:
402 case Builtin::BI__builtin_atanf16:
403 case Builtin::BI__builtin_atanl:
404 case Builtin::BI__builtin_atanf128:
405 case Builtin::BI__builtin_elementwise_atan:
406 case Builtin::BIatan2:
407 case Builtin::BIatan2f:
408 case Builtin::BIatan2l:
409 case Builtin::BI__builtin_atan2:
410 case Builtin::BI__builtin_atan2f:
411 case Builtin::BI__builtin_atan2f16:
412 case Builtin::BI__builtin_atan2l:
413 case Builtin::BI__builtin_atan2f128:
414 case Builtin::BI__builtin_elementwise_atan2:
415 return RValue::getIgnored();
416 case Builtin::BIceil:
417 case Builtin::BIceilf:
418 case Builtin::BIceill:
419 case Builtin::BI__builtin_ceil:
420 case Builtin::BI__builtin_ceilf:
421 case Builtin::BI__builtin_ceilf16:
422 case Builtin::BI__builtin_ceill:
423 case Builtin::BI__builtin_ceilf128:
425 case Builtin::BI__builtin_elementwise_ceil:
426 case Builtin::BIcopysign:
427 case Builtin::BIcopysignf:
428 case Builtin::BIcopysignl:
429 case Builtin::BI__builtin_copysign:
430 case Builtin::BI__builtin_copysignf:
431 case Builtin::BI__builtin_copysignf16:
432 case Builtin::BI__builtin_copysignl:
433 case Builtin::BI__builtin_copysignf128:
434 return RValue::getIgnored();
435 case Builtin::BIcos:
436 case Builtin::BIcosf:
437 case Builtin::BIcosl:
438 case Builtin::BI__builtin_cos:
439 case Builtin::BI__builtin_cosf:
440 case Builtin::BI__builtin_cosf16:
441 case Builtin::BI__builtin_cosl:
442 case Builtin::BI__builtin_cosf128:
444 case Builtin::BI__builtin_elementwise_cos:
445 case Builtin::BIcosh:
446 case Builtin::BIcoshf:
447 case Builtin::BIcoshl:
448 case Builtin::BI__builtin_cosh:
449 case Builtin::BI__builtin_coshf:
450 case Builtin::BI__builtin_coshf16:
451 case Builtin::BI__builtin_coshl:
452 case Builtin::BI__builtin_coshf128:
453 case Builtin::BI__builtin_elementwise_cosh:
454 return RValue::getIgnored();
455 case Builtin::BIexp:
456 case Builtin::BIexpf:
457 case Builtin::BIexpl:
458 case Builtin::BI__builtin_exp:
459 case Builtin::BI__builtin_expf:
460 case Builtin::BI__builtin_expf16:
461 case Builtin::BI__builtin_expl:
462 case Builtin::BI__builtin_expf128:
464 case Builtin::BI__builtin_elementwise_exp:
465 return RValue::getIgnored();
466 case Builtin::BIexp2:
467 case Builtin::BIexp2f:
468 case Builtin::BIexp2l:
469 case Builtin::BI__builtin_exp2:
470 case Builtin::BI__builtin_exp2f:
471 case Builtin::BI__builtin_exp2f16:
472 case Builtin::BI__builtin_exp2l:
473 case Builtin::BI__builtin_exp2f128:
475 case Builtin::BI__builtin_elementwise_exp2:
476 case Builtin::BI__builtin_exp10:
477 case Builtin::BI__builtin_exp10f:
478 case Builtin::BI__builtin_exp10f16:
479 case Builtin::BI__builtin_exp10l:
480 case Builtin::BI__builtin_exp10f128:
481 case Builtin::BI__builtin_elementwise_exp10:
482 return RValue::getIgnored();
483 case Builtin::BIfabs:
484 case Builtin::BIfabsf:
485 case Builtin::BIfabsl:
486 case Builtin::BI__builtin_fabs:
487 case Builtin::BI__builtin_fabsf:
488 case Builtin::BI__builtin_fabsf16:
489 case Builtin::BI__builtin_fabsl:
490 case Builtin::BI__builtin_fabsf128:
492 case Builtin::BIfloor:
493 case Builtin::BIfloorf:
494 case Builtin::BIfloorl:
495 case Builtin::BI__builtin_floor:
496 case Builtin::BI__builtin_floorf:
497 case Builtin::BI__builtin_floorf16:
498 case Builtin::BI__builtin_floorl:
499 case Builtin::BI__builtin_floorf128:
501 case Builtin::BI__builtin_elementwise_floor:
502 case Builtin::BIfma:
503 case Builtin::BIfmaf:
504 case Builtin::BIfmal:
505 case Builtin::BI__builtin_fma:
506 case Builtin::BI__builtin_fmaf:
507 case Builtin::BI__builtin_fmaf16:
508 case Builtin::BI__builtin_fmal:
509 case Builtin::BI__builtin_fmaf128:
510 case Builtin::BI__builtin_elementwise_fma:
511 case Builtin::BIfmax:
512 case Builtin::BIfmaxf:
513 case Builtin::BIfmaxl:
514 case Builtin::BI__builtin_fmax:
515 case Builtin::BI__builtin_fmaxf:
516 case Builtin::BI__builtin_fmaxf16:
517 case Builtin::BI__builtin_fmaxl:
518 case Builtin::BI__builtin_fmaxf128:
519 case Builtin::BIfmin:
520 case Builtin::BIfminf:
521 case Builtin::BIfminl:
522 case Builtin::BI__builtin_fmin:
523 case Builtin::BI__builtin_fminf:
524 case Builtin::BI__builtin_fminf16:
525 case Builtin::BI__builtin_fminl:
526 case Builtin::BI__builtin_fminf128:
527 case Builtin::BIfmaximum_num:
528 case Builtin::BIfmaximum_numf:
529 case Builtin::BIfmaximum_numl:
530 case Builtin::BI__builtin_fmaximum_num:
531 case Builtin::BI__builtin_fmaximum_numf:
532 case Builtin::BI__builtin_fmaximum_numf16:
533 case Builtin::BI__builtin_fmaximum_numl:
534 case Builtin::BI__builtin_fmaximum_numf128:
535 case Builtin::BIfminimum_num:
536 case Builtin::BIfminimum_numf:
537 case Builtin::BIfminimum_numl:
538 case Builtin::BI__builtin_fminimum_num:
539 case Builtin::BI__builtin_fminimum_numf:
540 case Builtin::BI__builtin_fminimum_numf16:
541 case Builtin::BI__builtin_fminimum_numl:
542 case Builtin::BI__builtin_fminimum_numf128:
543 case Builtin::BIfmod:
544 case Builtin::BIfmodf:
545 case Builtin::BIfmodl:
546 case Builtin::BI__builtin_fmod:
547 case Builtin::BI__builtin_fmodf:
548 case Builtin::BI__builtin_fmodf16:
549 case Builtin::BI__builtin_fmodl:
550 case Builtin::BI__builtin_fmodf128:
551 case Builtin::BI__builtin_elementwise_fmod:
552 case Builtin::BIlog:
553 case Builtin::BIlogf:
554 case Builtin::BIlogl:
555 case Builtin::BI__builtin_log:
556 case Builtin::BI__builtin_logf:
557 case Builtin::BI__builtin_logf16:
558 case Builtin::BI__builtin_logl:
559 case Builtin::BI__builtin_logf128:
560 case Builtin::BI__builtin_elementwise_log:
561 case Builtin::BIlog10:
562 case Builtin::BIlog10f:
563 case Builtin::BIlog10l:
564 case Builtin::BI__builtin_log10:
565 case Builtin::BI__builtin_log10f:
566 case Builtin::BI__builtin_log10f16:
567 case Builtin::BI__builtin_log10l:
568 case Builtin::BI__builtin_log10f128:
569 case Builtin::BI__builtin_elementwise_log10:
570 case Builtin::BIlog2:
571 case Builtin::BIlog2f:
572 case Builtin::BIlog2l:
573 case Builtin::BI__builtin_log2:
574 case Builtin::BI__builtin_log2f:
575 case Builtin::BI__builtin_log2f16:
576 case Builtin::BI__builtin_log2l:
577 case Builtin::BI__builtin_log2f128:
578 case Builtin::BI__builtin_elementwise_log2:
579 case Builtin::BInearbyint:
580 case Builtin::BInearbyintf:
581 case Builtin::BInearbyintl:
582 case Builtin::BI__builtin_nearbyint:
583 case Builtin::BI__builtin_nearbyintf:
584 case Builtin::BI__builtin_nearbyintl:
585 case Builtin::BI__builtin_nearbyintf128:
586 case Builtin::BI__builtin_elementwise_nearbyint:
587 case Builtin::BIpow:
588 case Builtin::BIpowf:
589 case Builtin::BIpowl:
590 case Builtin::BI__builtin_pow:
591 case Builtin::BI__builtin_powf:
592 case Builtin::BI__builtin_powf16:
593 case Builtin::BI__builtin_powl:
594 case Builtin::BI__builtin_powf128:
595 case Builtin::BI__builtin_elementwise_pow:
596 case Builtin::BIrint:
597 case Builtin::BIrintf:
598 case Builtin::BIrintl:
599 case Builtin::BI__builtin_rint:
600 case Builtin::BI__builtin_rintf:
601 case Builtin::BI__builtin_rintf16:
602 case Builtin::BI__builtin_rintl:
603 case Builtin::BI__builtin_rintf128:
604 case Builtin::BI__builtin_elementwise_rint:
605 case Builtin::BIround:
606 case Builtin::BIroundf:
607 case Builtin::BIroundl:
608 case Builtin::BI__builtin_round:
609 case Builtin::BI__builtin_roundf:
610 case Builtin::BI__builtin_roundf16:
611 case Builtin::BI__builtin_roundl:
612 case Builtin::BI__builtin_roundf128:
613 case Builtin::BI__builtin_elementwise_round:
614 case Builtin::BIroundeven:
615 case Builtin::BIroundevenf:
616 case Builtin::BIroundevenl:
617 case Builtin::BI__builtin_roundeven:
618 case Builtin::BI__builtin_roundevenf:
619 case Builtin::BI__builtin_roundevenf16:
620 case Builtin::BI__builtin_roundevenl:
621 case Builtin::BI__builtin_roundevenf128:
622 case Builtin::BI__builtin_elementwise_roundeven:
623 case Builtin::BIsin:
624 case Builtin::BIsinf:
625 case Builtin::BIsinl:
626 case Builtin::BI__builtin_sin:
627 case Builtin::BI__builtin_sinf:
628 case Builtin::BI__builtin_sinf16:
629 case Builtin::BI__builtin_sinl:
630 case Builtin::BI__builtin_sinf128:
631 case Builtin::BI__builtin_elementwise_sin:
632 case Builtin::BIsinh:
633 case Builtin::BIsinhf:
634 case Builtin::BIsinhl:
635 case Builtin::BI__builtin_sinh:
636 case Builtin::BI__builtin_sinhf:
637 case Builtin::BI__builtin_sinhf16:
638 case Builtin::BI__builtin_sinhl:
639 case Builtin::BI__builtin_sinhf128:
640 case Builtin::BI__builtin_elementwise_sinh:
641 case Builtin::BI__builtin_sincospi:
642 case Builtin::BI__builtin_sincospif:
643 case Builtin::BI__builtin_sincospil:
644 case Builtin::BIsincos:
645 case Builtin::BIsincosf:
646 case Builtin::BIsincosl:
647 case Builtin::BI__builtin_sincos:
648 case Builtin::BI__builtin_sincosf:
649 case Builtin::BI__builtin_sincosf16:
650 case Builtin::BI__builtin_sincosl:
651 case Builtin::BI__builtin_sincosf128:
652 case Builtin::BIsqrt:
653 case Builtin::BIsqrtf:
654 case Builtin::BIsqrtl:
655 case Builtin::BI__builtin_sqrt:
656 case Builtin::BI__builtin_sqrtf:
657 case Builtin::BI__builtin_sqrtf16:
658 case Builtin::BI__builtin_sqrtl:
659 case Builtin::BI__builtin_sqrtf128:
660 case Builtin::BI__builtin_elementwise_sqrt:
661 case Builtin::BItan:
662 case Builtin::BItanf:
663 case Builtin::BItanl:
664 case Builtin::BI__builtin_tan:
665 case Builtin::BI__builtin_tanf:
666 case Builtin::BI__builtin_tanf16:
667 case Builtin::BI__builtin_tanl:
668 case Builtin::BI__builtin_tanf128:
669 case Builtin::BI__builtin_elementwise_tan:
670 case Builtin::BItanh:
671 case Builtin::BItanhf:
672 case Builtin::BItanhl:
673 case Builtin::BI__builtin_tanh:
674 case Builtin::BI__builtin_tanhf:
675 case Builtin::BI__builtin_tanhf16:
676 case Builtin::BI__builtin_tanhl:
677 case Builtin::BI__builtin_tanhf128:
678 case Builtin::BI__builtin_elementwise_tanh:
679 case Builtin::BItrunc:
680 case Builtin::BItruncf:
681 case Builtin::BItruncl:
682 case Builtin::BI__builtin_trunc:
683 case Builtin::BI__builtin_truncf:
684 case Builtin::BI__builtin_truncf16:
685 case Builtin::BI__builtin_truncl:
686 case Builtin::BI__builtin_truncf128:
687 case Builtin::BI__builtin_elementwise_trunc:
688 case Builtin::BIlround:
689 case Builtin::BIlroundf:
690 case Builtin::BIlroundl:
691 case Builtin::BI__builtin_lround:
692 case Builtin::BI__builtin_lroundf:
693 case Builtin::BI__builtin_lroundl:
694 case Builtin::BI__builtin_lroundf128:
695 case Builtin::BIllround:
696 case Builtin::BIllroundf:
697 case Builtin::BIllroundl:
698 case Builtin::BI__builtin_llround:
699 case Builtin::BI__builtin_llroundf:
700 case Builtin::BI__builtin_llroundl:
701 case Builtin::BI__builtin_llroundf128:
702 case Builtin::BIlrint:
703 case Builtin::BIlrintf:
704 case Builtin::BIlrintl:
705 case Builtin::BI__builtin_lrint:
706 case Builtin::BI__builtin_lrintf:
707 case Builtin::BI__builtin_lrintl:
708 case Builtin::BI__builtin_lrintf128:
709 case Builtin::BIllrint:
710 case Builtin::BIllrintf:
711 case Builtin::BIllrintl:
712 case Builtin::BI__builtin_llrint:
713 case Builtin::BI__builtin_llrintf:
714 case Builtin::BI__builtin_llrintl:
715 case Builtin::BI__builtin_llrintf128:
716 case Builtin::BI__builtin_ldexp:
717 case Builtin::BI__builtin_ldexpf:
718 case Builtin::BI__builtin_ldexpl:
719 case Builtin::BI__builtin_ldexpf16:
720 case Builtin::BI__builtin_ldexpf128:
721 case Builtin::BI__builtin_elementwise_ldexp:
722 default:
723 break;
724 }
725
726 return RValue::getIgnored();
727}
728
729RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
730 const CallExpr *e,
731 ReturnValueSlot returnValue) {
732 mlir::Location loc = getLoc(e->getSourceRange());
733
734 // See if we can constant fold this builtin. If so, don't emit it at all.
735 // TODO: Extend this handling to all builtin calls that we can constant-fold.
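// For example, a call such as __builtin_abs(-3) evaluates to the constant 3
// here, so no call is emitted for it.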
736 Expr::EvalResult result;
737 if (e->isPRValue() && e->EvaluateAsRValue(result, cgm.getASTContext()) &&
738 !result.hasSideEffects()) {
739 if (result.Val.isInt())
740 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
741 if (result.Val.isFloat()) {
742 // Note: we are using the result type of the CallExpr to determine the type
743 // of the constant. Classic codegen uses the result value to determine the
744 // type. We feel it should be OK to use the expression type because it is
745 // hard to imagine a builtin function evaluating to a value that
746 // over/underflows its own defined type.
747 mlir::Type type = convertType(e->getType());
748 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
749 }
750 }
751
752 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
753
755
756 // If the builtin has been declared explicitly with an assembler label,
757 // disable the specialized emitting below. Ideally we should communicate the
758 // rename in IR, or at least avoid generating the intrinsic calls that are
759 // likely to get lowered to the renamed library functions.
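// For example, declaring `extern double sin(double) __asm__("name");`
// redirects calls to that symbol, so emitting a sin intrinsic here could
// bypass the intended library function.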
760 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
761
762 bool generateFPMathIntrinsics =
763 shouldCIREmitFPMathIntrinsic(*this, e, builtinID);
764
765 if (generateFPMathIntrinsics) {
766 // Try to match the builtinID with a floating point math builtin.
767 RValue rv = tryEmitFPMathIntrinsic(*this, e, builtinIDIfNoAsmLabel);
768
769 // Return the result directly if a math intrinsic was generated.
770 if (!rv.isIgnored()) {
771 return rv;
772 }
773 }
774
776
777 switch (builtinIDIfNoAsmLabel) {
778 default:
779 break;
780
781 // C stdarg builtins.
782 case Builtin::BI__builtin_stdarg_start:
783 case Builtin::BI__builtin_va_start:
784 case Builtin::BI__va_start: {
785 mlir::Value vaList = builtinID == Builtin::BI__va_start
786 ? emitScalarExpr(e->getArg(0))
787 : emitVAListRef(e->getArg(0)).getPointer();
788 mlir::Value count = emitScalarExpr(e->getArg(1));
789 emitVAStart(vaList, count);
790 return {};
791 }
792
793 case Builtin::BI__builtin_va_end:
795 return {};
796 case Builtin::BI__builtin_va_copy: {
797 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
798 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
799 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
800 return {};
801 }
802 case Builtin::BI__assume:
803 case Builtin::BI__builtin_assume: {
804 if (e->getArg(0)->HasSideEffects(getContext()))
805 return RValue::get(nullptr);
806
807 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
808 cir::AssumeOp::create(builder, loc, argValue);
809 return RValue::get(nullptr);
810 }
811
812 case Builtin::BI__builtin_assume_separate_storage: {
813 mlir::Value value0 = emitScalarExpr(e->getArg(0));
814 mlir::Value value1 = emitScalarExpr(e->getArg(1));
815 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
816 return RValue::get(nullptr);
817 }
818
819 case Builtin::BI__builtin_assume_aligned: {
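// For example: p = __builtin_assume_aligned(p, 64); an optional third
// argument supplies a known misalignment offset from that boundary.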
820 const Expr *ptrExpr = e->getArg(0);
821 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
822 mlir::Value offsetValue =
823 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
824
825 std::optional<llvm::APSInt> alignment =
826 e->getArg(1)->getIntegerConstantExpr(getContext());
827 assert(alignment.has_value() &&
828 "the second argument to __builtin_assume_aligned must be an "
829 "integral constant expression");
830
831 mlir::Value result =
832 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
833 alignment->getSExtValue(), offsetValue);
834 return RValue::get(result);
835 }
836
837 case Builtin::BI__builtin_complex: {
838 mlir::Value real = emitScalarExpr(e->getArg(0));
839 mlir::Value imag = emitScalarExpr(e->getArg(1));
840 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
841 return RValue::getComplex(complex);
842 }
843
844 case Builtin::BI__builtin_creal:
845 case Builtin::BI__builtin_crealf:
846 case Builtin::BI__builtin_creall:
847 case Builtin::BIcreal:
848 case Builtin::BIcrealf:
849 case Builtin::BIcreall: {
850 mlir::Value complex = emitComplexExpr(e->getArg(0));
851 mlir::Value real = builder.createComplexReal(loc, complex);
852 return RValue::get(real);
853 }
854
855 case Builtin::BI__builtin_cimag:
856 case Builtin::BI__builtin_cimagf:
857 case Builtin::BI__builtin_cimagl:
858 case Builtin::BIcimag:
859 case Builtin::BIcimagf:
860 case Builtin::BIcimagl: {
861 mlir::Value complex = emitComplexExpr(e->getArg(0));
862 mlir::Value imag = builder.createComplexImag(loc, complex);
863 return RValue::get(imag);
864 }
865
866 case Builtin::BI__builtin_conj:
867 case Builtin::BI__builtin_conjf:
868 case Builtin::BI__builtin_conjl:
869 case Builtin::BIconj:
870 case Builtin::BIconjf:
871 case Builtin::BIconjl: {
872 mlir::Value complex = emitComplexExpr(e->getArg(0));
873 mlir::Value conj = builder.createUnaryOp(getLoc(e->getExprLoc()),
874 cir::UnaryOpKind::Not, complex);
875 return RValue::getComplex(conj);
876 }
877
878 case Builtin::BI__builtin_clrsb:
879 case Builtin::BI__builtin_clrsbl:
880 case Builtin::BI__builtin_clrsbll:
881 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
882
883 case Builtin::BI__builtin_ctzs:
884 case Builtin::BI__builtin_ctz:
885 case Builtin::BI__builtin_ctzl:
886 case Builtin::BI__builtin_ctzll:
887 case Builtin::BI__builtin_ctzg:
889 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
890
891 case Builtin::BI__builtin_clzs:
892 case Builtin::BI__builtin_clz:
893 case Builtin::BI__builtin_clzl:
894 case Builtin::BI__builtin_clzll:
895 case Builtin::BI__builtin_clzg:
897 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
898
899 case Builtin::BI__builtin_ffs:
900 case Builtin::BI__builtin_ffsl:
901 case Builtin::BI__builtin_ffsll:
902 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
903
904 case Builtin::BI__builtin_parity:
905 case Builtin::BI__builtin_parityl:
906 case Builtin::BI__builtin_parityll:
907 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
908
909 case Builtin::BI__lzcnt16:
910 case Builtin::BI__lzcnt:
911 case Builtin::BI__lzcnt64:
913 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
914
915 case Builtin::BI__popcnt16:
916 case Builtin::BI__popcnt:
917 case Builtin::BI__popcnt64:
918 case Builtin::BI__builtin_popcount:
919 case Builtin::BI__builtin_popcountl:
920 case Builtin::BI__builtin_popcountll:
921 case Builtin::BI__builtin_popcountg:
922 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
923
924 case Builtin::BI__builtin_expect:
925 case Builtin::BI__builtin_expect_with_probability: {
926 mlir::Value argValue = emitScalarExpr(e->getArg(0));
927 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
928
929 mlir::FloatAttr probAttr;
930 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
931 llvm::APFloat probability(0.0);
932 const Expr *probArg = e->getArg(2);
933 [[maybe_unused]] bool evalSucceeded =
934 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
935 assert(evalSucceeded &&
936 "probability should be able to evaluate as float");
937 bool loseInfo = false; // ignored
938 probability.convert(llvm::APFloat::IEEEdouble(),
939 llvm::RoundingMode::Dynamic, &loseInfo);
940 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
941 probability);
942 }
943
944 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
945 argValue, expectedValue, probAttr);
946 return RValue::get(result);
947 }
948
949 case Builtin::BI__builtin_bswap16:
950 case Builtin::BI__builtin_bswap32:
951 case Builtin::BI__builtin_bswap64:
952 case Builtin::BI_byteswap_ushort:
953 case Builtin::BI_byteswap_ulong:
954 case Builtin::BI_byteswap_uint64: {
955 mlir::Value arg = emitScalarExpr(e->getArg(0));
956 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
957 }
958
959 case Builtin::BI__builtin_bitreverse8:
960 case Builtin::BI__builtin_bitreverse16:
961 case Builtin::BI__builtin_bitreverse32:
962 case Builtin::BI__builtin_bitreverse64: {
963 mlir::Value arg = emitScalarExpr(e->getArg(0));
964 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
965 }
966
967 case Builtin::BI__builtin_rotateleft8:
968 case Builtin::BI__builtin_rotateleft16:
969 case Builtin::BI__builtin_rotateleft32:
970 case Builtin::BI__builtin_rotateleft64:
971 return emitRotate(e, /*isRotateLeft=*/true);
972
973 case Builtin::BI__builtin_rotateright8:
974 case Builtin::BI__builtin_rotateright16:
975 case Builtin::BI__builtin_rotateright32:
976 case Builtin::BI__builtin_rotateright64:
977 return emitRotate(e, /*isRotateLeft=*/false);
978
979 case Builtin::BI__builtin_coro_id:
980 case Builtin::BI__builtin_coro_promise:
981 case Builtin::BI__builtin_coro_resume:
982 case Builtin::BI__builtin_coro_noop:
983 case Builtin::BI__builtin_coro_destroy:
984 case Builtin::BI__builtin_coro_done:
985 case Builtin::BI__builtin_coro_alloc:
986 case Builtin::BI__builtin_coro_begin:
987 case Builtin::BI__builtin_coro_end:
988 case Builtin::BI__builtin_coro_suspend:
989 case Builtin::BI__builtin_coro_align:
990 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
991 return getUndefRValue(e->getType());
992
993 case Builtin::BI__builtin_coro_frame: {
994 return emitCoroutineFrame();
995 }
996 case Builtin::BI__builtin_coro_free:
997 case Builtin::BI__builtin_coro_size: {
998 GlobalDecl gd{fd};
999 mlir::Type ty = cgm.getTypes().getFunctionType(
1000 cgm.getTypes().arrangeGlobalDeclaration(gd));
1001 const auto *nd = cast<NamedDecl>(gd.getDecl());
1002 cir::FuncOp fnOp =
1003 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
1004 fnOp.setBuiltin(true);
1005 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
1006 returnValue);
1007 }
1008
1009 case Builtin::BI__builtin_constant_p: {
1010 mlir::Type resultType = convertType(e->getType());
1011
1012 const Expr *arg = e->getArg(0);
1013 QualType argType = arg->getType();
1014 // FIXME: The allowance for Obj-C pointers and block pointers is historical
1015 // and likely a mistake.
1016 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
1017 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
1018 // Per the GCC documentation, only numeric constants are recognized after
1019 // inlining.
1020 return RValue::get(
1021 builder.getConstInt(getLoc(e->getSourceRange()),
1022 mlir::cast<cir::IntType>(resultType), 0));
1023 }
1024
1025 if (arg->HasSideEffects(getContext())) {
1026 // The argument is unevaluated, so be conservative if it might have
1027 // side-effects.
1028 return RValue::get(
1029 builder.getConstInt(getLoc(e->getSourceRange()),
1030 mlir::cast<cir::IntType>(resultType), 0));
1031 }
1032
1033 mlir::Value argValue = emitScalarExpr(arg);
1034 if (argType->isObjCObjectPointerType()) {
1035 cgm.errorNYI(e->getSourceRange(),
1036 "__builtin_constant_p: Obj-C object pointer");
1037 return {};
1038 }
1039 argValue = builder.createBitcast(argValue, convertType(argType));
1040
1041 mlir::Value result = cir::IsConstantOp::create(
1042 builder, getLoc(e->getSourceRange()), argValue);
1043 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
1044 result = builder.createBoolToInt(result, resultType);
1045 return RValue::get(result);
1046 }
1047 case Builtin::BI__builtin_dynamic_object_size:
1048 case Builtin::BI__builtin_object_size: {
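// The second argument (0-3) selects whether the maximum or the minimum
// remaining byte count is reported and whether only the closest enclosing
// subobject is considered.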
1049 unsigned type =
1050 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
1051 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
1052
1053 // We pass this builtin onto the optimizer so that it can figure out the
1054 // object size in more complex cases.
1055 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
1056 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
1057 /*EmittedE=*/nullptr, isDynamic));
1058 }
1059
1060 case Builtin::BI__builtin_prefetch: {
1061 auto evaluateOperandAsInt = [&](const Expr *arg) {
1062 Expr::EvalResult res;
1063 [[maybe_unused]] bool evalSucceed =
1064 arg->EvaluateAsInt(res, cgm.getASTContext());
1065 assert(evalSucceed && "expression should be able to evaluate as int");
1066 return res.Val.getInt().getZExtValue();
1067 };
1068
1069 bool isWrite = false;
1070 if (e->getNumArgs() > 1)
1071 isWrite = evaluateOperandAsInt(e->getArg(1));
1072
1073 int locality = 3;
1074 if (e->getNumArgs() > 2)
1075 locality = evaluateOperandAsInt(e->getArg(2));
1076
1077 mlir::Value address = emitScalarExpr(e->getArg(0));
1078 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
1079 return RValue::get(nullptr);
1080 }
1081 case Builtin::BI__builtin_readcyclecounter:
1082 case Builtin::BI__builtin_readsteadycounter:
1083 case Builtin::BI__builtin___clear_cache:
1084 return errorBuiltinNYI(*this, e, builtinID);
1085 case Builtin::BI__builtin_trap:
1086 emitTrap(loc, /*createNewBlock=*/true);
1087 return RValue::getIgnored();
1088 case Builtin::BI__builtin_verbose_trap:
1089 case Builtin::BI__debugbreak:
1090 return errorBuiltinNYI(*this, e, builtinID);
1091 case Builtin::BI__builtin_unreachable:
1092 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
1093 return RValue::getIgnored();
1094 case Builtin::BI__builtin_powi:
1095 case Builtin::BI__builtin_powif:
1096 case Builtin::BI__builtin_powil:
1097 case Builtin::BI__builtin_frexpl:
1098 case Builtin::BI__builtin_frexp:
1099 case Builtin::BI__builtin_frexpf:
1100 case Builtin::BI__builtin_frexpf128:
1101 case Builtin::BI__builtin_frexpf16:
1102 case Builtin::BImodf:
1103 case Builtin::BImodff:
1104 case Builtin::BImodfl:
1105 case Builtin::BI__builtin_modf:
1106 case Builtin::BI__builtin_modff:
1107 case Builtin::BI__builtin_modfl:
1108 case Builtin::BI__builtin_isgreater:
1109 case Builtin::BI__builtin_isgreaterequal:
1110 case Builtin::BI__builtin_isless:
1111 case Builtin::BI__builtin_islessequal:
1112 case Builtin::BI__builtin_islessgreater:
1113 case Builtin::BI__builtin_isunordered:
1114 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
1115 //
1116 // The `__builtin_isfpclass()` builtin is a generalization of functions
1117 // isnan, isinf, isfinite and some others defined by the C standard. It tests
1118 // if the floating-point value, specified by the first argument, falls into
1119 // any of the data classes specified by the second argument.
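// For example, __builtin_isfpclass(x, __FPCLASS_POSINF | __FPCLASS_NEGINF)
// is equivalent to isinf(x).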
1120 case Builtin::BI__builtin_isnan: {
1122 mlir::Value v = emitScalarExpr(e->getArg(0));
1124 mlir::Location loc = getLoc(e->getBeginLoc());
1125 return RValue::get(builder.createBoolToInt(
1126 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
1127 convertType(e->getType())));
1128 }
1129
1130 case Builtin::BI__builtin_issignaling: {
1132 mlir::Value v = emitScalarExpr(e->getArg(0));
1133 mlir::Location loc = getLoc(e->getBeginLoc());
1134 return RValue::get(builder.createBoolToInt(
1135 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
1136 convertType(e->getType())));
1137 }
1138
1139 case Builtin::BI__builtin_isinf: {
1141 mlir::Value v = emitScalarExpr(e->getArg(0));
1143 mlir::Location loc = getLoc(e->getBeginLoc());
1144 return RValue::get(builder.createBoolToInt(
1145 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
1146 convertType(e->getType())));
1147 }
1148 case Builtin::BIfinite:
1149 case Builtin::BI__finite:
1150 case Builtin::BIfinitef:
1151 case Builtin::BI__finitef:
1152 case Builtin::BIfinitel:
1153 case Builtin::BI__finitel:
1154 case Builtin::BI__builtin_isfinite: {
1156 mlir::Value v = emitScalarExpr(e->getArg(0));
1158 mlir::Location loc = getLoc(e->getBeginLoc());
1159 return RValue::get(builder.createBoolToInt(
1160 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
1161 convertType(e->getType())));
1162 }
1163
1164 case Builtin::BI__builtin_isnormal: {
1166 mlir::Value v = emitScalarExpr(e->getArg(0));
1167 mlir::Location loc = getLoc(e->getBeginLoc());
1168 return RValue::get(builder.createBoolToInt(
1169 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
1170 convertType(e->getType())));
1171 }
1172
1173 case Builtin::BI__builtin_issubnormal: {
1175 mlir::Value v = emitScalarExpr(e->getArg(0));
1176 mlir::Location loc = getLoc(e->getBeginLoc());
1177 return RValue::get(builder.createBoolToInt(
1178 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
1179 convertType(e->getType())));
1180 }
1181
1182 case Builtin::BI__builtin_iszero: {
1184 mlir::Value v = emitScalarExpr(e->getArg(0));
1185 mlir::Location loc = getLoc(e->getBeginLoc());
1186 return RValue::get(builder.createBoolToInt(
1187 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
1188 convertType(e->getType())));
1189 }
1190 case Builtin::BI__builtin_isfpclass: {
1191 Expr::EvalResult result;
1192 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
1193 break;
1194
1196 mlir::Value v = emitScalarExpr(e->getArg(0));
1197 uint64_t test = result.Val.getInt().getLimitedValue();
1198 mlir::Location loc = getLoc(e->getBeginLoc());
1199 //
1200 return RValue::get(builder.createBoolToInt(
1201 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
1202 convertType(e->getType())));
1203 }
1204 case Builtin::BI__builtin_nondeterministic_value:
1205 case Builtin::BI__builtin_elementwise_abs:
1206 return errorBuiltinNYI(*this, e, builtinID);
1207 case Builtin::BI__builtin_elementwise_acos:
1208 return emitUnaryFPBuiltin<cir::ACosOp>(*this, *e);
1209 case Builtin::BI__builtin_elementwise_asin:
1210 return emitUnaryFPBuiltin<cir::ASinOp>(*this, *e);
1211 case Builtin::BI__builtin_elementwise_atan:
1212 return emitUnaryFPBuiltin<cir::ATanOp>(*this, *e);
1213 case Builtin::BI__builtin_elementwise_atan2:
1214 case Builtin::BI__builtin_elementwise_ceil:
1215 case Builtin::BI__builtin_elementwise_exp:
1216 case Builtin::BI__builtin_elementwise_exp2:
1217 case Builtin::BI__builtin_elementwise_exp10:
1218 case Builtin::BI__builtin_elementwise_ldexp:
1219 case Builtin::BI__builtin_elementwise_log:
1220 case Builtin::BI__builtin_elementwise_log2:
1221 case Builtin::BI__builtin_elementwise_log10:
1222 case Builtin::BI__builtin_elementwise_pow:
1223 case Builtin::BI__builtin_elementwise_bitreverse:
1224 return errorBuiltinNYI(*this, e, builtinID);
1225 case Builtin::BI__builtin_elementwise_cos:
1226 return emitUnaryFPBuiltin<cir::CosOp>(*this, *e);
1227 case Builtin::BI__builtin_elementwise_cosh:
1228 case Builtin::BI__builtin_elementwise_floor:
1229 case Builtin::BI__builtin_elementwise_popcount:
1230 case Builtin::BI__builtin_elementwise_roundeven:
1231 case Builtin::BI__builtin_elementwise_round:
1232 case Builtin::BI__builtin_elementwise_rint:
1233 case Builtin::BI__builtin_elementwise_nearbyint:
1234 case Builtin::BI__builtin_elementwise_sin:
1235 case Builtin::BI__builtin_elementwise_sinh:
1236 case Builtin::BI__builtin_elementwise_tan:
1237 case Builtin::BI__builtin_elementwise_tanh:
1238 case Builtin::BI__builtin_elementwise_trunc:
1239 case Builtin::BI__builtin_elementwise_canonicalize:
1240 case Builtin::BI__builtin_elementwise_copysign:
1241 case Builtin::BI__builtin_elementwise_fma:
1242 case Builtin::BI__builtin_elementwise_fshl:
1243 case Builtin::BI__builtin_elementwise_fshr:
1244 case Builtin::BI__builtin_elementwise_add_sat:
1245 case Builtin::BI__builtin_elementwise_sub_sat:
1246 case Builtin::BI__builtin_elementwise_max:
1247 case Builtin::BI__builtin_elementwise_min:
1248 case Builtin::BI__builtin_elementwise_maxnum:
1249 case Builtin::BI__builtin_elementwise_minnum:
1250 case Builtin::BI__builtin_elementwise_maximum:
1251 case Builtin::BI__builtin_elementwise_minimum:
1252 case Builtin::BI__builtin_elementwise_maximumnum:
1253 case Builtin::BI__builtin_elementwise_minimumnum:
1254 case Builtin::BI__builtin_reduce_max:
1255 case Builtin::BI__builtin_reduce_min:
1256 case Builtin::BI__builtin_reduce_add:
1257 case Builtin::BI__builtin_reduce_mul:
1258 case Builtin::BI__builtin_reduce_xor:
1259 case Builtin::BI__builtin_reduce_or:
1260 case Builtin::BI__builtin_reduce_and:
1261 case Builtin::BI__builtin_reduce_maximum:
1262 case Builtin::BI__builtin_reduce_minimum:
1263 case Builtin::BI__builtin_matrix_transpose:
1264 case Builtin::BI__builtin_matrix_column_major_load:
1265 case Builtin::BI__builtin_matrix_column_major_store:
1266 case Builtin::BI__builtin_masked_load:
1267 case Builtin::BI__builtin_masked_expand_load:
1268 case Builtin::BI__builtin_masked_gather:
1269 case Builtin::BI__builtin_masked_store:
1270 case Builtin::BI__builtin_masked_compress_store:
1271 case Builtin::BI__builtin_masked_scatter:
1272 case Builtin::BI__builtin_isinf_sign:
1273 case Builtin::BI__builtin_flt_rounds:
1274 case Builtin::BI__builtin_set_flt_rounds:
1275 case Builtin::BI__builtin_fpclassify:
1276 return errorBuiltinNYI(*this, e, builtinID);
1277 case Builtin::BIalloca:
1278 case Builtin::BI_alloca:
1279 case Builtin::BI__builtin_alloca_uninitialized:
1280 case Builtin::BI__builtin_alloca:
1281 return emitBuiltinAlloca(*this, e, builtinID);
1282 case Builtin::BI__builtin_alloca_with_align_uninitialized:
1283 case Builtin::BI__builtin_alloca_with_align:
1284 case Builtin::BI__builtin_infer_alloc_token:
1285 case Builtin::BIbzero:
1286 case Builtin::BI__builtin_bzero:
1287 case Builtin::BIbcopy:
1288 case Builtin::BI__builtin_bcopy:
1289 return errorBuiltinNYI(*this, e, builtinID);
1290 case Builtin::BImemcpy:
1291 case Builtin::BI__builtin_memcpy:
1292 case Builtin::BImempcpy:
1293 case Builtin::BI__builtin_mempcpy:
1294 case Builtin::BI__builtin_memcpy_inline:
1295 case Builtin::BI__builtin_char_memchr:
1296 case Builtin::BI__builtin___memcpy_chk:
1297 case Builtin::BI__builtin_objc_memmove_collectable:
1298 case Builtin::BI__builtin___memmove_chk:
1299 case Builtin::BI__builtin_trivially_relocate:
1300 case Builtin::BImemmove:
1301 case Builtin::BI__builtin_memmove:
1302 case Builtin::BImemset:
1303 case Builtin::BI__builtin_memset:
1304 case Builtin::BI__builtin_memset_inline:
1305 case Builtin::BI__builtin___memset_chk:
1306 case Builtin::BI__builtin_wmemchr:
1307 case Builtin::BI__builtin_wmemcmp:
1308 break; // Handled as library calls below.
1309 case Builtin::BI__builtin_dwarf_cfa:
1310 return errorBuiltinNYI(*this, e, builtinID);
1311 case Builtin::BI__builtin_return_address: {
1312 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1313 return RValue::get(cir::ReturnAddrOp::create(
1314 builder, getLoc(e->getExprLoc()),
1315 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
1316 }
1317 case Builtin::BI_ReturnAddress: {
1318 return RValue::get(cir::ReturnAddrOp::create(
1319 builder, getLoc(e->getExprLoc()),
1320 builder.getConstInt(loc, builder.getUInt32Ty(), 0)));
1321 }
1322 case Builtin::BI__builtin_frame_address: {
1323 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1324 mlir::Location loc = getLoc(e->getExprLoc());
1325 mlir::Value addr = cir::FrameAddrOp::create(
1326 builder, loc, allocaInt8PtrTy,
1327 builder.getConstAPInt(loc, builder.getUInt32Ty(), level));
1328 return RValue::get(
1329 builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy));
1330 }
1331 case Builtin::BI__builtin_extract_return_addr:
1332 case Builtin::BI__builtin_frob_return_addr:
1333 case Builtin::BI__builtin_dwarf_sp_column:
1334 case Builtin::BI__builtin_init_dwarf_reg_size_table:
1335 case Builtin::BI__builtin_eh_return:
1336 case Builtin::BI__builtin_unwind_init:
1337 case Builtin::BI__builtin_extend_pointer:
1338 case Builtin::BI__builtin_setjmp:
1339 case Builtin::BI__builtin_longjmp:
1340 case Builtin::BI__builtin_launder:
1341 case Builtin::BI__sync_fetch_and_add:
1342 case Builtin::BI__sync_fetch_and_sub:
1343 case Builtin::BI__sync_fetch_and_or:
1344 case Builtin::BI__sync_fetch_and_and:
1345 case Builtin::BI__sync_fetch_and_xor:
1346 case Builtin::BI__sync_fetch_and_nand:
1347 case Builtin::BI__sync_add_and_fetch:
1348 case Builtin::BI__sync_sub_and_fetch:
1349 case Builtin::BI__sync_and_and_fetch:
1350 case Builtin::BI__sync_or_and_fetch:
1351 case Builtin::BI__sync_xor_and_fetch:
1352 case Builtin::BI__sync_nand_and_fetch:
1353 case Builtin::BI__sync_val_compare_and_swap:
1354 case Builtin::BI__sync_bool_compare_and_swap:
1355 case Builtin::BI__sync_lock_test_and_set:
1356 case Builtin::BI__sync_lock_release:
1357 case Builtin::BI__sync_swap:
1358 case Builtin::BI__sync_fetch_and_add_1:
1359 case Builtin::BI__sync_fetch_and_add_2:
1360 case Builtin::BI__sync_fetch_and_add_4:
1361 case Builtin::BI__sync_fetch_and_add_8:
1362 case Builtin::BI__sync_fetch_and_add_16:
1363 case Builtin::BI__sync_fetch_and_sub_1:
1364 case Builtin::BI__sync_fetch_and_sub_2:
1365 case Builtin::BI__sync_fetch_and_sub_4:
1366 case Builtin::BI__sync_fetch_and_sub_8:
1367 case Builtin::BI__sync_fetch_and_sub_16:
1368 case Builtin::BI__sync_fetch_and_or_1:
1369 case Builtin::BI__sync_fetch_and_or_2:
1370 case Builtin::BI__sync_fetch_and_or_4:
1371 case Builtin::BI__sync_fetch_and_or_8:
1372 case Builtin::BI__sync_fetch_and_or_16:
1373 case Builtin::BI__sync_fetch_and_and_1:
1374 case Builtin::BI__sync_fetch_and_and_2:
1375 case Builtin::BI__sync_fetch_and_and_4:
1376 case Builtin::BI__sync_fetch_and_and_8:
1377 case Builtin::BI__sync_fetch_and_and_16:
1378 case Builtin::BI__sync_fetch_and_xor_1:
1379 case Builtin::BI__sync_fetch_and_xor_2:
1380 case Builtin::BI__sync_fetch_and_xor_4:
1381 case Builtin::BI__sync_fetch_and_xor_8:
1382 case Builtin::BI__sync_fetch_and_xor_16:
1383 case Builtin::BI__sync_fetch_and_nand_1:
1384 case Builtin::BI__sync_fetch_and_nand_2:
1385 case Builtin::BI__sync_fetch_and_nand_4:
1386 case Builtin::BI__sync_fetch_and_nand_8:
1387 case Builtin::BI__sync_fetch_and_nand_16:
1388 case Builtin::BI__sync_fetch_and_min:
1389 case Builtin::BI__sync_fetch_and_max:
1390 case Builtin::BI__sync_fetch_and_umin:
1391 case Builtin::BI__sync_fetch_and_umax:
1392 return errorBuiltinNYI(*this, e, builtinID);
1393 return getUndefRValue(e->getType());
1394 case Builtin::BI__sync_add_and_fetch_1:
1395 case Builtin::BI__sync_add_and_fetch_2:
1396 case Builtin::BI__sync_add_and_fetch_4:
1397 case Builtin::BI__sync_add_and_fetch_8:
1398 case Builtin::BI__sync_add_and_fetch_16:
1399 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Add, e,
1400 cir::BinOpKind::Add);
1401 case Builtin::BI__sync_sub_and_fetch_1:
1402 case Builtin::BI__sync_sub_and_fetch_2:
1403 case Builtin::BI__sync_sub_and_fetch_4:
1404 case Builtin::BI__sync_sub_and_fetch_8:
1405 case Builtin::BI__sync_sub_and_fetch_16:
1406 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Sub, e,
1407 cir::BinOpKind::Sub);
1408 case Builtin::BI__sync_and_and_fetch_1:
1409 case Builtin::BI__sync_and_and_fetch_2:
1410 case Builtin::BI__sync_and_and_fetch_4:
1411 case Builtin::BI__sync_and_and_fetch_8:
1412 case Builtin::BI__sync_and_and_fetch_16:
1413 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::And, e,
1414 cir::BinOpKind::And);
1415 case Builtin::BI__sync_or_and_fetch_1:
1416 case Builtin::BI__sync_or_and_fetch_2:
1417 case Builtin::BI__sync_or_and_fetch_4:
1418 case Builtin::BI__sync_or_and_fetch_8:
1419 case Builtin::BI__sync_or_and_fetch_16:
1420 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Or, e,
1421 cir::BinOpKind::Or);
1422 case Builtin::BI__sync_xor_and_fetch_1:
1423 case Builtin::BI__sync_xor_and_fetch_2:
1424 case Builtin::BI__sync_xor_and_fetch_4:
1425 case Builtin::BI__sync_xor_and_fetch_8:
1426 case Builtin::BI__sync_xor_and_fetch_16:
1427 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Xor, e,
1428 cir::BinOpKind::Xor);
1429 case Builtin::BI__sync_nand_and_fetch_1:
1430 case Builtin::BI__sync_nand_and_fetch_2:
1431 case Builtin::BI__sync_nand_and_fetch_4:
1432 case Builtin::BI__sync_nand_and_fetch_8:
1433 case Builtin::BI__sync_nand_and_fetch_16:
1434 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Nand, e,
1435 cir::BinOpKind::And, true);
1436 case Builtin::BI__sync_val_compare_and_swap_1:
1437 case Builtin::BI__sync_val_compare_and_swap_2:
1438 case Builtin::BI__sync_val_compare_and_swap_4:
1439 case Builtin::BI__sync_val_compare_and_swap_8:
1440 case Builtin::BI__sync_val_compare_and_swap_16:
1441 case Builtin::BI__sync_bool_compare_and_swap_1:
1442 case Builtin::BI__sync_bool_compare_and_swap_2:
1443 case Builtin::BI__sync_bool_compare_and_swap_4:
1444 case Builtin::BI__sync_bool_compare_and_swap_8:
1445 case Builtin::BI__sync_bool_compare_and_swap_16:
1446 case Builtin::BI__sync_swap_1:
1447 case Builtin::BI__sync_swap_2:
1448 case Builtin::BI__sync_swap_4:
1449 case Builtin::BI__sync_swap_8:
1450 case Builtin::BI__sync_swap_16:
1451 case Builtin::BI__sync_lock_test_and_set_1:
1452 case Builtin::BI__sync_lock_test_and_set_2:
1453 case Builtin::BI__sync_lock_test_and_set_4:
1454 case Builtin::BI__sync_lock_test_and_set_8:
1455 case Builtin::BI__sync_lock_test_and_set_16:
1456 case Builtin::BI__sync_lock_release_1:
1457 case Builtin::BI__sync_lock_release_2:
1458 case Builtin::BI__sync_lock_release_4:
1459 case Builtin::BI__sync_lock_release_8:
1460 case Builtin::BI__sync_lock_release_16:
1461 case Builtin::BI__sync_synchronize:
1462 case Builtin::BI__builtin_nontemporal_load:
1463 case Builtin::BI__builtin_nontemporal_store:
1464 case Builtin::BI__c11_atomic_is_lock_free:
1465 case Builtin::BI__atomic_is_lock_free:
1466 case Builtin::BI__atomic_test_and_set:
1467 case Builtin::BI__atomic_clear:
1468 return errorBuiltinNYI(*this, e, builtinID);
1469 case Builtin::BI__atomic_thread_fence:
1470 case Builtin::BI__c11_atomic_thread_fence: {
1471 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
1472 return RValue::get(nullptr);
1473 }
1474 case Builtin::BI__atomic_signal_fence:
1475 case Builtin::BI__c11_atomic_signal_fence: {
1476 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
1477 return RValue::get(nullptr);
1478 }
1479 case Builtin::BI__scoped_atomic_thread_fence:
1480 case Builtin::BI__builtin_signbit:
1481 case Builtin::BI__builtin_signbitf:
1482 case Builtin::BI__builtin_signbitl:
1483 case Builtin::BI__warn_memset_zero_len:
1484 case Builtin::BI__annotation:
1485 case Builtin::BI__builtin_annotation:
1486 case Builtin::BI__builtin_addcb:
1487 case Builtin::BI__builtin_addcs:
1488 case Builtin::BI__builtin_addc:
1489 case Builtin::BI__builtin_addcl:
1490 case Builtin::BI__builtin_addcll:
1491 case Builtin::BI__builtin_subcb:
1492 case Builtin::BI__builtin_subcs:
1493 case Builtin::BI__builtin_subc:
1494 case Builtin::BI__builtin_subcl:
1495 case Builtin::BI__builtin_subcll:
1496 return errorBuiltinNYI(*this, e, builtinID);
1497
1498 case Builtin::BI__builtin_add_overflow:
1499 case Builtin::BI__builtin_sub_overflow:
1500 case Builtin::BI__builtin_mul_overflow: {
1501 const clang::Expr *leftArg = e->getArg(0);
1502 const clang::Expr *rightArg = e->getArg(1);
1503 const clang::Expr *resultArg = e->getArg(2);
1504
1505 clang::QualType resultQTy =
1506 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1507
1508 WidthAndSignedness leftInfo =
1509 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
1510 WidthAndSignedness rightInfo =
1511 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
1512 WidthAndSignedness resultInfo =
1513 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
1514
1515 // Note we compute the encompassing type with the consideration to the
1516 // result type, so later in LLVM lowering we don't get redundant integral
1517 // extension casts.
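// For example, for __builtin_add_overflow with int and unsigned operands and
// a 64-bit signed result, all three widths are encompassed by the 64-bit
// signed result type, so the operands are widened to it and the overflow
// check is done directly in that type.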
1518 WidthAndSignedness encompassingInfo =
1519 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
1520
1521 auto encompassingCIRTy = cir::IntType::get(
1522 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
1523 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1524
1525 mlir::Value left = emitScalarExpr(leftArg);
1526 mlir::Value right = emitScalarExpr(rightArg);
1527 Address resultPtr = emitPointerWithAlignment(resultArg);
1528
1529 // Extend each operand to the encompassing type, if necessary.
1530 if (left.getType() != encompassingCIRTy)
1531 left =
1532 builder.createCast(cir::CastKind::integral, left, encompassingCIRTy);
1533 if (right.getType() != encompassingCIRTy)
1534 right =
1535 builder.createCast(cir::CastKind::integral, right, encompassingCIRTy);
1536
1537 // Perform the operation on the extended values.
1538 cir::BinOpOverflowKind opKind;
1539 switch (builtinID) {
1540 default:
1541 llvm_unreachable("Unknown overflow builtin id.");
1542 case Builtin::BI__builtin_add_overflow:
1543 opKind = cir::BinOpOverflowKind::Add;
1544 break;
1545 case Builtin::BI__builtin_sub_overflow:
1546 opKind = cir::BinOpOverflowKind::Sub;
1547 break;
1548 case Builtin::BI__builtin_mul_overflow:
1549 opKind = cir::BinOpOverflowKind::Mul;
1550 break;
1551 }
1552
1553 mlir::Location loc = getLoc(e->getSourceRange());
1554 auto arithOp = cir::BinOpOverflowOp::create(builder, loc, resultCIRTy,
1555 opKind, left, right);
1556
1557 // Here is a slight difference from the original clang CodeGen:
1558 // - In the original clang CodeGen, the checked arithmetic result is
1559 // first computed as a value of the encompassing type, and then it is
1560 // truncated to the actual result type with a second overflow check.
1561 // - In CIRGen, the checked arithmetic operation directly produces the
1562 // checked result in its expected type.
1563 //
1564 // So we don't need a truncation and a second overflow check here.
1565
1566 // Finally, store the result using the pointer.
1567 bool isVolatile =
1568 resultArg->getType()->getPointeeType().isVolatileQualified();
1569 builder.createStore(loc, arithOp.getResult(), resultPtr, isVolatile);
1570
1571 return RValue::get(arithOp.getOverflow());
1572 }
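To illustrate why the encompassing type is computed above: the generic overflow builtins accept operands and a result pointer of arbitrary, possibly different, integer types. A small usage sketch, with types chosen only for illustration:

bool add_into_uchar(int a, unsigned long b, unsigned char *out) {
  // The operands and the result have different widths and signedness. CIRGen
  // widens both operands to the encompassing type and emits one
  // cir.binop.overflow whose result is already of the pointee type of 'out',
  // so no second truncation-with-check is needed.
  return __builtin_add_overflow(a, b, out); // returns true on overflow
}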
1573
1574 case Builtin::BI__builtin_uadd_overflow:
1575 case Builtin::BI__builtin_uaddl_overflow:
1576 case Builtin::BI__builtin_uaddll_overflow:
1577 case Builtin::BI__builtin_usub_overflow:
1578 case Builtin::BI__builtin_usubl_overflow:
1579 case Builtin::BI__builtin_usubll_overflow:
1580 case Builtin::BI__builtin_umul_overflow:
1581 case Builtin::BI__builtin_umull_overflow:
1582 case Builtin::BI__builtin_umulll_overflow:
1583 case Builtin::BI__builtin_sadd_overflow:
1584 case Builtin::BI__builtin_saddl_overflow:
1585 case Builtin::BI__builtin_saddll_overflow:
1586 case Builtin::BI__builtin_ssub_overflow:
1587 case Builtin::BI__builtin_ssubl_overflow:
1588 case Builtin::BI__builtin_ssubll_overflow:
1589 case Builtin::BI__builtin_smul_overflow:
1590 case Builtin::BI__builtin_smull_overflow:
1591 case Builtin::BI__builtin_smulll_overflow: {
1592 // Scalarize our inputs.
1593 mlir::Value x = emitScalarExpr(e->getArg(0));
1594 mlir::Value y = emitScalarExpr(e->getArg(1));
1595
1596 const clang::Expr *resultArg = e->getArg(2);
1597 Address resultPtr = emitPointerWithAlignment(resultArg);
1598
1599 // Decide which arithmetic operation we are lowering to:
1600 cir::BinOpOverflowKind arithKind;
1601 switch (builtinID) {
1602 default:
1603 llvm_unreachable("Unknown overflow builtin id.");
1604 case Builtin::BI__builtin_uadd_overflow:
1605 case Builtin::BI__builtin_uaddl_overflow:
1606 case Builtin::BI__builtin_uaddll_overflow:
1607 case Builtin::BI__builtin_sadd_overflow:
1608 case Builtin::BI__builtin_saddl_overflow:
1609 case Builtin::BI__builtin_saddll_overflow:
1610 arithKind = cir::BinOpOverflowKind::Add;
1611 break;
1612 case Builtin::BI__builtin_usub_overflow:
1613 case Builtin::BI__builtin_usubl_overflow:
1614 case Builtin::BI__builtin_usubll_overflow:
1615 case Builtin::BI__builtin_ssub_overflow:
1616 case Builtin::BI__builtin_ssubl_overflow:
1617 case Builtin::BI__builtin_ssubll_overflow:
1618 arithKind = cir::BinOpOverflowKind::Sub;
1619 break;
1620 case Builtin::BI__builtin_umul_overflow:
1621 case Builtin::BI__builtin_umull_overflow:
1622 case Builtin::BI__builtin_umulll_overflow:
1623 case Builtin::BI__builtin_smul_overflow:
1624 case Builtin::BI__builtin_smull_overflow:
1625 case Builtin::BI__builtin_smulll_overflow:
1626 arithKind = cir::BinOpOverflowKind::Mul;
1627 break;
1628 }
1629
1630 clang::QualType resultQTy =
1631 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1632 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1633
1634 mlir::Location loc = getLoc(e->getSourceRange());
1635 cir::BinOpOverflowOp arithOp = cir::BinOpOverflowOp::create(
1636 builder, loc, resultCIRTy, arithKind, x, y);
1637
1638 bool isVolatile =
1639 resultArg->getType()->getPointeeType().isVolatileQualified();
1640 builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
1641 resultPtr, isVolatile);
1642
1643 return RValue::get(arithOp.getOverflow());
1644 }
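The fixed-signature variants handled above need no encompassing type, since the builtin name pins down both the operand and result types. A usage sketch:

bool mul_checked(unsigned a, unsigned b, unsigned *out) {
  // Lowers to a single cir.binop.overflow(mul) on 'unsigned int'; the product
  // is stored through 'out' and the overflow flag is returned.
  return __builtin_umul_overflow(a, b, out);
}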
1645
1646 case Builtin::BIaddressof:
1647 case Builtin::BI__addressof:
1648 case Builtin::BI__builtin_addressof:
1649 case Builtin::BI__builtin_function_start:
1650 return errorBuiltinNYI(*this, e, builtinID);
1651 case Builtin::BI__builtin_operator_new:
1652 return emitNewOrDeleteBuiltinCall(
1653 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
1654 case Builtin::BI__builtin_operator_delete:
1655 emitNewOrDeleteBuiltinCall(
1656 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
1657 return RValue::get(nullptr);
1658 case Builtin::BI__builtin_is_aligned:
1659 case Builtin::BI__builtin_align_up:
1660 case Builtin::BI__builtin_align_down:
1661 case Builtin::BI__noop:
1662 case Builtin::BI__builtin_call_with_static_chain:
1663 case Builtin::BI_InterlockedExchange8:
1664 case Builtin::BI_InterlockedExchange16:
1665 case Builtin::BI_InterlockedExchange:
1666 case Builtin::BI_InterlockedExchangePointer:
1667 case Builtin::BI_InterlockedCompareExchangePointer:
1668 case Builtin::BI_InterlockedCompareExchangePointer_nf:
1669 case Builtin::BI_InterlockedCompareExchange8:
1670 case Builtin::BI_InterlockedCompareExchange16:
1671 case Builtin::BI_InterlockedCompareExchange:
1672 case Builtin::BI_InterlockedCompareExchange64:
1673 case Builtin::BI_InterlockedIncrement16:
1674 case Builtin::BI_InterlockedIncrement:
1675 case Builtin::BI_InterlockedDecrement16:
1676 case Builtin::BI_InterlockedDecrement:
1677 case Builtin::BI_InterlockedAnd8:
1678 case Builtin::BI_InterlockedAnd16:
1679 case Builtin::BI_InterlockedAnd:
1680 case Builtin::BI_InterlockedExchangeAdd8:
1681 case Builtin::BI_InterlockedExchangeAdd16:
1682 case Builtin::BI_InterlockedExchangeAdd:
1683 case Builtin::BI_InterlockedExchangeSub8:
1684 case Builtin::BI_InterlockedExchangeSub16:
1685 case Builtin::BI_InterlockedExchangeSub:
1686 case Builtin::BI_InterlockedOr8:
1687 case Builtin::BI_InterlockedOr16:
1688 case Builtin::BI_InterlockedOr:
1689 case Builtin::BI_InterlockedXor8:
1690 case Builtin::BI_InterlockedXor16:
1691 case Builtin::BI_InterlockedXor:
1692 case Builtin::BI_bittest64:
1693 case Builtin::BI_bittest:
1694 case Builtin::BI_bittestandcomplement64:
1695 case Builtin::BI_bittestandcomplement:
1696 case Builtin::BI_bittestandreset64:
1697 case Builtin::BI_bittestandreset:
1698 case Builtin::BI_bittestandset64:
1699 case Builtin::BI_bittestandset:
1700 case Builtin::BI_interlockedbittestandreset:
1701 case Builtin::BI_interlockedbittestandreset64:
1702 case Builtin::BI_interlockedbittestandreset64_acq:
1703 case Builtin::BI_interlockedbittestandreset64_rel:
1704 case Builtin::BI_interlockedbittestandreset64_nf:
1705 case Builtin::BI_interlockedbittestandset64:
1706 case Builtin::BI_interlockedbittestandset64_acq:
1707 case Builtin::BI_interlockedbittestandset64_rel:
1708 case Builtin::BI_interlockedbittestandset64_nf:
1709 case Builtin::BI_interlockedbittestandset:
1710 case Builtin::BI_interlockedbittestandset_acq:
1711 case Builtin::BI_interlockedbittestandset_rel:
1712 case Builtin::BI_interlockedbittestandset_nf:
1713 case Builtin::BI_interlockedbittestandreset_acq:
1714 case Builtin::BI_interlockedbittestandreset_rel:
1715 case Builtin::BI_interlockedbittestandreset_nf:
1716 case Builtin::BI__iso_volatile_load8:
1717 case Builtin::BI__iso_volatile_load16:
1718 case Builtin::BI__iso_volatile_load32:
1719 case Builtin::BI__iso_volatile_load64:
1720 case Builtin::BI__iso_volatile_store8:
1721 case Builtin::BI__iso_volatile_store16:
1722 case Builtin::BI__iso_volatile_store32:
1723 case Builtin::BI__iso_volatile_store64:
1724 case Builtin::BI__builtin_ptrauth_sign_constant:
1725 case Builtin::BI__builtin_ptrauth_auth:
1726 case Builtin::BI__builtin_ptrauth_auth_and_resign:
1727 case Builtin::BI__builtin_ptrauth_blend_discriminator:
1728 case Builtin::BI__builtin_ptrauth_sign_generic_data:
1729 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
1730 case Builtin::BI__builtin_ptrauth_strip:
1731 case Builtin::BI__builtin_get_vtable_pointer:
1732 case Builtin::BI__exception_code:
1733 case Builtin::BI_exception_code:
1734 case Builtin::BI__exception_info:
1735 case Builtin::BI_exception_info:
1736 case Builtin::BI__abnormal_termination:
1737 case Builtin::BI_abnormal_termination:
1738 case Builtin::BI_setjmpex:
1739 case Builtin::BI_setjmp:
1740 case Builtin::BImove:
1741 case Builtin::BImove_if_noexcept:
1742 case Builtin::BIforward:
1743 case Builtin::BIforward_like:
1744 case Builtin::BIas_const:
1745 case Builtin::BI__GetExceptionInfo:
1746 case Builtin::BI__fastfail:
1747 case Builtin::BIread_pipe:
1748 case Builtin::BIwrite_pipe:
1749 case Builtin::BIreserve_read_pipe:
1750 case Builtin::BIreserve_write_pipe:
1751 case Builtin::BIwork_group_reserve_read_pipe:
1752 case Builtin::BIwork_group_reserve_write_pipe:
1753 case Builtin::BIsub_group_reserve_read_pipe:
1754 case Builtin::BIsub_group_reserve_write_pipe:
1755 case Builtin::BIcommit_read_pipe:
1756 case Builtin::BIcommit_write_pipe:
1757 case Builtin::BIwork_group_commit_read_pipe:
1758 case Builtin::BIwork_group_commit_write_pipe:
1759 case Builtin::BIsub_group_commit_read_pipe:
1760 case Builtin::BIsub_group_commit_write_pipe:
1761 case Builtin::BIget_pipe_num_packets:
1762 case Builtin::BIget_pipe_max_packets:
1763 case Builtin::BIto_global:
1764 case Builtin::BIto_local:
1765 case Builtin::BIto_private:
1766 case Builtin::BIenqueue_kernel:
1767 case Builtin::BIget_kernel_work_group_size:
1768 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1769 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1770 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1771 case Builtin::BI__builtin_store_half:
1772 case Builtin::BI__builtin_store_halff:
1773 case Builtin::BI__builtin_load_half:
1774 case Builtin::BI__builtin_load_halff:
1775 return errorBuiltinNYI(*this, e, builtinID);
1776 case Builtin::BI__builtin_printf:
1777 case Builtin::BIprintf:
1778 break;
1779 case Builtin::BI__builtin_canonicalize:
1780 case Builtin::BI__builtin_canonicalizef:
1781 case Builtin::BI__builtin_canonicalizef16:
1782 case Builtin::BI__builtin_canonicalizel:
1783 case Builtin::BI__builtin_thread_pointer:
1784 case Builtin::BI__builtin_os_log_format:
1785 case Builtin::BI__xray_customevent:
1786 case Builtin::BI__xray_typedevent:
1787 case Builtin::BI__builtin_ms_va_start:
1788 case Builtin::BI__builtin_ms_va_end:
1789 case Builtin::BI__builtin_ms_va_copy:
1790 case Builtin::BI__builtin_get_device_side_mangled_name:
1791 return errorBuiltinNYI(*this, e, builtinID);
1792 }
1793
1794 // If this is an alias for a lib function (e.g. __builtin_sin), emit
1795 // the call using the normal call path, but using the unmangled
1796 // version of the function name.
1797 if (getContext().BuiltinInfo.isLibFunction(builtinID))
1798 return emitLibraryCall(*this, fd, e,
1799 cgm.getBuiltinLibFunction(fd, builtinID));
1800
1801 // Some target-specific builtins can have aggregate return values, e.g.
1802 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
1803 // returnValue to be non-null, so that the target-specific emission code can
1804 // always just emit into it.
1805 cir::TypeEvaluationKind evalKind = getEvaluationKind(e->getType());
1806 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
1807 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
1808 return getUndefRValue(e->getType());
1809 }
1810
1811 // Now see if we can emit a target-specific builtin.
1812 // FIXME: This is a temporary mechanism (double-optional semantics) that will
1813 // go away once everything is implemented:
1814 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
1815 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
1816 // but also didn't handle the builtin.
1817 if (std::optional<mlir::Value> rst =
1818 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
1819 mlir::Value v = rst.value();
1820 // A CIR dialect operation may have no result; in that case no value is
1821 // returned even though the builtin was emitted successfully.
1822 if (!v)
1823 return RValue::get(nullptr);
1824
1825 switch (evalKind) {
1826 case cir::TEK_Scalar:
1827 if (mlir::isa<cir::VoidType>(v.getType()))
1828 return RValue::get(nullptr);
1829 return RValue::get(v);
1830 case cir::TEK_Aggregate:
1831 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
1832 return getUndefRValue(e->getType());
1833 case cir::TEK_Complex:
1834 llvm_unreachable("No current target builtin returns complex");
1835 }
1836 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
1837 }
1838
1839 cgm.errorNYI(e->getSourceRange(),
1840 std::string("unimplemented builtin call: ") +
1841 getContext().BuiltinInfo.getName(builtinID));
1842 return getUndefRValue(e->getType());
1843}
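To illustrate the library-function fallback near the end of emitBuiltinExpr: a builtin such as __builtin_sin that is merely an alias for a libm function and is not matched by any case above is emitted as an ordinary call to the unmangled library symbol obtained from getBuiltinLibFunction (see below). A sketch of the source-level effect:

double scaled_sine(double x) {
  // Not a case in the switch above; BuiltinInfo.isLibFunction() is true for
  // it, so this becomes a plain call to the C library's 'sin' (unless the
  // FP-math intrinsic path earlier in this function already handled it).
  return 2.0 * __builtin_sin(x);
}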
1844
1845 static std::optional<mlir::Value>
1846 emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID,
1847 const CallExpr *e, ReturnValueSlot &returnValue,
1848 llvm::Triple::ArchType arch) {
1849 // When compiling in HipStdPar mode we have to be conservative in rejecting
1850 // target specific features in the FE, and defer the possible error to the
1851 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
1852 // referenced by an accelerator executable function, we emit an error.
1853 // Returning std::nullopt here leads to the builtin being handled in
1854 // EmitStdParUnsupportedBuiltin.
1855 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
1856 arch != cgf->getTarget().getTriple().getArch())
1857 return std::nullopt;
1858
1859 switch (arch) {
1860 case llvm::Triple::arm:
1861 case llvm::Triple::armeb:
1862 case llvm::Triple::thumb:
1863 case llvm::Triple::thumbeb:
1864 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1865 // At this point, we don't even know that the builtin is target-specific.
1866 return std::nullopt;
1867 case llvm::Triple::aarch64:
1868 case llvm::Triple::aarch64_32:
1869 case llvm::Triple::aarch64_be:
1870 return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
1871 case llvm::Triple::bpfeb:
1872 case llvm::Triple::bpfel:
1873 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1874 // At this point, we don't even know that the builtin is target-specific.
1875 return std::nullopt;
1876
1877 case llvm::Triple::x86:
1878 case llvm::Triple::x86_64:
1879 return cgf->emitX86BuiltinExpr(builtinID, e);
1880
1881 case llvm::Triple::ppc:
1882 case llvm::Triple::ppcle:
1883 case llvm::Triple::ppc64:
1884 case llvm::Triple::ppc64le:
1885 case llvm::Triple::r600:
1886 case llvm::Triple::amdgcn:
1887 case llvm::Triple::systemz:
1888 case llvm::Triple::nvptx:
1889 case llvm::Triple::nvptx64:
1890 case llvm::Triple::wasm32:
1891 case llvm::Triple::wasm64:
1892 case llvm::Triple::hexagon:
1893 case llvm::Triple::riscv32:
1894 case llvm::Triple::riscv64:
1895 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1896 // At this point, we don't even know that the builtin is target-specific.
1897 return std::nullopt;
1898 default:
1899 return std::nullopt;
1900 }
1901}
1902
1903 std::optional<mlir::Value>
1904 CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
1905 ReturnValueSlot &returnValue) {
1906 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
1907 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
1908 return emitTargetArchBuiltinExpr(
1909 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
1910 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
1911 }
1912
1913 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
1914 getTarget().getTriple().getArch());
1915}
1916
1917 mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(
1918 const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
1919 mlir::Value arg = {};
1920 if ((iceArguments & (1 << idx)) == 0) {
1921 arg = emitScalarExpr(argExpr);
1922 } else {
1923 // If this is required to be a constant, constant fold it so that we
1924 // know that the generated intrinsic gets a ConstantInt.
1925 const std::optional<llvm::APSInt> result =
1926 argExpr->getIntegerConstantExpr(getContext());
1927 assert(result && "Expected argument to be a constant");
1928 arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
1929 }
1930 return arg;
1931}
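The iceArguments parameter above is a bitmask with one bit per call argument: a set bit at position idx means argument idx must be an integer constant expression and is constant-folded instead of being emitted as a scalar. A minimal sketch of that convention (the mask value is purely illustrative):

// Hypothetical mask: arguments 1 and 2 are required to be constant.
constexpr unsigned kIceArguments = (1u << 1) | (1u << 2);

// Mirrors the test used in emitScalarOrConstFoldImmArg.
constexpr bool mustConstantFold(unsigned iceArguments, unsigned idx) {
  return (iceArguments & (1u << idx)) != 0;
}

static_assert(!mustConstantFold(kIceArguments, 0));
static_assert(mustConstantFold(kIceArguments, 2));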
1932
1933/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
1934/// for "fabsf".
1935 cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
1936 unsigned builtinID) {
1937 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
1938
1939 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
1940 // to build this up so provide a small stack buffer to handle the vast
1941 // majority of names.
1942 llvm::SmallString<64> name;
1943
1944 assert(!cir::MissingFeatures::asmLabelAttr());
1945 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
1946
1947 GlobalDecl d(fd);
1948 mlir::Type type = convertType(fd->getType());
1949 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
1950}
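The substr(10) above strips the 10-character "__builtin_" prefix, so the CIR function is created under the plain libc/libm name; for example "__builtin_fabsf" yields "fabsf". A tiny sanity sketch of that assumption:

// The prefix skipped by substr(10) is exactly 10 characters long.
static_assert(sizeof("__builtin_") - 1 == 10,
              "getBuiltinLibFunction assumes a 10-character prefix");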
1951
1952 mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *e) {
1953 mlir::Value argValue = evaluateExprAsBool(e);
1954 if (!sanOpts.has(SanitizerKind::Builtin))
1955 return argValue;
1956
1957 assert(!cir::MissingFeatures::sanitizers());
1958 cgm.errorNYI(e->getSourceRange(),
1959 "emitCheckedArgForAssume: sanitizers are NYI");
1960 return {};
1961}
1962
1963void CIRGenFunction::emitVAStart(mlir::Value vaList, mlir::Value count) {
1964 // LLVM codegen casts to i8*; there is no real gain in doing this for CIRGen
1965 // this early, so defer it to LLVM lowering.
1966 cir::VAStartOp::create(builder, vaList.getLoc(), vaList, count);
1967}
1968
1969void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
1970 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
1971}
1972
1973 // FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
1974 // default this lowers to llvm.va_arg, which is incomplete and not ABI-compliant
1975 // on most targets, so cir.va_arg will need some ABI handling in LoweringPrepare.
1976 mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *ve) {
1977 assert(!cir::MissingFeatures::msabi());
1978 assert(!cir::MissingFeatures::vlas());
1979 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
1980 mlir::Type type = convertType(ve->getType());
1981 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
1982 return cir::VAArgOp::create(builder, loc, type, vaList);
1983}
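For reference, the three hooks above implement the standard varargs pattern, each mapping to one CIR operation, with the ABI-specific lowering deferred as the FIXME notes. A usage sketch:

#include <cstdarg>

int sum(int count, ...) {
  va_list args;
  va_start(args, count);         // __builtin_va_start -> emitVAStart -> cir.va_start
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(args, int);  // __builtin_va_arg -> emitVAArg -> cir.va_arg
  va_end(args);                  // __builtin_va_end -> emitVAEnd -> cir.va_end
  return total;
}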
1984
1985mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
1986 cir::IntType resType,
1987 mlir::Value emittedE,
1988 bool isDynamic) {
1989 assert(!cir::MissingFeatures::opCallImplicitObjectSizeArgs());
1990
1991 // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
1992 // evaluate e for side-effects. In either case, just like original LLVM
1993 // lowering, we shouldn't lower to `cir.objsize` but to a constant instead.
1994 if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
1995 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
1996 (type & 2) ? 0 : -1);
1997
1998 mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
1999 assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
2000 "Non-pointer passed to __builtin_object_size?");
2001
2002 assert(!cir::MissingFeatures::countedBySize());
2003
2004 // Extract the min/max mode from type. CIR only supports type 0
2005 // (max, whole object) and type 2 (min, whole object), not type 1 or 3
2006 // (closest subobject variants).
2007 const bool min = ((type & 2) != 0);
2008 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
2009 auto op =
2010 cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
2011 min, /*nullUnknown=*/true, isDynamic);
2012 return op.getResult();
2013}
2014
2015 mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize(
2016 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
2017 bool isDynamic) {
2018 uint64_t objectSize;
2019 if (!e->tryEvaluateObjectSize(objectSize, getContext(), type))
2020 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
2021 return builder.getConstInt(getLoc(e->getSourceRange()), resType, objectSize);
2022}