CIRGenBuiltin.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as CIR or a function call to be
10// later resolved.
11//
12//===----------------------------------------------------------------------===//
13
14#include "CIRGenCall.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "mlir/Support/LLVM.h"
21#include "clang/AST/DeclBase.h"
22#include "clang/AST/Expr.h"
29#include "llvm/Support/ErrorHandling.h"
30
31using namespace clang;
32using namespace clang::CIRGen;
33using namespace llvm;
34
35static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd,
36 const CallExpr *e, mlir::Operation *calleeValue) {
37 CIRGenCallee callee = CIRGenCallee::forDirect(calleeValue, GlobalDecl(fd));
38 return cgf.emitCall(e->getCallee()->getType(), callee, e, ReturnValueSlot());
39}
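// (Note: this is the "function call to be later resolved" path from the file
// header -- builtins with no dedicated CIR lowering are emitted as ordinary
// calls to the library function they name.)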
40
41template <typename Op>
42static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e,
43 bool poisonZero = false) {
45
46 mlir::Value arg = cgf.emitScalarExpr(e->getArg(0));
47 CIRGenBuilderTy &builder = cgf.getBuilder();
48
49 Op op;
50 if constexpr (std::is_same_v<Op, cir::BitClzOp> ||
51 std::is_same_v<Op, cir::BitCtzOp>)
52 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg, poisonZero);
53 else
54 op = Op::create(builder, cgf.getLoc(e->getSourceRange()), arg);
55
56 mlir::Value result = op.getResult();
57 mlir::Type exprTy = cgf.convertType(e->getType());
58 if (exprTy != result.getType())
59 result = builder.createIntCast(result, exprTy);
60
61 return RValue::get(result);
62}
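// Rough example of what this produces (CIR spelling abbreviated):
// __builtin_ctz(x) on a 32-bit unsigned x becomes a cir::BitCtzOp with
// poisonZero = true (the builtin is undefined for a zero input), followed by
// an integral cast whenever the C result type (int) differs from the operand
// type.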
63
64/// Emit the conversions required to turn the given value into an
65/// integer of the given size.
66static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
67 cir::IntType intType) {
68 v = cgf.emitToMemory(v, t);
69
70 if (mlir::isa<cir::PointerType>(v.getType()))
71 return cgf.getBuilder().createPtrToInt(v, intType);
72
73 assert(v.getType() == intType);
74 return v;
75}
76
77static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t,
78 mlir::Type resultType) {
79 v = cgf.emitFromMemory(v, t);
80
81 if (mlir::isa<cir::PointerType>(resultType))
82 return cgf.getBuilder().createIntToPtr(v, resultType);
83
84 assert(v.getType() == resultType);
85 return v;
86}
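// emitToInt / emitFromInt are used as a pair by the __sync_* lowering below:
// pointer operands are converted to pointer-sized integers before the atomic
// operation and converted back to the original type afterwards.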
87
88static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e) {
89 ASTContext &astContext = cgf.getContext();
90 Address ptr = cgf.emitPointerWithAlignment(e->getArg(0));
91 unsigned bytes =
92 mlir::isa<cir::PointerType>(ptr.getElementType())
93 ? astContext.getTypeSizeInChars(astContext.VoidPtrTy).getQuantity()
94 : cgf.cgm.getDataLayout().getTypeSizeInBits(ptr.getElementType()) / 8;
96
97 unsigned align = ptr.getAlignment().getQuantity();
98 if (align % bytes != 0) {
99 DiagnosticsEngine &diags = cgf.cgm.getDiags();
100 diags.Report(e->getBeginLoc(), diag::warn_sync_op_misaligned);
101 // Force address to be at least naturally-aligned.
102 return ptr.withAlignment(CharUnits::fromQuantity(bytes));
103 }
104 return ptr;
105}
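// Worked example: on a target with 4-byte int, __sync_fetch_and_add on an
// int* expects at least 4-byte alignment; a pointer that is only 2-byte
// aligned is diagnosed with warn_sync_op_misaligned and then treated as
// naturally aligned.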
106
107/// Utility to insert an atomic fetch operation based on the given
108/// cir::AtomicFetchKind and the expression node.
109static mlir::Value makeBinaryAtomicValue(
110 CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr,
111 mlir::Type *originalArgType, mlir::Value *emittedArgValue = nullptr,
112 cir::MemOrder ordering = cir::MemOrder::SequentiallyConsistent) {
113
114 QualType type = expr->getType();
115 QualType ptrType = expr->getArg(0)->getType();
116
117 assert(ptrType->isPointerType());
118 assert(
119 cgf.getContext().hasSameUnqualifiedType(type, ptrType->getPointeeType()));
120 assert(cgf.getContext().hasSameUnqualifiedType(type,
121 expr->getArg(1)->getType()));
122
123 Address destAddr = checkAtomicAlignment(cgf, expr);
124 CIRGenBuilderTy &builder = cgf.getBuilder();
125
126 mlir::Value val = cgf.emitScalarExpr(expr->getArg(1));
127 mlir::Type valueType = val.getType();
128 mlir::Value destValue = destAddr.emitRawPointer();
129
130 if (ptrType->getPointeeType()->isPointerType()) {
131 // Pointer to pointer
132 // `cir.atomic.fetch` expects a pointer to an integer type, so we cast
133 // ptr<ptr<T>> to ptr<intPtrSize>
134 cir::IntType ptrSizeInt =
135 builder.getSIntNTy(cgf.getContext().getTypeSize(ptrType));
136 destValue =
137 builder.createBitcast(destValue, builder.getPointerTo(ptrSizeInt));
138 val = emitToInt(cgf, val, type, ptrSizeInt);
139 } else {
140 // Pointer to integer type
141 cir::IntType intType =
142 type->isUnsignedIntegerType()
143 ? builder.getUIntNTy(cgf.getContext().getTypeSize(type))
144 : builder.getSIntNTy(cgf.getContext().getTypeSize(type));
145 val = emitToInt(cgf, val, type, intType);
146 }
147
148 // This output argument is needed for post atomic fetch operations
149 // that calculate the result of the operation as return value of
150 // <binop>_and_fetch builtins. The `AtomicFetch` operation only updates the
151 // memory location and returns the old value.
152 if (emittedArgValue) {
153 *emittedArgValue = val;
154 *originalArgType = valueType;
155 }
156
157 auto rmwi = cir::AtomicFetchOp::create(
158 builder, cgf.getLoc(expr->getSourceRange()), destValue, val, kind,
159 ordering, /*isVolatile=*/false,
160 /*fetchFirst=*/true);
161 return rmwi->getResult(0);
162}
163
164static RValue emitBinaryAtomicPost(CIRGenFunction &cgf,
165 cir::AtomicFetchKind atomicOpkind,
166 const CallExpr *e, cir::BinOpKind binopKind,
167 bool invert = false) {
168 mlir::Value emittedArgValue;
169 mlir::Type originalArgType;
170 clang::QualType typ = e->getType();
171 mlir::Value result = makeBinaryAtomicValue(
172 cgf, atomicOpkind, e, &originalArgType, &emittedArgValue);
173 CIRGenBuilderTy &builder = cgf.getBuilder();
174 result = cir::BinOp::create(builder, result.getLoc(), binopKind, result,
175 emittedArgValue);
176
177 if (invert)
178 result = cir::UnaryOp::create(builder, result.getLoc(),
179 cir::UnaryOpKind::Not, result);
180
181 result = emitFromInt(cgf, result, typ, originalArgType);
182 return RValue::get(result);
183}
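// Worked example: __sync_add_and_fetch(p, 4) becomes a fetch-first
// cir::AtomicFetchOp(Add) yielding the old value plus a cir::BinOp(Add) that
// recomputes old + 4, so the builtin returns the new value; the
// __sync_nand_and_fetch forms additionally pass invert = true to apply the
// final bitwise Not.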
184
185static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr,
186 cir::SyncScopeKind syncScope) {
187 CIRGenBuilderTy &builder = cgf.getBuilder();
188 mlir::Location loc = cgf.getLoc(expr->getSourceRange());
189
190 auto emitAtomicOpCallBackFn = [&](cir::MemOrder memOrder) {
191 cir::AtomicFenceOp::create(
192 builder, loc, memOrder,
193 cir::SyncScopeKindAttr::get(&cgf.getMLIRContext(), syncScope));
194 };
195
196 cgf.emitAtomicExprWithMemOrder(expr->getArg(0), /*isStore*/ false,
197 /*isLoad*/ false, /*isFence*/ true,
198 emitAtomicOpCallBackFn);
199}
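// The callers below map __atomic_thread_fence / __c11_atomic_thread_fence to
// cir::SyncScopeKind::System and the corresponding *_signal_fence builtins to
// cir::SyncScopeKind::SingleThread.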
200
201namespace {
202struct WidthAndSignedness {
203 unsigned width;
204 bool isSigned;
205};
206} // namespace
207
208static WidthAndSignedness
209getIntegerWidthAndSignedness(const clang::ASTContext &astContext,
210 const clang::QualType type) {
211 assert(type->isIntegerType() && "Given type is not an integer.");
212 unsigned width = type->isBooleanType() ? 1
213 : type->isBitIntType() ? astContext.getIntWidth(type)
214 : astContext.getTypeInfo(type).Width;
215 bool isSigned = type->isSignedIntegerType();
216 return {width, isSigned};
217}
218
219// Given one or more integer types, this function produces an integer type that
220// encompasses them: any value in one of the given types could be expressed in
221// the encompassing type.
222static struct WidthAndSignedness
223EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> types) {
224 assert(types.size() > 0 && "Empty list of types.");
225
226 // If any of the given types is signed, we must return a signed type.
227 bool isSigned = llvm::any_of(types, [](const auto &t) { return t.isSigned; });
228
229 // The encompassing type must have a width greater than or equal to the width
230 // of the specified types. Additionally, if the encompassing type is signed,
231 // its width must be strictly greater than the width of any unsigned types
232 // given.
233 unsigned width = 0;
234 for (const auto &type : types)
235 width = std::max(width, type.width + (isSigned && !type.isSigned));
236
237 return {width, isSigned};
238}
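// Worked example: for operands of type `unsigned` ({32, unsigned}) and `int`
// ({32, signed}) the encompassing type is {width = 33, signed}, since a
// signed type needs one extra bit to represent every 32-bit unsigned value.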
239
240RValue CIRGenFunction::emitRotate(const CallExpr *e, bool isRotateLeft) {
241 mlir::Value input = emitScalarExpr(e->getArg(0));
242 mlir::Value amount = emitScalarExpr(e->getArg(1));
243
244 // TODO(cir): MSVC flavor bit rotate builtins use different types for input
245 // and amount, but cir.rotate requires them to have the same type. Cast amount
246 // to the type of input when necessary.
248
249 auto r = cir::RotateOp::create(builder, getLoc(e->getSourceRange()), input,
250 amount, isRotateLeft);
251 return RValue::get(r);
252}
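// All of the __builtin_rotateleft* / __builtin_rotateright* builtins funnel
// through this helper; the direction is carried by the isRotateLeft flag on
// cir::RotateOp.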
253
254template <class Operation>
255static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf,
256 const CallExpr &e) {
257 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
258
261
262 auto call =
263 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
264 return RValue::get(call->getResult(0));
265}
266
267template <class Operation>
268static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e) {
269 mlir::Value arg = cgf.emitScalarExpr(e.getArg(0));
270 auto call =
271 Operation::create(cgf.getBuilder(), arg.getLoc(), arg.getType(), arg);
272 return RValue::get(call->getResult(0));
273}
274
275static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e,
276 unsigned builtinID) {
277
278 if (cgf.getContext().BuiltinInfo.isLibFunction(builtinID)) {
279 cgf.cgm.errorNYI(
280 e->getSourceRange(),
281 std::string("unimplemented X86 library function builtin call: ") +
282 cgf.getContext().BuiltinInfo.getName(builtinID));
283 } else {
284 cgf.cgm.errorNYI(e->getSourceRange(),
285 std::string("unimplemented X86 builtin call: ") +
286 cgf.getContext().BuiltinInfo.getName(builtinID));
287 }
288
289 return cgf.getUndefRValue(e->getType());
290}
291
292static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e,
293 unsigned builtinID) {
294 assert(builtinID == Builtin::BI__builtin_alloca ||
295 builtinID == Builtin::BI__builtin_alloca_uninitialized ||
296 builtinID == Builtin::BIalloca || builtinID == Builtin::BI_alloca);
297
298 // Get alloca size input
299 mlir::Value size = cgf.emitScalarExpr(e->getArg(0));
300
301 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
302 const TargetInfo &ti = cgf.getContext().getTargetInfo();
303 const CharUnits suitableAlignmentInBytes =
304 cgf.getContext().toCharUnitsFromBits(ti.getSuitableAlign());
305
306 // Emit the alloca op with type `u8 *` to match the semantics of
307 // `llvm.alloca`. We later bitcast the type to `void *` to match the
308 // semantics of C/C++
309 // FIXME(cir): It may make sense to allow AllocaOp of type `u8` to return a
310 // pointer of type `void *`. This will require a change to the allocaOp
311 // verifier.
312 CIRGenBuilderTy &builder = cgf.getBuilder();
313 mlir::Value allocaAddr = builder.createAlloca(
314 cgf.getLoc(e->getSourceRange()), builder.getUInt8PtrTy(),
315 builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
316
317 // Initialize the allocated buffer if required.
318 if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
319 // Initialize the alloca with the given size and alignment according to
320 // the lang opts. Only the trivial non-initialization is supported for
321 // now.
322
323 switch (cgf.getLangOpts().getTrivialAutoVarInit()) {
324 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
325 // Nothing to initialize.
326 break;
327 case LangOptions::TrivialAutoVarInitKind::Zero:
328 case LangOptions::TrivialAutoVarInitKind::Pattern:
329 cgf.cgm.errorNYI("trivial auto var init");
330 break;
331 }
332 }
333
334 // An alloca will always return a pointer to the alloca (stack) address
335 // space. This address space need not be the same as the AST / Language
336 // default (e.g. in C / C++ auto vars are in the generic address space). At
337 // the AST level this is handled within CreateTempAlloca et al., but for the
338 // builtin / dynamic alloca we have to handle it here.
339
343 cgf.cgm.errorNYI(e->getSourceRange(),
344 "Non-default address space for alloca");
345 }
346
347 // Bitcast the alloca to the expected type.
348 return RValue::get(builder.createBitcast(
349 allocaAddr, builder.getVoidPtrTy(cgf.getCIRAllocaAddressSpace())));
350}
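// Rough shape of the result: __builtin_alloca(n) becomes a cir.alloca of n
// bytes of u8 aligned to __BIGGEST_ALIGNMENT__, whose u8* result is then
// bitcast to void* in the alloca address space.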
351
352static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e,
353 unsigned builtinID) {
354 std::optional<bool> errnoOverriden;
355 // errnoOverriden is true if math-errno is overridden via the
356 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
357 // which implies math-errno.
358 if (e->hasStoredFPFeatures()) {
359 FPOptionsOverride op = e->getFPFeatures();
360 if (op.hasMathErrnoOverride())
361 errnoOverriden = op.getMathErrnoOverride();
362 }
363 // True if '__attribute__((optnone))' is used. This attribute overrides
364 // fast-math, which implies math-errno.
365 bool optNone =
366 cgf.curFuncDecl && cgf.curFuncDecl->hasAttr<OptimizeNoneAttr>();
367 bool isOptimizationEnabled = cgf.cgm.getCodeGenOpts().OptimizationLevel != 0;
368 bool generateFPMathIntrinsics =
370 builtinID, cgf.cgm.getTriple(), errnoOverriden,
371 cgf.getLangOpts().MathErrno, optNone, isOptimizationEnabled);
372 return generateFPMathIntrinsics;
373}
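// Summary (based on the inputs gathered above): the decision weighs
// math-errno (including the pragma override and optnone, both of which imply
// errno semantics) against the optimization level to decide whether an
// errno-free intrinsic form may be emitted instead of a library call.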
374
375static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e,
376 unsigned builtinID) {
378 switch (builtinID) {
379 case Builtin::BIacos:
380 case Builtin::BIacosf:
381 case Builtin::BIacosl:
382 case Builtin::BI__builtin_acos:
383 case Builtin::BI__builtin_acosf:
384 case Builtin::BI__builtin_acosf16:
385 case Builtin::BI__builtin_acosl:
386 case Builtin::BI__builtin_acosf128:
387 case Builtin::BI__builtin_elementwise_acos:
388 case Builtin::BIasin:
389 case Builtin::BIasinf:
390 case Builtin::BIasinl:
391 case Builtin::BI__builtin_asin:
392 case Builtin::BI__builtin_asinf:
393 case Builtin::BI__builtin_asinf16:
394 case Builtin::BI__builtin_asinl:
395 case Builtin::BI__builtin_asinf128:
396 case Builtin::BI__builtin_elementwise_asin:
397 case Builtin::BIatan:
398 case Builtin::BIatanf:
399 case Builtin::BIatanl:
400 case Builtin::BI__builtin_atan:
401 case Builtin::BI__builtin_atanf:
402 case Builtin::BI__builtin_atanf16:
403 case Builtin::BI__builtin_atanl:
404 case Builtin::BI__builtin_atanf128:
405 case Builtin::BI__builtin_elementwise_atan:
406 case Builtin::BIatan2:
407 case Builtin::BIatan2f:
408 case Builtin::BIatan2l:
409 case Builtin::BI__builtin_atan2:
410 case Builtin::BI__builtin_atan2f:
411 case Builtin::BI__builtin_atan2f16:
412 case Builtin::BI__builtin_atan2l:
413 case Builtin::BI__builtin_atan2f128:
414 case Builtin::BI__builtin_elementwise_atan2:
415 return RValue::getIgnored();
416 case Builtin::BIceil:
417 case Builtin::BIceilf:
418 case Builtin::BIceill:
419 case Builtin::BI__builtin_ceil:
420 case Builtin::BI__builtin_ceilf:
421 case Builtin::BI__builtin_ceilf16:
422 case Builtin::BI__builtin_ceill:
423 case Builtin::BI__builtin_ceilf128:
425 case Builtin::BI__builtin_elementwise_ceil:
426 case Builtin::BIcopysign:
427 case Builtin::BIcopysignf:
428 case Builtin::BIcopysignl:
429 case Builtin::BI__builtin_copysign:
430 case Builtin::BI__builtin_copysignf:
431 case Builtin::BI__builtin_copysignf16:
432 case Builtin::BI__builtin_copysignl:
433 case Builtin::BI__builtin_copysignf128:
434 return RValue::getIgnored();
435 case Builtin::BIcos:
436 case Builtin::BIcosf:
437 case Builtin::BIcosl:
438 case Builtin::BI__builtin_cos:
439 case Builtin::BI__builtin_cosf:
440 case Builtin::BI__builtin_cosf16:
441 case Builtin::BI__builtin_cosl:
442 case Builtin::BI__builtin_cosf128:
444 case Builtin::BI__builtin_elementwise_cos:
445 case Builtin::BIcosh:
446 case Builtin::BIcoshf:
447 case Builtin::BIcoshl:
448 case Builtin::BI__builtin_cosh:
449 case Builtin::BI__builtin_coshf:
450 case Builtin::BI__builtin_coshf16:
451 case Builtin::BI__builtin_coshl:
452 case Builtin::BI__builtin_coshf128:
453 case Builtin::BI__builtin_elementwise_cosh:
454 return RValue::getIgnored();
455 case Builtin::BIexp:
456 case Builtin::BIexpf:
457 case Builtin::BIexpl:
458 case Builtin::BI__builtin_exp:
459 case Builtin::BI__builtin_expf:
460 case Builtin::BI__builtin_expf16:
461 case Builtin::BI__builtin_expl:
462 case Builtin::BI__builtin_expf128:
464 case Builtin::BI__builtin_elementwise_exp:
465 return RValue::getIgnored();
466 case Builtin::BIexp2:
467 case Builtin::BIexp2f:
468 case Builtin::BIexp2l:
469 case Builtin::BI__builtin_exp2:
470 case Builtin::BI__builtin_exp2f:
471 case Builtin::BI__builtin_exp2f16:
472 case Builtin::BI__builtin_exp2l:
473 case Builtin::BI__builtin_exp2f128:
475 case Builtin::BI__builtin_elementwise_exp2:
476 case Builtin::BI__builtin_exp10:
477 case Builtin::BI__builtin_exp10f:
478 case Builtin::BI__builtin_exp10f16:
479 case Builtin::BI__builtin_exp10l:
480 case Builtin::BI__builtin_exp10f128:
481 case Builtin::BI__builtin_elementwise_exp10:
482 return RValue::getIgnored();
483 case Builtin::BIfabs:
484 case Builtin::BIfabsf:
485 case Builtin::BIfabsl:
486 case Builtin::BI__builtin_fabs:
487 case Builtin::BI__builtin_fabsf:
488 case Builtin::BI__builtin_fabsf16:
489 case Builtin::BI__builtin_fabsl:
490 case Builtin::BI__builtin_fabsf128:
492 case Builtin::BIfloor:
493 case Builtin::BIfloorf:
494 case Builtin::BIfloorl:
495 case Builtin::BI__builtin_floor:
496 case Builtin::BI__builtin_floorf:
497 case Builtin::BI__builtin_floorf16:
498 case Builtin::BI__builtin_floorl:
499 case Builtin::BI__builtin_floorf128:
501 case Builtin::BI__builtin_elementwise_floor:
502 case Builtin::BIfma:
503 case Builtin::BIfmaf:
504 case Builtin::BIfmal:
505 case Builtin::BI__builtin_fma:
506 case Builtin::BI__builtin_fmaf:
507 case Builtin::BI__builtin_fmaf16:
508 case Builtin::BI__builtin_fmal:
509 case Builtin::BI__builtin_fmaf128:
510 case Builtin::BI__builtin_elementwise_fma:
511 case Builtin::BIfmax:
512 case Builtin::BIfmaxf:
513 case Builtin::BIfmaxl:
514 case Builtin::BI__builtin_fmax:
515 case Builtin::BI__builtin_fmaxf:
516 case Builtin::BI__builtin_fmaxf16:
517 case Builtin::BI__builtin_fmaxl:
518 case Builtin::BI__builtin_fmaxf128:
519 case Builtin::BIfmin:
520 case Builtin::BIfminf:
521 case Builtin::BIfminl:
522 case Builtin::BI__builtin_fmin:
523 case Builtin::BI__builtin_fminf:
524 case Builtin::BI__builtin_fminf16:
525 case Builtin::BI__builtin_fminl:
526 case Builtin::BI__builtin_fminf128:
527 case Builtin::BIfmaximum_num:
528 case Builtin::BIfmaximum_numf:
529 case Builtin::BIfmaximum_numl:
530 case Builtin::BI__builtin_fmaximum_num:
531 case Builtin::BI__builtin_fmaximum_numf:
532 case Builtin::BI__builtin_fmaximum_numf16:
533 case Builtin::BI__builtin_fmaximum_numl:
534 case Builtin::BI__builtin_fmaximum_numf128:
535 case Builtin::BIfminimum_num:
536 case Builtin::BIfminimum_numf:
537 case Builtin::BIfminimum_numl:
538 case Builtin::BI__builtin_fminimum_num:
539 case Builtin::BI__builtin_fminimum_numf:
540 case Builtin::BI__builtin_fminimum_numf16:
541 case Builtin::BI__builtin_fminimum_numl:
542 case Builtin::BI__builtin_fminimum_numf128:
543 case Builtin::BIfmod:
544 case Builtin::BIfmodf:
545 case Builtin::BIfmodl:
546 case Builtin::BI__builtin_fmod:
547 case Builtin::BI__builtin_fmodf:
548 case Builtin::BI__builtin_fmodf16:
549 case Builtin::BI__builtin_fmodl:
550 case Builtin::BI__builtin_fmodf128:
551 case Builtin::BI__builtin_elementwise_fmod:
552 case Builtin::BIlog:
553 case Builtin::BIlogf:
554 case Builtin::BIlogl:
555 case Builtin::BI__builtin_log:
556 case Builtin::BI__builtin_logf:
557 case Builtin::BI__builtin_logf16:
558 case Builtin::BI__builtin_logl:
559 case Builtin::BI__builtin_logf128:
560 case Builtin::BI__builtin_elementwise_log:
561 case Builtin::BIlog10:
562 case Builtin::BIlog10f:
563 case Builtin::BIlog10l:
564 case Builtin::BI__builtin_log10:
565 case Builtin::BI__builtin_log10f:
566 case Builtin::BI__builtin_log10f16:
567 case Builtin::BI__builtin_log10l:
568 case Builtin::BI__builtin_log10f128:
569 case Builtin::BI__builtin_elementwise_log10:
570 case Builtin::BIlog2:
571 case Builtin::BIlog2f:
572 case Builtin::BIlog2l:
573 case Builtin::BI__builtin_log2:
574 case Builtin::BI__builtin_log2f:
575 case Builtin::BI__builtin_log2f16:
576 case Builtin::BI__builtin_log2l:
577 case Builtin::BI__builtin_log2f128:
578 case Builtin::BI__builtin_elementwise_log2:
579 case Builtin::BInearbyint:
580 case Builtin::BInearbyintf:
581 case Builtin::BInearbyintl:
582 case Builtin::BI__builtin_nearbyint:
583 case Builtin::BI__builtin_nearbyintf:
584 case Builtin::BI__builtin_nearbyintl:
585 case Builtin::BI__builtin_nearbyintf128:
586 case Builtin::BI__builtin_elementwise_nearbyint:
587 case Builtin::BIpow:
588 case Builtin::BIpowf:
589 case Builtin::BIpowl:
590 case Builtin::BI__builtin_pow:
591 case Builtin::BI__builtin_powf:
592 case Builtin::BI__builtin_powf16:
593 case Builtin::BI__builtin_powl:
594 case Builtin::BI__builtin_powf128:
595 case Builtin::BI__builtin_elementwise_pow:
596 case Builtin::BIrint:
597 case Builtin::BIrintf:
598 case Builtin::BIrintl:
599 case Builtin::BI__builtin_rint:
600 case Builtin::BI__builtin_rintf:
601 case Builtin::BI__builtin_rintf16:
602 case Builtin::BI__builtin_rintl:
603 case Builtin::BI__builtin_rintf128:
604 case Builtin::BI__builtin_elementwise_rint:
605 case Builtin::BIround:
606 case Builtin::BIroundf:
607 case Builtin::BIroundl:
608 case Builtin::BI__builtin_round:
609 case Builtin::BI__builtin_roundf:
610 case Builtin::BI__builtin_roundf16:
611 case Builtin::BI__builtin_roundl:
612 case Builtin::BI__builtin_roundf128:
613 case Builtin::BI__builtin_elementwise_round:
614 case Builtin::BIroundeven:
615 case Builtin::BIroundevenf:
616 case Builtin::BIroundevenl:
617 case Builtin::BI__builtin_roundeven:
618 case Builtin::BI__builtin_roundevenf:
619 case Builtin::BI__builtin_roundevenf16:
620 case Builtin::BI__builtin_roundevenl:
621 case Builtin::BI__builtin_roundevenf128:
622 case Builtin::BI__builtin_elementwise_roundeven:
623 case Builtin::BIsin:
624 case Builtin::BIsinf:
625 case Builtin::BIsinl:
626 case Builtin::BI__builtin_sin:
627 case Builtin::BI__builtin_sinf:
628 case Builtin::BI__builtin_sinf16:
629 case Builtin::BI__builtin_sinl:
630 case Builtin::BI__builtin_sinf128:
631 case Builtin::BI__builtin_elementwise_sin:
632 case Builtin::BIsinh:
633 case Builtin::BIsinhf:
634 case Builtin::BIsinhl:
635 case Builtin::BI__builtin_sinh:
636 case Builtin::BI__builtin_sinhf:
637 case Builtin::BI__builtin_sinhf16:
638 case Builtin::BI__builtin_sinhl:
639 case Builtin::BI__builtin_sinhf128:
640 case Builtin::BI__builtin_elementwise_sinh:
641 case Builtin::BI__builtin_sincospi:
642 case Builtin::BI__builtin_sincospif:
643 case Builtin::BI__builtin_sincospil:
644 case Builtin::BIsincos:
645 case Builtin::BIsincosf:
646 case Builtin::BIsincosl:
647 case Builtin::BI__builtin_sincos:
648 case Builtin::BI__builtin_sincosf:
649 case Builtin::BI__builtin_sincosf16:
650 case Builtin::BI__builtin_sincosl:
651 case Builtin::BI__builtin_sincosf128:
652 case Builtin::BIsqrt:
653 case Builtin::BIsqrtf:
654 case Builtin::BIsqrtl:
655 case Builtin::BI__builtin_sqrt:
656 case Builtin::BI__builtin_sqrtf:
657 case Builtin::BI__builtin_sqrtf16:
658 case Builtin::BI__builtin_sqrtl:
659 case Builtin::BI__builtin_sqrtf128:
660 case Builtin::BI__builtin_elementwise_sqrt:
661 case Builtin::BItan:
662 case Builtin::BItanf:
663 case Builtin::BItanl:
664 case Builtin::BI__builtin_tan:
665 case Builtin::BI__builtin_tanf:
666 case Builtin::BI__builtin_tanf16:
667 case Builtin::BI__builtin_tanl:
668 case Builtin::BI__builtin_tanf128:
669 case Builtin::BI__builtin_elementwise_tan:
670 case Builtin::BItanh:
671 case Builtin::BItanhf:
672 case Builtin::BItanhl:
673 case Builtin::BI__builtin_tanh:
674 case Builtin::BI__builtin_tanhf:
675 case Builtin::BI__builtin_tanhf16:
676 case Builtin::BI__builtin_tanhl:
677 case Builtin::BI__builtin_tanhf128:
678 case Builtin::BI__builtin_elementwise_tanh:
679 case Builtin::BItrunc:
680 case Builtin::BItruncf:
681 case Builtin::BItruncl:
682 case Builtin::BI__builtin_trunc:
683 case Builtin::BI__builtin_truncf:
684 case Builtin::BI__builtin_truncf16:
685 case Builtin::BI__builtin_truncl:
686 case Builtin::BI__builtin_truncf128:
687 case Builtin::BI__builtin_elementwise_trunc:
688 case Builtin::BIlround:
689 case Builtin::BIlroundf:
690 case Builtin::BIlroundl:
691 case Builtin::BI__builtin_lround:
692 case Builtin::BI__builtin_lroundf:
693 case Builtin::BI__builtin_lroundl:
694 case Builtin::BI__builtin_lroundf128:
695 case Builtin::BIllround:
696 case Builtin::BIllroundf:
697 case Builtin::BIllroundl:
698 case Builtin::BI__builtin_llround:
699 case Builtin::BI__builtin_llroundf:
700 case Builtin::BI__builtin_llroundl:
701 case Builtin::BI__builtin_llroundf128:
702 case Builtin::BIlrint:
703 case Builtin::BIlrintf:
704 case Builtin::BIlrintl:
705 case Builtin::BI__builtin_lrint:
706 case Builtin::BI__builtin_lrintf:
707 case Builtin::BI__builtin_lrintl:
708 case Builtin::BI__builtin_lrintf128:
709 case Builtin::BIllrint:
710 case Builtin::BIllrintf:
711 case Builtin::BIllrintl:
712 case Builtin::BI__builtin_llrint:
713 case Builtin::BI__builtin_llrintf:
714 case Builtin::BI__builtin_llrintl:
715 case Builtin::BI__builtin_llrintf128:
716 case Builtin::BI__builtin_ldexp:
717 case Builtin::BI__builtin_ldexpf:
718 case Builtin::BI__builtin_ldexpl:
719 case Builtin::BI__builtin_ldexpf16:
720 case Builtin::BI__builtin_ldexpf128:
721 case Builtin::BI__builtin_elementwise_ldexp:
722 default:
723 break;
724 }
725
726 return RValue::getIgnored();
727}
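// A result of RValue::getIgnored() means "no FP math intrinsic was emitted";
// the caller in emitBuiltinExpr checks isIgnored() and falls back to the
// generic builtin / library-call handling below.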
728
729RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl gd, unsigned builtinID,
730 const CallExpr *e,
731 ReturnValueSlot returnValue) {
732 mlir::Location loc = getLoc(e->getSourceRange());
733
734 // See if we can constant fold this builtin. If so, don't emit it at all.
735 // TODO: Extend this handling to all builtin calls that we can constant-fold.
736 Expr::EvalResult result;
737 if (e->isPRValue() && e->EvaluateAsRValue(result, cgm.getASTContext()) &&
738 !result.hasSideEffects()) {
739 if (result.Val.isInt())
740 return RValue::get(builder.getConstInt(loc, result.Val.getInt()));
741 if (result.Val.isFloat()) {
742 // Note: we are using the result type of the CallExpr to determine the
743 // type of the constant. Classic codegen uses the result value to
744 // determine the type. We feel it should be OK to use the expression type
745 // because it is hard to imagine a builtin function evaluating to a value
746 // that over/underflows its own defined type.
747 mlir::Type type = convertType(e->getType());
748 return RValue::get(builder.getConstFP(loc, type, result.Val.getFloat()));
749 }
750 }
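// For instance, with 32-bit int, __builtin_clz(16) in a constant context is
// folded to 27 right here and never reaches the switch below.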
751
752 const FunctionDecl *fd = gd.getDecl()->getAsFunction();
753
755
756 // If the builtin has been declared explicitly with an assembler label,
757 // disable the specialized emitting below. Ideally we should communicate the
758 // rename in IR, or at least avoid generating the intrinsic calls that are
759 // likely to get lowered to the renamed library functions.
760 unsigned builtinIDIfNoAsmLabel = fd->hasAttr<AsmLabelAttr>() ? 0 : builtinID;
761
762 bool generateFPMathIntrinsics =
763 shouldCIREmitFPMathIntrinsic(*this, e, builtinID);
764
765 if (generateFPMathIntrinsics) {
766 // Try to match the builtinID with a floating point math builtin.
767 RValue rv = tryEmitFPMathIntrinsic(*this, e, builtinIDIfNoAsmLabel);
768
769 // Return the result directly if a math intrinsic was generated.
770 if (!rv.isIgnored()) {
771 return rv;
772 }
773 }
774
776
777 switch (builtinIDIfNoAsmLabel) {
778 default:
779 break;
780
781 // C stdarg builtins.
782 case Builtin::BI__builtin_stdarg_start:
783 case Builtin::BI__builtin_va_start:
784 case Builtin::BI__va_start: {
785 mlir::Value vaList = builtinID == Builtin::BI__va_start
786 ? emitScalarExpr(e->getArg(0))
787 : emitVAListRef(e->getArg(0)).getPointer();
788 mlir::Value count = emitScalarExpr(e->getArg(1));
789 emitVAStart(vaList, count);
790 return {};
791 }
792
793 case Builtin::BI__builtin_va_end:
795 return {};
796 case Builtin::BI__builtin_va_copy: {
797 mlir::Value dstPtr = emitVAListRef(e->getArg(0)).getPointer();
798 mlir::Value srcPtr = emitVAListRef(e->getArg(1)).getPointer();
799 cir::VACopyOp::create(builder, dstPtr.getLoc(), dstPtr, srcPtr);
800 return {};
801 }
802 case Builtin::BI__assume:
803 case Builtin::BI__builtin_assume: {
804 if (e->getArg(0)->HasSideEffects(getContext()))
805 return RValue::get(nullptr);
806
807 mlir::Value argValue = emitCheckedArgForAssume(e->getArg(0));
808 cir::AssumeOp::create(builder, loc, argValue);
809 return RValue::get(nullptr);
810 }
811
812 case Builtin::BI__builtin_assume_separate_storage: {
813 mlir::Value value0 = emitScalarExpr(e->getArg(0));
814 mlir::Value value1 = emitScalarExpr(e->getArg(1));
815 cir::AssumeSepStorageOp::create(builder, loc, value0, value1);
816 return RValue::get(nullptr);
817 }
818
819 case Builtin::BI__builtin_assume_aligned: {
820 const Expr *ptrExpr = e->getArg(0);
821 mlir::Value ptrValue = emitScalarExpr(ptrExpr);
822 mlir::Value offsetValue =
823 (e->getNumArgs() > 2) ? emitScalarExpr(e->getArg(2)) : nullptr;
824
825 std::optional<llvm::APSInt> alignment =
826 e->getArg(1)->getIntegerConstantExpr(getContext());
827 assert(alignment.has_value() &&
828 "the second argument to __builtin_assume_aligned must be an "
829 "integral constant expression");
830
831 mlir::Value result =
832 emitAlignmentAssumption(ptrValue, ptrExpr, ptrExpr->getExprLoc(),
833 alignment->getSExtValue(), offsetValue);
834 return RValue::get(result);
835 }
836
837 case Builtin::BI__builtin_complex: {
838 mlir::Value real = emitScalarExpr(e->getArg(0));
839 mlir::Value imag = emitScalarExpr(e->getArg(1));
840 mlir::Value complex = builder.createComplexCreate(loc, real, imag);
841 return RValue::getComplex(complex);
842 }
843
844 case Builtin::BI__builtin_creal:
845 case Builtin::BI__builtin_crealf:
846 case Builtin::BI__builtin_creall:
847 case Builtin::BIcreal:
848 case Builtin::BIcrealf:
849 case Builtin::BIcreall: {
850 mlir::Value complex = emitComplexExpr(e->getArg(0));
851 mlir::Value real = builder.createComplexReal(loc, complex);
852 return RValue::get(real);
853 }
854
855 case Builtin::BI__builtin_cimag:
856 case Builtin::BI__builtin_cimagf:
857 case Builtin::BI__builtin_cimagl:
858 case Builtin::BIcimag:
859 case Builtin::BIcimagf:
860 case Builtin::BIcimagl: {
861 mlir::Value complex = emitComplexExpr(e->getArg(0));
862 mlir::Value imag = builder.createComplexImag(loc, complex);
863 return RValue::get(imag);
864 }
865
866 case Builtin::BI__builtin_conj:
867 case Builtin::BI__builtin_conjf:
868 case Builtin::BI__builtin_conjl:
869 case Builtin::BIconj:
870 case Builtin::BIconjf:
871 case Builtin::BIconjl: {
872 mlir::Value complex = emitComplexExpr(e->getArg(0));
873 mlir::Value conj = builder.createUnaryOp(getLoc(e->getExprLoc()),
874 cir::UnaryOpKind::Not, complex);
875 return RValue::getComplex(conj);
876 }
877
878 case Builtin::BI__builtin_clrsb:
879 case Builtin::BI__builtin_clrsbl:
880 case Builtin::BI__builtin_clrsbll:
881 return emitBuiltinBitOp<cir::BitClrsbOp>(*this, e);
882
883 case Builtin::BI__builtin_ctzs:
884 case Builtin::BI__builtin_ctz:
885 case Builtin::BI__builtin_ctzl:
886 case Builtin::BI__builtin_ctzll:
887 case Builtin::BI__builtin_ctzg:
889 return emitBuiltinBitOp<cir::BitCtzOp>(*this, e, /*poisonZero=*/true);
890
891 case Builtin::BI__builtin_clzs:
892 case Builtin::BI__builtin_clz:
893 case Builtin::BI__builtin_clzl:
894 case Builtin::BI__builtin_clzll:
895 case Builtin::BI__builtin_clzg:
897 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/true);
898
899 case Builtin::BI__builtin_ffs:
900 case Builtin::BI__builtin_ffsl:
901 case Builtin::BI__builtin_ffsll:
902 return emitBuiltinBitOp<cir::BitFfsOp>(*this, e);
903
904 case Builtin::BI__builtin_parity:
905 case Builtin::BI__builtin_parityl:
906 case Builtin::BI__builtin_parityll:
907 return emitBuiltinBitOp<cir::BitParityOp>(*this, e);
908
909 case Builtin::BI__lzcnt16:
910 case Builtin::BI__lzcnt:
911 case Builtin::BI__lzcnt64:
913 return emitBuiltinBitOp<cir::BitClzOp>(*this, e, /*poisonZero=*/false);
914
915 case Builtin::BI__popcnt16:
916 case Builtin::BI__popcnt:
917 case Builtin::BI__popcnt64:
918 case Builtin::BI__builtin_popcount:
919 case Builtin::BI__builtin_popcountl:
920 case Builtin::BI__builtin_popcountll:
921 case Builtin::BI__builtin_popcountg:
922 return emitBuiltinBitOp<cir::BitPopcountOp>(*this, e);
923
924 case Builtin::BI__builtin_expect:
925 case Builtin::BI__builtin_expect_with_probability: {
926 mlir::Value argValue = emitScalarExpr(e->getArg(0));
927 mlir::Value expectedValue = emitScalarExpr(e->getArg(1));
928
929 mlir::FloatAttr probAttr;
930 if (builtinIDIfNoAsmLabel == Builtin::BI__builtin_expect_with_probability) {
931 llvm::APFloat probability(0.0);
932 const Expr *probArg = e->getArg(2);
933 [[maybe_unused]] bool evalSucceeded =
934 probArg->EvaluateAsFloat(probability, cgm.getASTContext());
935 assert(evalSucceeded &&
936 "probability should be able to evaluate as float");
937 bool loseInfo = false; // ignored
938 probability.convert(llvm::APFloat::IEEEdouble(),
939 llvm::RoundingMode::Dynamic, &loseInfo);
940 probAttr = mlir::FloatAttr::get(mlir::Float64Type::get(&getMLIRContext()),
941 probability);
942 }
943
944 auto result = cir::ExpectOp::create(builder, loc, argValue.getType(),
945 argValue, expectedValue, probAttr);
946 return RValue::get(result);
947 }
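// Note that the probability argument, when present, must be a compile-time
// constant; it is evaluated here and attached to the cir::ExpectOp as an f64
// attribute rather than being passed as an operand.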
948
949 case Builtin::BI__builtin_bswap16:
950 case Builtin::BI__builtin_bswap32:
951 case Builtin::BI__builtin_bswap64:
952 case Builtin::BI_byteswap_ushort:
953 case Builtin::BI_byteswap_ulong:
954 case Builtin::BI_byteswap_uint64: {
955 mlir::Value arg = emitScalarExpr(e->getArg(0));
956 return RValue::get(cir::ByteSwapOp::create(builder, loc, arg));
957 }
958
959 case Builtin::BI__builtin_bitreverse8:
960 case Builtin::BI__builtin_bitreverse16:
961 case Builtin::BI__builtin_bitreverse32:
962 case Builtin::BI__builtin_bitreverse64: {
963 mlir::Value arg = emitScalarExpr(e->getArg(0));
964 return RValue::get(cir::BitReverseOp::create(builder, loc, arg));
965 }
966
967 case Builtin::BI__builtin_rotateleft8:
968 case Builtin::BI__builtin_rotateleft16:
969 case Builtin::BI__builtin_rotateleft32:
970 case Builtin::BI__builtin_rotateleft64:
971 return emitRotate(e, /*isRotateLeft=*/true);
972
973 case Builtin::BI__builtin_rotateright8:
974 case Builtin::BI__builtin_rotateright16:
975 case Builtin::BI__builtin_rotateright32:
976 case Builtin::BI__builtin_rotateright64:
977 return emitRotate(e, /*isRotateLeft=*/false);
978
979 case Builtin::BI__builtin_coro_id:
980 case Builtin::BI__builtin_coro_promise:
981 case Builtin::BI__builtin_coro_resume:
982 case Builtin::BI__builtin_coro_noop:
983 case Builtin::BI__builtin_coro_destroy:
984 case Builtin::BI__builtin_coro_done:
985 case Builtin::BI__builtin_coro_alloc:
986 case Builtin::BI__builtin_coro_begin:
987 case Builtin::BI__builtin_coro_end:
988 case Builtin::BI__builtin_coro_suspend:
989 case Builtin::BI__builtin_coro_align:
990 cgm.errorNYI(e->getSourceRange(), "BI__builtin_coro_id like NYI");
991 return getUndefRValue(e->getType());
992
993 case Builtin::BI__builtin_coro_frame: {
994 return emitCoroutineFrame();
995 }
996 case Builtin::BI__builtin_coro_free:
997 case Builtin::BI__builtin_coro_size: {
998 GlobalDecl gd{fd};
999 mlir::Type ty = cgm.getTypes().getFunctionType(
1000 cgm.getTypes().arrangeGlobalDeclaration(gd));
1001 const auto *nd = cast<NamedDecl>(gd.getDecl());
1002 cir::FuncOp fnOp =
1003 cgm.getOrCreateCIRFunction(nd->getName(), ty, gd, /*ForVTable=*/false);
1004 fnOp.setBuiltin(true);
1005 return emitCall(e->getCallee()->getType(), CIRGenCallee::forDirect(fnOp), e,
1006 returnValue);
1007 }
1008
1009 case Builtin::BI__builtin_constant_p: {
1010 mlir::Type resultType = convertType(e->getType());
1011
1012 const Expr *arg = e->getArg(0);
1013 QualType argType = arg->getType();
1014 // FIXME: The allowance for Obj-C pointers and block pointers is historical
1015 // and likely a mistake.
1016 if (!argType->isIntegralOrEnumerationType() && !argType->isFloatingType() &&
1017 !argType->isObjCObjectPointerType() && !argType->isBlockPointerType()) {
1018 // Per the GCC documentation, only numeric constants are recognized after
1019 // inlining.
1020 return RValue::get(
1021 builder.getConstInt(getLoc(e->getSourceRange()),
1022 mlir::cast<cir::IntType>(resultType), 0));
1023 }
1024
1025 if (arg->HasSideEffects(getContext())) {
1026 // The argument is unevaluated, so be conservative if it might have
1027 // side-effects.
1028 return RValue::get(
1029 builder.getConstInt(getLoc(e->getSourceRange()),
1030 mlir::cast<cir::IntType>(resultType), 0));
1031 }
1032
1033 mlir::Value argValue = emitScalarExpr(arg);
1034 if (argType->isObjCObjectPointerType()) {
1035 cgm.errorNYI(e->getSourceRange(),
1036 "__builtin_constant_p: Obj-C object pointer");
1037 return {};
1038 }
1039 argValue = builder.createBitcast(argValue, convertType(argType));
1040
1041 mlir::Value result = cir::IsConstantOp::create(
1042 builder, getLoc(e->getSourceRange()), argValue);
1043 // IsConstantOp returns a bool, but __builtin_constant_p returns an int.
1044 result = builder.createBoolToInt(result, resultType);
1045 return RValue::get(result);
1046 }
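// For example, __builtin_constant_p(3) produces a cir::IsConstantOp that can
// later fold to 1, while a side-effecting or non-numeric argument is answered
// with a constant 0 right away.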
1047 case Builtin::BI__builtin_dynamic_object_size:
1048 case Builtin::BI__builtin_object_size: {
1049 unsigned type =
1050 e->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
1051 auto resType = mlir::cast<cir::IntType>(convertType(e->getType()));
1052
1053 // We pass this builtin onto the optimizer so that it can figure out the
1054 // object size in more complex cases.
1055 bool isDynamic = builtinID == Builtin::BI__builtin_dynamic_object_size;
1056 return RValue::get(emitBuiltinObjectSize(e->getArg(0), type, resType,
1057 /*EmittedE=*/nullptr, isDynamic));
1058 }
1059
1060 case Builtin::BI__builtin_prefetch: {
1061 auto evaluateOperandAsInt = [&](const Expr *arg) {
1062 Expr::EvalResult res;
1063 [[maybe_unused]] bool evalSucceed =
1064 arg->EvaluateAsInt(res, cgm.getASTContext());
1065 assert(evalSucceed && "expression should be able to evaluate as int");
1066 return res.Val.getInt().getZExtValue();
1067 };
1068
1069 bool isWrite = false;
1070 if (e->getNumArgs() > 1)
1071 isWrite = evaluateOperandAsInt(e->getArg(1));
1072
1073 int locality = 3;
1074 if (e->getNumArgs() > 2)
1075 locality = evaluateOperandAsInt(e->getArg(2));
1076
1077 mlir::Value address = emitScalarExpr(e->getArg(0));
1078 cir::PrefetchOp::create(builder, loc, address, locality, isWrite);
1079 return RValue::get(nullptr);
1080 }
1081 case Builtin::BI__builtin_readcyclecounter:
1082 case Builtin::BI__builtin_readsteadycounter:
1083 return errorBuiltinNYI(*this, e, builtinID);
1084 case Builtin::BI__builtin___clear_cache: {
1085 mlir::Value begin =
1086 builder.createPtrBitcast(emitScalarExpr(e->getArg(0)), cgm.voidTy);
1087 mlir::Value end =
1088 builder.createPtrBitcast(emitScalarExpr(e->getArg(1)), cgm.voidTy);
1089 cir::ClearCacheOp::create(builder, getLoc(e->getSourceRange()), begin, end);
1090 return RValue::get(nullptr);
1091 }
1092 case Builtin::BI__builtin_trap:
1093 emitTrap(loc, /*createNewBlock=*/true);
1094 return RValue::getIgnored();
1095 case Builtin::BI__builtin_verbose_trap:
1096 case Builtin::BI__debugbreak:
1097 return errorBuiltinNYI(*this, e, builtinID);
1098 case Builtin::BI__builtin_unreachable:
1099 emitUnreachable(e->getExprLoc(), /*createNewBlock=*/true);
1100 return RValue::getIgnored();
1101 case Builtin::BI__builtin_powi:
1102 case Builtin::BI__builtin_powif:
1103 case Builtin::BI__builtin_powil:
1104 case Builtin::BI__builtin_frexpl:
1105 case Builtin::BI__builtin_frexp:
1106 case Builtin::BI__builtin_frexpf:
1107 case Builtin::BI__builtin_frexpf128:
1108 case Builtin::BI__builtin_frexpf16:
1109 case Builtin::BImodf:
1110 case Builtin::BImodff:
1111 case Builtin::BImodfl:
1112 case Builtin::BI__builtin_modf:
1113 case Builtin::BI__builtin_modff:
1114 case Builtin::BI__builtin_modfl:
1115 case Builtin::BI__builtin_isgreater:
1116 case Builtin::BI__builtin_isgreaterequal:
1117 case Builtin::BI__builtin_isless:
1118 case Builtin::BI__builtin_islessequal:
1119 case Builtin::BI__builtin_islessgreater:
1120 case Builtin::BI__builtin_isunordered:
1121 // From https://clang.llvm.org/docs/LanguageExtensions.html#builtin-isfpclass
1122 //
1123 // The `__builtin_isfpclass()` builtin is a generalization of functions
1124 // isnan, isinf, isfinite and some others defined by the C standard. It
1125 // tests whether the floating-point value, specified by the first argument,
1126 // falls into any of the data classes specified by the second argument.
1127 case Builtin::BI__builtin_isnan: {
1129 mlir::Value v = emitScalarExpr(e->getArg(0));
1131 mlir::Location loc = getLoc(e->getBeginLoc());
1132 return RValue::get(builder.createBoolToInt(
1133 builder.createIsFPClass(loc, v, cir::FPClassTest::Nan),
1134 convertType(e->getType())));
1135 }
1136
1137 case Builtin::BI__builtin_issignaling: {
1139 mlir::Value v = emitScalarExpr(e->getArg(0));
1140 mlir::Location loc = getLoc(e->getBeginLoc());
1141 return RValue::get(builder.createBoolToInt(
1142 builder.createIsFPClass(loc, v, cir::FPClassTest::SignalingNaN),
1143 convertType(e->getType())));
1144 }
1145
1146 case Builtin::BI__builtin_isinf: {
1148 mlir::Value v = emitScalarExpr(e->getArg(0));
1150 mlir::Location loc = getLoc(e->getBeginLoc());
1151 return RValue::get(builder.createBoolToInt(
1152 builder.createIsFPClass(loc, v, cir::FPClassTest::Infinity),
1153 convertType(e->getType())));
1154 }
1155 case Builtin::BIfinite:
1156 case Builtin::BI__finite:
1157 case Builtin::BIfinitef:
1158 case Builtin::BI__finitef:
1159 case Builtin::BIfinitel:
1160 case Builtin::BI__finitel:
1161 case Builtin::BI__builtin_isfinite: {
1163 mlir::Value v = emitScalarExpr(e->getArg(0));
1165 mlir::Location loc = getLoc(e->getBeginLoc());
1166 return RValue::get(builder.createBoolToInt(
1167 builder.createIsFPClass(loc, v, cir::FPClassTest::Finite),
1168 convertType(e->getType())));
1169 }
1170
1171 case Builtin::BI__builtin_isnormal: {
1173 mlir::Value v = emitScalarExpr(e->getArg(0));
1174 mlir::Location loc = getLoc(e->getBeginLoc());
1175 return RValue::get(builder.createBoolToInt(
1176 builder.createIsFPClass(loc, v, cir::FPClassTest::Normal),
1177 convertType(e->getType())));
1178 }
1179
1180 case Builtin::BI__builtin_issubnormal: {
1182 mlir::Value v = emitScalarExpr(e->getArg(0));
1183 mlir::Location loc = getLoc(e->getBeginLoc());
1184 return RValue::get(builder.createBoolToInt(
1185 builder.createIsFPClass(loc, v, cir::FPClassTest::Subnormal),
1186 convertType(e->getType())));
1187 }
1188
1189 case Builtin::BI__builtin_iszero: {
1191 mlir::Value v = emitScalarExpr(e->getArg(0));
1192 mlir::Location loc = getLoc(e->getBeginLoc());
1193 return RValue::get(builder.createBoolToInt(
1194 builder.createIsFPClass(loc, v, cir::FPClassTest::Zero),
1195 convertType(e->getType())));
1196 }
1197 case Builtin::BI__builtin_isfpclass: {
1198 Expr::EvalResult result;
1199 if (!e->getArg(1)->EvaluateAsInt(result, cgm.getASTContext()))
1200 break;
1201
1203 mlir::Value v = emitScalarExpr(e->getArg(0));
1204 uint64_t test = result.Val.getInt().getLimitedValue();
1205 mlir::Location loc = getLoc(e->getBeginLoc());
1206 //
1207 return RValue::get(builder.createBoolToInt(
1208 builder.createIsFPClass(loc, v, cir::FPClassTest(test)),
1209 convertType(e->getType())));
1210 }
1211 case Builtin::BI__builtin_nondeterministic_value:
1212 case Builtin::BI__builtin_elementwise_abs:
1213 return errorBuiltinNYI(*this, e, builtinID);
1214 case Builtin::BI__builtin_elementwise_acos:
1215 return emitUnaryFPBuiltin<cir::ACosOp>(*this, *e);
1216 case Builtin::BI__builtin_elementwise_asin:
1217 return emitUnaryFPBuiltin<cir::ASinOp>(*this, *e);
1218 case Builtin::BI__builtin_elementwise_atan:
1219 return emitUnaryFPBuiltin<cir::ATanOp>(*this, *e);
1220 case Builtin::BI__builtin_elementwise_atan2:
1221 case Builtin::BI__builtin_elementwise_ceil:
1222 case Builtin::BI__builtin_elementwise_exp:
1223 case Builtin::BI__builtin_elementwise_exp2:
1224 case Builtin::BI__builtin_elementwise_exp10:
1225 case Builtin::BI__builtin_elementwise_ldexp:
1226 case Builtin::BI__builtin_elementwise_log:
1227 case Builtin::BI__builtin_elementwise_log2:
1228 case Builtin::BI__builtin_elementwise_log10:
1229 case Builtin::BI__builtin_elementwise_pow:
1230 case Builtin::BI__builtin_elementwise_bitreverse:
1231 return errorBuiltinNYI(*this, e, builtinID);
1232 case Builtin::BI__builtin_elementwise_cos:
1233 return emitUnaryFPBuiltin<cir::CosOp>(*this, *e);
1234 case Builtin::BI__builtin_elementwise_cosh:
1235 case Builtin::BI__builtin_elementwise_floor:
1236 case Builtin::BI__builtin_elementwise_popcount:
1237 case Builtin::BI__builtin_elementwise_roundeven:
1238 case Builtin::BI__builtin_elementwise_round:
1239 case Builtin::BI__builtin_elementwise_rint:
1240 case Builtin::BI__builtin_elementwise_nearbyint:
1241 case Builtin::BI__builtin_elementwise_sin:
1242 case Builtin::BI__builtin_elementwise_sinh:
1243 case Builtin::BI__builtin_elementwise_tan:
1244 case Builtin::BI__builtin_elementwise_tanh:
1245 case Builtin::BI__builtin_elementwise_trunc:
1246 case Builtin::BI__builtin_elementwise_canonicalize:
1247 case Builtin::BI__builtin_elementwise_copysign:
1248 case Builtin::BI__builtin_elementwise_fma:
1249 case Builtin::BI__builtin_elementwise_fshl:
1250 case Builtin::BI__builtin_elementwise_fshr:
1251 case Builtin::BI__builtin_elementwise_add_sat:
1252 case Builtin::BI__builtin_elementwise_sub_sat:
1253 case Builtin::BI__builtin_elementwise_max:
1254 case Builtin::BI__builtin_elementwise_min:
1255 case Builtin::BI__builtin_elementwise_maxnum:
1256 case Builtin::BI__builtin_elementwise_minnum:
1257 case Builtin::BI__builtin_elementwise_maximum:
1258 case Builtin::BI__builtin_elementwise_minimum:
1259 case Builtin::BI__builtin_elementwise_maximumnum:
1260 case Builtin::BI__builtin_elementwise_minimumnum:
1261 case Builtin::BI__builtin_reduce_max:
1262 case Builtin::BI__builtin_reduce_min:
1263 case Builtin::BI__builtin_reduce_add:
1264 case Builtin::BI__builtin_reduce_mul:
1265 case Builtin::BI__builtin_reduce_xor:
1266 case Builtin::BI__builtin_reduce_or:
1267 case Builtin::BI__builtin_reduce_and:
1268 case Builtin::BI__builtin_reduce_maximum:
1269 case Builtin::BI__builtin_reduce_minimum:
1270 case Builtin::BI__builtin_matrix_transpose:
1271 case Builtin::BI__builtin_matrix_column_major_load:
1272 case Builtin::BI__builtin_matrix_column_major_store:
1273 case Builtin::BI__builtin_masked_load:
1274 case Builtin::BI__builtin_masked_expand_load:
1275 case Builtin::BI__builtin_masked_gather:
1276 case Builtin::BI__builtin_masked_store:
1277 case Builtin::BI__builtin_masked_compress_store:
1278 case Builtin::BI__builtin_masked_scatter:
1279 case Builtin::BI__builtin_isinf_sign:
1280 case Builtin::BI__builtin_flt_rounds:
1281 case Builtin::BI__builtin_set_flt_rounds:
1282 case Builtin::BI__builtin_fpclassify:
1283 return errorBuiltinNYI(*this, e, builtinID);
1284 case Builtin::BIalloca:
1285 case Builtin::BI_alloca:
1286 case Builtin::BI__builtin_alloca_uninitialized:
1287 case Builtin::BI__builtin_alloca:
1288 return emitBuiltinAlloca(*this, e, builtinID);
1289 case Builtin::BI__builtin_alloca_with_align_uninitialized:
1290 case Builtin::BI__builtin_alloca_with_align:
1291 case Builtin::BI__builtin_infer_alloc_token:
1292 case Builtin::BIbzero:
1293 case Builtin::BI__builtin_bzero:
1294 case Builtin::BIbcopy:
1295 case Builtin::BI__builtin_bcopy:
1296 return errorBuiltinNYI(*this, e, builtinID);
1297 case Builtin::BI__builtin_char_memchr:
1298 case Builtin::BI__builtin_memchr: {
1299 Address srcPtr = emitPointerWithAlignment(e->getArg(0));
1300 mlir::Value src =
1301 builder.createBitcast(srcPtr.getPointer(), builder.getVoidPtrTy());
1302 mlir::Value pattern = emitScalarExpr(e->getArg(1));
1303 mlir::Value len = emitScalarExpr(e->getArg(2));
1304 mlir::Value res = cir::MemChrOp::create(builder, getLoc(e->getExprLoc()),
1305 src, pattern, len);
1306 return RValue::get(res);
1307 }
1308 case Builtin::BImemcpy:
1309 case Builtin::BI__builtin_memcpy:
1310 case Builtin::BImempcpy:
1311 case Builtin::BI__builtin_mempcpy:
1312 case Builtin::BI__builtin_memcpy_inline:
1313 case Builtin::BI__builtin___memcpy_chk:
1314 case Builtin::BI__builtin_objc_memmove_collectable:
1315 case Builtin::BI__builtin___memmove_chk:
1316 case Builtin::BI__builtin_trivially_relocate:
1317 case Builtin::BImemmove:
1318 case Builtin::BI__builtin_memmove:
1319 case Builtin::BImemset:
1320 case Builtin::BI__builtin_memset:
1321 case Builtin::BI__builtin_memset_inline:
1322 case Builtin::BI__builtin___memset_chk:
1323 case Builtin::BI__builtin_wmemchr:
1324 case Builtin::BI__builtin_wmemcmp:
1325 break; // Handled as library calls below.
1326 case Builtin::BI__builtin_dwarf_cfa:
1327 return errorBuiltinNYI(*this, e, builtinID);
1328 case Builtin::BI__builtin_return_address: {
1329 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1330 return RValue::get(cir::ReturnAddrOp::create(
1331 builder, getLoc(e->getExprLoc()),
1332 builder.getConstAPInt(loc, builder.getUInt32Ty(), level)));
1333 }
1334 case Builtin::BI_ReturnAddress: {
1335 return RValue::get(cir::ReturnAddrOp::create(
1336 builder, getLoc(e->getExprLoc()),
1337 builder.getConstInt(loc, builder.getUInt32Ty(), 0)));
1338 }
1339 case Builtin::BI__builtin_frame_address: {
1340 llvm::APSInt level = e->getArg(0)->EvaluateKnownConstInt(getContext());
1341 mlir::Location loc = getLoc(e->getExprLoc());
1342 mlir::Value addr = cir::FrameAddrOp::create(
1343 builder, loc, allocaInt8PtrTy,
1344 builder.getConstAPInt(loc, builder.getUInt32Ty(), level));
1345 return RValue::get(
1346 builder.createCast(loc, cir::CastKind::bitcast, addr, voidPtrTy));
1347 }
1348 case Builtin::BI__builtin_extract_return_addr:
1349 case Builtin::BI__builtin_frob_return_addr:
1350 case Builtin::BI__builtin_dwarf_sp_column:
1351 case Builtin::BI__builtin_init_dwarf_reg_size_table:
1352 case Builtin::BI__builtin_eh_return:
1353 case Builtin::BI__builtin_unwind_init:
1354 case Builtin::BI__builtin_extend_pointer:
1355 case Builtin::BI__builtin_setjmp:
1356 case Builtin::BI__builtin_longjmp:
1357 case Builtin::BI__builtin_launder:
1358 case Builtin::BI__sync_fetch_and_add:
1359 case Builtin::BI__sync_fetch_and_sub:
1360 case Builtin::BI__sync_fetch_and_or:
1361 case Builtin::BI__sync_fetch_and_and:
1362 case Builtin::BI__sync_fetch_and_xor:
1363 case Builtin::BI__sync_fetch_and_nand:
1364 case Builtin::BI__sync_add_and_fetch:
1365 case Builtin::BI__sync_sub_and_fetch:
1366 case Builtin::BI__sync_and_and_fetch:
1367 case Builtin::BI__sync_or_and_fetch:
1368 case Builtin::BI__sync_xor_and_fetch:
1369 case Builtin::BI__sync_nand_and_fetch:
1370 case Builtin::BI__sync_val_compare_and_swap:
1371 case Builtin::BI__sync_bool_compare_and_swap:
1372 case Builtin::BI__sync_lock_test_and_set:
1373 case Builtin::BI__sync_lock_release:
1374 case Builtin::BI__sync_swap:
1375 case Builtin::BI__sync_fetch_and_add_1:
1376 case Builtin::BI__sync_fetch_and_add_2:
1377 case Builtin::BI__sync_fetch_and_add_4:
1378 case Builtin::BI__sync_fetch_and_add_8:
1379 case Builtin::BI__sync_fetch_and_add_16:
1380 case Builtin::BI__sync_fetch_and_sub_1:
1381 case Builtin::BI__sync_fetch_and_sub_2:
1382 case Builtin::BI__sync_fetch_and_sub_4:
1383 case Builtin::BI__sync_fetch_and_sub_8:
1384 case Builtin::BI__sync_fetch_and_sub_16:
1385 case Builtin::BI__sync_fetch_and_or_1:
1386 case Builtin::BI__sync_fetch_and_or_2:
1387 case Builtin::BI__sync_fetch_and_or_4:
1388 case Builtin::BI__sync_fetch_and_or_8:
1389 case Builtin::BI__sync_fetch_and_or_16:
1390 case Builtin::BI__sync_fetch_and_and_1:
1391 case Builtin::BI__sync_fetch_and_and_2:
1392 case Builtin::BI__sync_fetch_and_and_4:
1393 case Builtin::BI__sync_fetch_and_and_8:
1394 case Builtin::BI__sync_fetch_and_and_16:
1395 case Builtin::BI__sync_fetch_and_xor_1:
1396 case Builtin::BI__sync_fetch_and_xor_2:
1397 case Builtin::BI__sync_fetch_and_xor_4:
1398 case Builtin::BI__sync_fetch_and_xor_8:
1399 case Builtin::BI__sync_fetch_and_xor_16:
1400 case Builtin::BI__sync_fetch_and_nand_1:
1401 case Builtin::BI__sync_fetch_and_nand_2:
1402 case Builtin::BI__sync_fetch_and_nand_4:
1403 case Builtin::BI__sync_fetch_and_nand_8:
1404 case Builtin::BI__sync_fetch_and_nand_16:
1405 case Builtin::BI__sync_fetch_and_min:
1406 case Builtin::BI__sync_fetch_and_max:
1407 case Builtin::BI__sync_fetch_and_umin:
1408 case Builtin::BI__sync_fetch_and_umax:
1409 return errorBuiltinNYI(*this, e, builtinID);
1410 return getUndefRValue(e->getType());
1411 case Builtin::BI__sync_add_and_fetch_1:
1412 case Builtin::BI__sync_add_and_fetch_2:
1413 case Builtin::BI__sync_add_and_fetch_4:
1414 case Builtin::BI__sync_add_and_fetch_8:
1415 case Builtin::BI__sync_add_and_fetch_16:
1416 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Add, e,
1417 cir::BinOpKind::Add);
1418 case Builtin::BI__sync_sub_and_fetch_1:
1419 case Builtin::BI__sync_sub_and_fetch_2:
1420 case Builtin::BI__sync_sub_and_fetch_4:
1421 case Builtin::BI__sync_sub_and_fetch_8:
1422 case Builtin::BI__sync_sub_and_fetch_16:
1423 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Sub, e,
1424 cir::BinOpKind::Sub);
1425 case Builtin::BI__sync_and_and_fetch_1:
1426 case Builtin::BI__sync_and_and_fetch_2:
1427 case Builtin::BI__sync_and_and_fetch_4:
1428 case Builtin::BI__sync_and_and_fetch_8:
1429 case Builtin::BI__sync_and_and_fetch_16:
1430 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::And, e,
1431 cir::BinOpKind::And);
1432 case Builtin::BI__sync_or_and_fetch_1:
1433 case Builtin::BI__sync_or_and_fetch_2:
1434 case Builtin::BI__sync_or_and_fetch_4:
1435 case Builtin::BI__sync_or_and_fetch_8:
1436 case Builtin::BI__sync_or_and_fetch_16:
1437 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Or, e,
1438 cir::BinOpKind::Or);
1439 case Builtin::BI__sync_xor_and_fetch_1:
1440 case Builtin::BI__sync_xor_and_fetch_2:
1441 case Builtin::BI__sync_xor_and_fetch_4:
1442 case Builtin::BI__sync_xor_and_fetch_8:
1443 case Builtin::BI__sync_xor_and_fetch_16:
1444 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Xor, e,
1445 cir::BinOpKind::Xor);
1446 case Builtin::BI__sync_nand_and_fetch_1:
1447 case Builtin::BI__sync_nand_and_fetch_2:
1448 case Builtin::BI__sync_nand_and_fetch_4:
1449 case Builtin::BI__sync_nand_and_fetch_8:
1450 case Builtin::BI__sync_nand_and_fetch_16:
1451 return emitBinaryAtomicPost(*this, cir::AtomicFetchKind::Nand, e,
1452 cir::BinOpKind::And, /*invert=*/true);
1453 case Builtin::BI__sync_val_compare_and_swap_1:
1454 case Builtin::BI__sync_val_compare_and_swap_2:
1455 case Builtin::BI__sync_val_compare_and_swap_4:
1456 case Builtin::BI__sync_val_compare_and_swap_8:
1457 case Builtin::BI__sync_val_compare_and_swap_16:
1458 case Builtin::BI__sync_bool_compare_and_swap_1:
1459 case Builtin::BI__sync_bool_compare_and_swap_2:
1460 case Builtin::BI__sync_bool_compare_and_swap_4:
1461 case Builtin::BI__sync_bool_compare_and_swap_8:
1462 case Builtin::BI__sync_bool_compare_and_swap_16:
1463 case Builtin::BI__sync_swap_1:
1464 case Builtin::BI__sync_swap_2:
1465 case Builtin::BI__sync_swap_4:
1466 case Builtin::BI__sync_swap_8:
1467 case Builtin::BI__sync_swap_16:
1468 case Builtin::BI__sync_lock_test_and_set_1:
1469 case Builtin::BI__sync_lock_test_and_set_2:
1470 case Builtin::BI__sync_lock_test_and_set_4:
1471 case Builtin::BI__sync_lock_test_and_set_8:
1472 case Builtin::BI__sync_lock_test_and_set_16:
1473 case Builtin::BI__sync_lock_release_1:
1474 case Builtin::BI__sync_lock_release_2:
1475 case Builtin::BI__sync_lock_release_4:
1476 case Builtin::BI__sync_lock_release_8:
1477 case Builtin::BI__sync_lock_release_16:
1478 case Builtin::BI__sync_synchronize:
1479 case Builtin::BI__builtin_nontemporal_load:
1480 case Builtin::BI__builtin_nontemporal_store:
1481 case Builtin::BI__c11_atomic_is_lock_free:
1482 case Builtin::BI__atomic_is_lock_free:
1483 case Builtin::BI__atomic_test_and_set:
1484 case Builtin::BI__atomic_clear:
1485 return errorBuiltinNYI(*this, e, builtinID);
1486 case Builtin::BI__atomic_thread_fence:
1487 case Builtin::BI__c11_atomic_thread_fence: {
1488 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::System);
1489 return RValue::get(nullptr);
1490 }
1491 case Builtin::BI__atomic_signal_fence:
1492 case Builtin::BI__c11_atomic_signal_fence: {
1493 emitAtomicFenceOp(*this, e, cir::SyncScopeKind::SingleThread);
1494 return RValue::get(nullptr);
1495 }
1496 case Builtin::BI__scoped_atomic_thread_fence:
1497 case Builtin::BI__builtin_signbit:
1498 case Builtin::BI__builtin_signbitf:
1499 case Builtin::BI__builtin_signbitl:
1500 case Builtin::BI__warn_memset_zero_len:
1501 case Builtin::BI__annotation:
1502 case Builtin::BI__builtin_annotation:
1503 case Builtin::BI__builtin_addcb:
1504 case Builtin::BI__builtin_addcs:
1505 case Builtin::BI__builtin_addc:
1506 case Builtin::BI__builtin_addcl:
1507 case Builtin::BI__builtin_addcll:
1508 case Builtin::BI__builtin_subcb:
1509 case Builtin::BI__builtin_subcs:
1510 case Builtin::BI__builtin_subc:
1511 case Builtin::BI__builtin_subcl:
1512 case Builtin::BI__builtin_subcll:
1513 return errorBuiltinNYI(*this, e, builtinID);
1514
1515 case Builtin::BI__builtin_add_overflow:
1516 case Builtin::BI__builtin_sub_overflow:
1517 case Builtin::BI__builtin_mul_overflow: {
1518 const clang::Expr *leftArg = e->getArg(0);
1519 const clang::Expr *rightArg = e->getArg(1);
1520 const clang::Expr *resultArg = e->getArg(2);
1521
1522 clang::QualType resultQTy =
1523 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1524
1525 WidthAndSignedness leftInfo =
1526 getIntegerWidthAndSignedness(cgm.getASTContext(), leftArg->getType());
1527 WidthAndSignedness rightInfo =
1528 getIntegerWidthAndSignedness(cgm.getASTContext(), rightArg->getType());
1529 WidthAndSignedness resultInfo =
1530 getIntegerWidthAndSignedness(cgm.getASTContext(), resultQTy);
1531
1532 // Note that we compute the encompassing type taking the result type into
1533 // consideration, so that later LLVM lowering doesn't emit redundant integral
1534 // extension casts.
1535 WidthAndSignedness encompassingInfo =
1536 EncompassingIntegerType({leftInfo, rightInfo, resultInfo});
1537
1538 auto encompassingCIRTy = cir::IntType::get(
1539 &getMLIRContext(), encompassingInfo.width, encompassingInfo.isSigned);
1540 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1541
1542 mlir::Value left = emitScalarExpr(leftArg);
1543 mlir::Value right = emitScalarExpr(rightArg);
1544 Address resultPtr = emitPointerWithAlignment(resultArg);
1545
1546 // Extend each operand to the encompassing type, if necessary.
1547 if (left.getType() != encompassingCIRTy)
1548 left =
1549 builder.createCast(cir::CastKind::integral, left, encompassingCIRTy);
1550 if (right.getType() != encompassingCIRTy)
1551 right =
1552 builder.createCast(cir::CastKind::integral, right, encompassingCIRTy);
1553
1554 // Perform the operation on the extended values.
1555 cir::BinOpOverflowKind opKind;
1556 switch (builtinID) {
1557 default:
1558 llvm_unreachable("Unknown overflow builtin id.");
1559 case Builtin::BI__builtin_add_overflow:
1560 opKind = cir::BinOpOverflowKind::Add;
1561 break;
1562 case Builtin::BI__builtin_sub_overflow:
1563 opKind = cir::BinOpOverflowKind::Sub;
1564 break;
1565 case Builtin::BI__builtin_mul_overflow:
1566 opKind = cir::BinOpOverflowKind::Mul;
1567 break;
1568 }
1569
1570 mlir::Location loc = getLoc(e->getSourceRange());
1571 auto arithOp = cir::BinOpOverflowOp::create(builder, loc, resultCIRTy,
1572 opKind, left, right);
1573
1574 // Here is a slight difference from the original clang CodeGen:
1575 // - In the original clang CodeGen, the checked arithmetic result is
1576 // first computed as a value of the encompassing type, and then it is
1577 // truncated to the actual result type with a second overflow check.
1578 // - In CIRGen, the checked arithmetic operation directly produces the
1579 // checked arithmetic result in its expected type.
1580 //
1581 // So we don't need a truncation and a second overflow check here.
1582
1583 // Finally, store the result using the pointer.
1584 bool isVolatile =
1585 resultArg->getType()->getPointeeType().isVolatileQualified();
1586 builder.createStore(loc, arithOp.getResult(), resultPtr, isVolatile);
1587
1588 return RValue::get(arithOp.getOverflow());
1589 }
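A sketch of the kind of source this case handles, with deliberately mixed operand and result widths so the encompassing-type path above is exercised; the function name is hypothetical user code, not CIRGen API:

#include <cstdint>

bool addIntoShort(int32_t a, uint32_t b, int16_t *out) {
  // Both operands are extended to one encompassing signed type, and a single
  // cir::BinOpOverflowOp produces the int16_t result plus the overflow flag.
  return __builtin_add_overflow(a, b, out);
}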
1590
1591 case Builtin::BI__builtin_uadd_overflow:
1592 case Builtin::BI__builtin_uaddl_overflow:
1593 case Builtin::BI__builtin_uaddll_overflow:
1594 case Builtin::BI__builtin_usub_overflow:
1595 case Builtin::BI__builtin_usubl_overflow:
1596 case Builtin::BI__builtin_usubll_overflow:
1597 case Builtin::BI__builtin_umul_overflow:
1598 case Builtin::BI__builtin_umull_overflow:
1599 case Builtin::BI__builtin_umulll_overflow:
1600 case Builtin::BI__builtin_sadd_overflow:
1601 case Builtin::BI__builtin_saddl_overflow:
1602 case Builtin::BI__builtin_saddll_overflow:
1603 case Builtin::BI__builtin_ssub_overflow:
1604 case Builtin::BI__builtin_ssubl_overflow:
1605 case Builtin::BI__builtin_ssubll_overflow:
1606 case Builtin::BI__builtin_smul_overflow:
1607 case Builtin::BI__builtin_smull_overflow:
1608 case Builtin::BI__builtin_smulll_overflow: {
1609 // Scalarize our inputs.
1610 mlir::Value x = emitScalarExpr(e->getArg(0));
1611 mlir::Value y = emitScalarExpr(e->getArg(1));
1612
1613 const clang::Expr *resultArg = e->getArg(2);
1614 Address resultPtr = emitPointerWithAlignment(resultArg);
1615
1616 // Decide which arithmetic operation we are lowering to:
1617 cir::BinOpOverflowKind arithKind;
1618 switch (builtinID) {
1619 default:
1620 llvm_unreachable("Unknown overflow builtin id.");
1621 case Builtin::BI__builtin_uadd_overflow:
1622 case Builtin::BI__builtin_uaddl_overflow:
1623 case Builtin::BI__builtin_uaddll_overflow:
1624 case Builtin::BI__builtin_sadd_overflow:
1625 case Builtin::BI__builtin_saddl_overflow:
1626 case Builtin::BI__builtin_saddll_overflow:
1627 arithKind = cir::BinOpOverflowKind::Add;
1628 break;
1629 case Builtin::BI__builtin_usub_overflow:
1630 case Builtin::BI__builtin_usubl_overflow:
1631 case Builtin::BI__builtin_usubll_overflow:
1632 case Builtin::BI__builtin_ssub_overflow:
1633 case Builtin::BI__builtin_ssubl_overflow:
1634 case Builtin::BI__builtin_ssubll_overflow:
1635 arithKind = cir::BinOpOverflowKind::Sub;
1636 break;
1637 case Builtin::BI__builtin_umul_overflow:
1638 case Builtin::BI__builtin_umull_overflow:
1639 case Builtin::BI__builtin_umulll_overflow:
1640 case Builtin::BI__builtin_smul_overflow:
1641 case Builtin::BI__builtin_smull_overflow:
1642 case Builtin::BI__builtin_smulll_overflow:
1643 arithKind = cir::BinOpOverflowKind::Mul;
1644 break;
1645 }
1646
1647 clang::QualType resultQTy =
1648 resultArg->getType()->castAs<clang::PointerType>()->getPointeeType();
1649 auto resultCIRTy = mlir::cast<cir::IntType>(cgm.convertType(resultQTy));
1650
1651 mlir::Location loc = getLoc(e->getSourceRange());
1652 cir::BinOpOverflowOp arithOp = cir::BinOpOverflowOp::create(
1653 builder, loc, resultCIRTy, arithKind, x, y);
1654
1655 bool isVolatile =
1656 resultArg->getType()->getPointeeType().isVolatileQualified();
1657 builder.createStore(loc, emitToMemory(arithOp.getResult(), resultQTy),
1658 resultPtr, isVolatile);
1659
1660 return RValue::get(arithOp.getOverflow());
1661 }
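The type-specific variants fix both operand and result types, so the case above only has to select the BinOpOverflowKind; a minimal caller (hypothetical name, standard builtin) looks like:

bool mulChecked(unsigned long a, unsigned long b, unsigned long *product) {
  // Returns true if a * b does not fit in unsigned long; the (possibly
  // wrapped) product is stored through the pointer either way.
  return __builtin_umull_overflow(a, b, product);
}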
1662
1663 case Builtin::BIaddressof:
1664 case Builtin::BI__addressof:
1665 case Builtin::BI__builtin_addressof:
1666 return RValue::get(emitLValue(e->getArg(0)).getPointer());
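These three spellings all reduce to taking the address of the emitted lvalue, which is what makes them immune to an overloaded operator&; a small illustration with hypothetical types:

struct Handle {
  int fd;
  Handle *operator&() = delete; // plain &h would not compile
};

Handle *realAddress(Handle &h) { return __builtin_addressof(h); }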
1667 case Builtin::BI__builtin_function_start:
1668 return errorBuiltinNYI(*this, e, builtinID);
1669 case Builtin::BI__builtin_operator_new:
1670 return emitNewOrDeleteBuiltinCall(
1671 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_New);
1672 case Builtin::BI__builtin_operator_delete:
1673 emitNewOrDeleteBuiltinCall(
1674 e->getCallee()->getType()->castAs<FunctionProtoType>(), e, OO_Delete);
1675 return RValue::get(nullptr);
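These builtins call the selected ::operator new / ::operator delete directly (libc++ uses them internally for raw allocation, for example); note that the delete case above returns RValue::get(nullptr) because the call produces no value. A minimal caller, with hypothetical names:

#include <cstddef>

void *acquire(std::size_t bytes) { return __builtin_operator_new(bytes); }
void release(void *p) { __builtin_operator_delete(p); }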
1676 case Builtin::BI__builtin_is_aligned:
1677 case Builtin::BI__builtin_align_up:
1678 case Builtin::BI__builtin_align_down:
1679 case Builtin::BI__noop:
1680 case Builtin::BI__builtin_call_with_static_chain:
1681 case Builtin::BI_InterlockedExchange8:
1682 case Builtin::BI_InterlockedExchange16:
1683 case Builtin::BI_InterlockedExchange:
1684 case Builtin::BI_InterlockedExchangePointer:
1685 case Builtin::BI_InterlockedCompareExchangePointer:
1686 case Builtin::BI_InterlockedCompareExchangePointer_nf:
1687 case Builtin::BI_InterlockedCompareExchange8:
1688 case Builtin::BI_InterlockedCompareExchange16:
1689 case Builtin::BI_InterlockedCompareExchange:
1690 case Builtin::BI_InterlockedCompareExchange64:
1691 case Builtin::BI_InterlockedIncrement16:
1692 case Builtin::BI_InterlockedIncrement:
1693 case Builtin::BI_InterlockedDecrement16:
1694 case Builtin::BI_InterlockedDecrement:
1695 case Builtin::BI_InterlockedAnd8:
1696 case Builtin::BI_InterlockedAnd16:
1697 case Builtin::BI_InterlockedAnd:
1698 case Builtin::BI_InterlockedExchangeAdd8:
1699 case Builtin::BI_InterlockedExchangeAdd16:
1700 case Builtin::BI_InterlockedExchangeAdd:
1701 case Builtin::BI_InterlockedExchangeSub8:
1702 case Builtin::BI_InterlockedExchangeSub16:
1703 case Builtin::BI_InterlockedExchangeSub:
1704 case Builtin::BI_InterlockedOr8:
1705 case Builtin::BI_InterlockedOr16:
1706 case Builtin::BI_InterlockedOr:
1707 case Builtin::BI_InterlockedXor8:
1708 case Builtin::BI_InterlockedXor16:
1709 case Builtin::BI_InterlockedXor:
1710 case Builtin::BI_bittest64:
1711 case Builtin::BI_bittest:
1712 case Builtin::BI_bittestandcomplement64:
1713 case Builtin::BI_bittestandcomplement:
1714 case Builtin::BI_bittestandreset64:
1715 case Builtin::BI_bittestandreset:
1716 case Builtin::BI_bittestandset64:
1717 case Builtin::BI_bittestandset:
1718 case Builtin::BI_interlockedbittestandreset:
1719 case Builtin::BI_interlockedbittestandreset64:
1720 case Builtin::BI_interlockedbittestandreset64_acq:
1721 case Builtin::BI_interlockedbittestandreset64_rel:
1722 case Builtin::BI_interlockedbittestandreset64_nf:
1723 case Builtin::BI_interlockedbittestandset64:
1724 case Builtin::BI_interlockedbittestandset64_acq:
1725 case Builtin::BI_interlockedbittestandset64_rel:
1726 case Builtin::BI_interlockedbittestandset64_nf:
1727 case Builtin::BI_interlockedbittestandset:
1728 case Builtin::BI_interlockedbittestandset_acq:
1729 case Builtin::BI_interlockedbittestandset_rel:
1730 case Builtin::BI_interlockedbittestandset_nf:
1731 case Builtin::BI_interlockedbittestandreset_acq:
1732 case Builtin::BI_interlockedbittestandreset_rel:
1733 case Builtin::BI_interlockedbittestandreset_nf:
1734 case Builtin::BI__iso_volatile_load8:
1735 case Builtin::BI__iso_volatile_load16:
1736 case Builtin::BI__iso_volatile_load32:
1737 case Builtin::BI__iso_volatile_load64:
1738 case Builtin::BI__iso_volatile_store8:
1739 case Builtin::BI__iso_volatile_store16:
1740 case Builtin::BI__iso_volatile_store32:
1741 case Builtin::BI__iso_volatile_store64:
1742 case Builtin::BI__builtin_ptrauth_sign_constant:
1743 case Builtin::BI__builtin_ptrauth_auth:
1744 case Builtin::BI__builtin_ptrauth_auth_and_resign:
1745 case Builtin::BI__builtin_ptrauth_blend_discriminator:
1746 case Builtin::BI__builtin_ptrauth_sign_generic_data:
1747 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
1748 case Builtin::BI__builtin_ptrauth_strip:
1749 case Builtin::BI__builtin_get_vtable_pointer:
1750 case Builtin::BI__exception_code:
1751 case Builtin::BI_exception_code:
1752 case Builtin::BI__exception_info:
1753 case Builtin::BI_exception_info:
1754 case Builtin::BI__abnormal_termination:
1755 case Builtin::BI_abnormal_termination:
1756 case Builtin::BI_setjmpex:
1757 case Builtin::BI_setjmp:
1758 case Builtin::BImove:
1759 case Builtin::BImove_if_noexcept:
1760 case Builtin::BIforward:
1761 case Builtin::BIforward_like:
1762 case Builtin::BIas_const:
1763 case Builtin::BI__GetExceptionInfo:
1764 case Builtin::BI__fastfail:
1765 case Builtin::BIread_pipe:
1766 case Builtin::BIwrite_pipe:
1767 case Builtin::BIreserve_read_pipe:
1768 case Builtin::BIreserve_write_pipe:
1769 case Builtin::BIwork_group_reserve_read_pipe:
1770 case Builtin::BIwork_group_reserve_write_pipe:
1771 case Builtin::BIsub_group_reserve_read_pipe:
1772 case Builtin::BIsub_group_reserve_write_pipe:
1773 case Builtin::BIcommit_read_pipe:
1774 case Builtin::BIcommit_write_pipe:
1775 case Builtin::BIwork_group_commit_read_pipe:
1776 case Builtin::BIwork_group_commit_write_pipe:
1777 case Builtin::BIsub_group_commit_read_pipe:
1778 case Builtin::BIsub_group_commit_write_pipe:
1779 case Builtin::BIget_pipe_num_packets:
1780 case Builtin::BIget_pipe_max_packets:
1781 case Builtin::BIto_global:
1782 case Builtin::BIto_local:
1783 case Builtin::BIto_private:
1784 case Builtin::BIenqueue_kernel:
1785 case Builtin::BIget_kernel_work_group_size:
1786 case Builtin::BIget_kernel_preferred_work_group_size_multiple:
1787 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
1788 case Builtin::BIget_kernel_sub_group_count_for_ndrange:
1789 case Builtin::BI__builtin_store_half:
1790 case Builtin::BI__builtin_store_halff:
1791 case Builtin::BI__builtin_load_half:
1792 case Builtin::BI__builtin_load_halff:
1793 return errorBuiltinNYI(*this, e, builtinID);
1794 case Builtin::BI__builtin_printf:
1795 case Builtin::BIprintf:
1796 break;
1797 case Builtin::BI__builtin_canonicalize:
1798 case Builtin::BI__builtin_canonicalizef:
1799 case Builtin::BI__builtin_canonicalizef16:
1800 case Builtin::BI__builtin_canonicalizel:
1801 case Builtin::BI__builtin_thread_pointer:
1802 case Builtin::BI__builtin_os_log_format:
1803 case Builtin::BI__xray_customevent:
1804 case Builtin::BI__xray_typedevent:
1805 case Builtin::BI__builtin_ms_va_start:
1806 case Builtin::BI__builtin_ms_va_end:
1807 case Builtin::BI__builtin_ms_va_copy:
1808 case Builtin::BI__builtin_get_device_side_mangled_name:
1809 return errorBuiltinNYI(*this, e, builtinID);
1810 }
1811
1812 // If this is an alias for a lib function (e.g. __builtin_sin), emit
1813 // the call using the normal call path, but using the unmangled
1814 // version of the function name.
1815 if (getContext().BuiltinInfo.isLibFunction(builtinID))
1816 return emitLibraryCall(*this, fd, e,
1817 cgm.getBuiltinLibFunction(fd, builtinID));
1818
1819 // Some target-specific builtins can have aggregate return values, e.g.
1820 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
1821 // returnValue to be non-null, so that the target-specific emission code can
1822 // always just emit into it.
1823 cir::TypeEvaluationKind evalKind = getEvaluationKind(e->getType());
1824 if (evalKind == cir::TEK_Aggregate && returnValue.isNull()) {
1825 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
1826 return getUndefRValue(e->getType());
1827 }
1828
1829 // Now see if we can emit a target-specific builtin.
1830 // FIXME: This is a temporary mechanism (double-optional semantics) that will
1831 // go away once everything is implemented:
1832 // 1. return `mlir::Value{}` for cases where we have issued the diagnostic.
1833 // 2. return `std::nullopt` in cases where we didn't issue a diagnostic
1834 // but also didn't handle the builtin.
1835 if (std::optional<mlir::Value> rst =
1836 emitTargetBuiltinExpr(builtinID, e, returnValue)) {
1837 mlir::Value v = rst.value();
1838 // CIR dialect operations may have no results, so no value is returned
1839 // even if the builtin was emitted successfully.
1840 if (!v)
1841 return RValue::get(nullptr);
1842
1843 switch (evalKind) {
1844 case cir::TEK_Scalar:
1845 if (mlir::isa<cir::VoidType>(v.getType()))
1846 return RValue::get(nullptr);
1847 return RValue::get(v);
1848 case cir::TEK_Aggregate:
1849 cgm.errorNYI(e->getSourceRange(), "aggregate return value from builtin");
1850 return getUndefRValue(e->getType());
1851 case cir::TEK_Complex:
1852 llvm_unreachable("No current target builtin returns complex");
1853 }
1854 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
1855 }
1856
1857 cgm.errorNYI(e->getSourceRange(),
1858 std::string("unimplemented builtin call: ") +
1859 getContext().BuiltinInfo.getName(builtinID));
1860 return getUndefRValue(e->getType());
1861}
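A standalone sketch of the double-optional contract described in the FIXME above, using a hypothetical stand-in for mlir::Value (default-constructed means null and tests false); this only illustrates the three outcomes and is not CIRGen code:

#include <optional>

struct FakeValue { // hypothetical stand-in for mlir::Value
  void *impl = nullptr;
  explicit operator bool() const { return impl != nullptr; }
};

std::optional<FakeValue> tryEmitTargetBuiltin(bool handled, bool diagnosed,
                                              void *result) {
  if (!handled)
    return std::nullopt;    // caller falls through to "unimplemented builtin"
  if (diagnosed)
    return FakeValue{};     // handled: diagnostic issued or op had no result
  return FakeValue{result}; // handled: caller wraps this in an RValue
}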
1862
1863static std::optional<mlir::Value>
1864 emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID,
1865 const CallExpr *e, ReturnValueSlot &returnValue,
1866 llvm::Triple::ArchType arch) {
1867 // When compiling in HipStdPar mode we have to be conservative in rejecting
1868 // target specific features in the FE, and defer the possible error to the
1869 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
1870 // referenced by an accelerator executable function, we emit an error.
1871 // Returning nullptr here leads to the builtin being handled in
1872 // EmitStdParUnsupportedBuiltin.
1873 if (cgf->getLangOpts().HIPStdPar && cgf->getLangOpts().CUDAIsDevice &&
1874 arch != cgf->getTarget().getTriple().getArch())
1875 return std::nullopt;
1876
1877 switch (arch) {
1878 case llvm::Triple::arm:
1879 case llvm::Triple::armeb:
1880 case llvm::Triple::thumb:
1881 case llvm::Triple::thumbeb:
1882 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1883 // At this point, we don't even know that the builtin is target-specific.
1884 return std::nullopt;
1885 case llvm::Triple::aarch64:
1886 case llvm::Triple::aarch64_32:
1887 case llvm::Triple::aarch64_be:
1888 return cgf->emitAArch64BuiltinExpr(builtinID, e, returnValue, arch);
1889 case llvm::Triple::bpfeb:
1890 case llvm::Triple::bpfel:
1891 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1892 // At this point, we don't even know that the builtin is target-specific.
1893 return std::nullopt;
1894
1895 case llvm::Triple::x86:
1896 case llvm::Triple::x86_64:
1897 return cgf->emitX86BuiltinExpr(builtinID, e);
1898
1899 case llvm::Triple::ppc:
1900 case llvm::Triple::ppcle:
1901 case llvm::Triple::ppc64:
1902 case llvm::Triple::ppc64le:
1903 case llvm::Triple::r600:
1904 case llvm::Triple::amdgcn:
1905 case llvm::Triple::systemz:
1906 case llvm::Triple::nvptx:
1907 case llvm::Triple::nvptx64:
1908 case llvm::Triple::wasm32:
1909 case llvm::Triple::wasm64:
1910 case llvm::Triple::hexagon:
1911 case llvm::Triple::riscv32:
1912 case llvm::Triple::riscv64:
1913 // These are actually NYI, but that will be reported by emitBuiltinExpr.
1914 // At this point, we don't even know that the builtin is target-specific.
1915 return std::nullopt;
1916 default:
1917 return std::nullopt;
1918 }
1919}
1920
1921 std::optional<mlir::Value>
1922 CIRGenFunction::emitTargetBuiltinExpr(unsigned builtinID, const CallExpr *e,
1923 ReturnValueSlot &returnValue) {
1924 if (getContext().BuiltinInfo.isAuxBuiltinID(builtinID)) {
1925 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
1926 return emitTargetArchBuiltinExpr(
1927 this, getContext().BuiltinInfo.getAuxBuiltinID(builtinID), e,
1928 returnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
1929 }
1930
1931 return emitTargetArchBuiltinExpr(this, builtinID, e, returnValue,
1932 getTarget().getTriple().getArch());
1933}
1934
1935 mlir::Value CIRGenFunction::emitScalarOrConstFoldImmArg(
1936 const unsigned iceArguments, const unsigned idx, const Expr *argExpr) {
1937 mlir::Value arg = {};
1938 if ((iceArguments & (1 << idx)) == 0) {
1939 arg = emitScalarExpr(argExpr);
1940 } else {
1941 // If this is required to be a constant, constant fold it so that we
1942 // know that the generated intrinsic gets a ConstantInt.
1943 const std::optional<llvm::APSInt> result =
1944 argExpr->getIntegerConstantExpr(getContext());
1945 assert(result && "Expected argument to be a constant");
1946 arg = builder.getConstInt(getLoc(argExpr->getSourceRange()), *result);
1947 }
1948 return arg;
1949}
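The iceArguments value is a bitmask in which bit idx means "argument idx must be an integer constant expression"; a small sketch of that convention (hypothetical helper, not part of this file):

#include <cstdint>

static bool mustConstantFold(uint32_t iceArguments, unsigned idx) {
  // Mirrors the (iceArguments & (1 << idx)) test above: a set bit means the
  // argument has to be folded to a constant instead of emitted as a scalar.
  return (iceArguments & (1u << idx)) != 0;
}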
1950
1951/// Given a builtin id for a function like "__builtin_fabsf", return a Function*
1952/// for "fabsf".
1953 cir::FuncOp CIRGenModule::getBuiltinLibFunction(const FunctionDecl *fd,
1954 unsigned builtinID) {
1955 assert(astContext.BuiltinInfo.isLibFunction(builtinID));
1956
1957 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
1958 // to build this up so provide a small stack buffer to handle the vast
1959 // majority of names.
1960 llvm::SmallString<64> name;
1961
1962 assert(!cir::MissingFeatures::asmLabelAttr());
1963 name = astContext.BuiltinInfo.getName(builtinID).substr(10);
1964
1965 GlobalDecl d(fd);
1966 mlir::Type type = convertType(fd->getType());
1967 return getOrCreateCIRFunction(name, type, d, /*forVTable=*/false);
1968}
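The substr(10) above works because the "__builtin_" prefix is exactly ten characters long; a quick self-contained check:

#include <cassert>
#include <string>

int main() {
  const std::string builtinName = "__builtin_fabsf";
  assert(std::string("__builtin_").size() == 10);
  assert(builtinName.substr(10) == "fabsf");
  return 0;
}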
1969
1970 mlir::Value CIRGenFunction::emitCheckedArgForAssume(const Expr *e) {
1971 mlir::Value argValue = evaluateExprAsBool(e);
1972 if (!sanOpts.has(SanitizerKind::Builtin))
1973 return argValue;
1974
1976 cgm.errorNYI(e->getSourceRange(),
1977 "emitCheckedArgForAssume: sanitizers are NYI");
1978 return {};
1979}
1980
1981void CIRGenFunction::emitVAStart(mlir::Value vaList, mlir::Value count) {
1982 // LLVM codegen casts the va_list to i8*; there is no real gain in doing
1983 // that this early in CIRGen, so defer it to LLVM lowering.
1984 cir::VAStartOp::create(builder, vaList.getLoc(), vaList, count);
1985}
1986
1987void CIRGenFunction::emitVAEnd(mlir::Value vaList) {
1988 cir::VAEndOp::create(builder, vaList.getLoc(), vaList);
1989}
1990
1991 // FIXME(cir): This completely abstracts away the ABI with a generic CIR Op. By
1992 // default this lowers to llvm.va_arg, which is incomplete and not ABI-compliant
1993 // on most targets, so cir.va_arg will need some ABI handling in LoweringPrepare.
1994 mlir::Value CIRGenFunction::emitVAArg(VAArgExpr *ve) {
1995 assert(!cir::MissingFeatures::msabi());
1996 assert(!cir::MissingFeatures::vlas());
1997 mlir::Location loc = cgm.getLoc(ve->getExprLoc());
1998 mlir::Type type = convertType(ve->getType());
1999 mlir::Value vaList = emitVAListRef(ve->getSubExpr()).getPointer();
2000 return cir::VAArgOp::create(builder, loc, type, vaList);
2001}
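The three helpers above cover the standard variadic-argument pattern: va_start becomes cir::VAStartOp, va_arg becomes cir::VAArgOp, and va_end becomes cir::VAEndOp, with ABI details deferred as the FIXME notes. The source they correspond to is the ordinary (hypothetical example function):

#include <cstdarg>

int sumInts(int count, ...) {
  va_list args;
  va_start(args, count);
  int total = 0;
  for (int i = 0; i < count; ++i)
    total += va_arg(args, int);
  va_end(args);
  return total;
}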
2002
2003mlir::Value CIRGenFunction::emitBuiltinObjectSize(const Expr *e, unsigned type,
2004 cir::IntType resType,
2005 mlir::Value emittedE,
2006 bool isDynamic) {
2008
2009 // LLVM can't handle type=3 appropriately, and __builtin_object_size shouldn't
2010 // evaluate e for side effects. In either case, just like the original LLVM
2011 // lowering, we shouldn't lower to `cir.objsize` but emit a constant instead.
2012 if (type == 3 || (!emittedE && e->HasSideEffects(getContext())))
2013 return builder.getConstInt(getLoc(e->getSourceRange()), resType,
2014 (type & 2) ? 0 : -1);
2015
2016 mlir::Value ptr = emittedE ? emittedE : emitScalarExpr(e);
2017 assert(mlir::isa<cir::PointerType>(ptr.getType()) &&
2018 "Non-pointer passed to __builtin_object_size?");
2019
2021
2022 // Extract the min/max mode from type. CIR only supports type 0
2023 // (max, whole object) and type 2 (min, whole object), not type 1 or 3
2024 // (closest subobject variants).
2025 const bool min = ((type & 2) != 0);
2026 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
2027 auto op =
2028 cir::ObjSizeOp::create(builder, getLoc(e->getSourceRange()), resType, ptr,
2029 min, /*nullUnknown=*/true, isDynamic);
2030 return op.getResult();
2031}
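The type argument follows the usual __builtin_object_size encoding: bit 1 (type & 2) selects the minimum rather than the maximum estimate, and when the size is unknown the constant path above folds to (size_t)-1 for types 0/1 and 0 for types 2/3. For example (hypothetical wrappers):

#include <cstddef>

std::size_t upperBound(char *p) { return __builtin_object_size(p, 0); }
std::size_t lowerBound(char *p) { return __builtin_object_size(p, 2); }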
2032
2033 mlir::Value CIRGenFunction::evaluateOrEmitBuiltinObjectSize(
2034 const Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE,
2035 bool isDynamic) {
2036 uint64_t objectSize;
2037 if (!e->tryEvaluateObjectSize(objectSize, getContext(), type))
2038 return emitBuiltinObjectSize(e, type, resType, emittedE, isDynamic);
2039 return builder.getConstInt(getLoc(e->getSourceRange()), resType, objectSize);
2040}
static StringRef bytes(const std::vector< T, Allocator > &v)
Defines enum values for all the target-independent builtin functions.
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Value makeBinaryAtomicValue(CIRGenFunction &cgf, cir::AtomicFetchKind kind, const CallExpr *expr, mlir::Type *originalArgType, mlir::Value *emittedArgValue=nullptr, cir::MemOrder ordering=cir::MemOrder::SequentiallyConsistent)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
static std::optional< mlir::Value > emitTargetArchBuiltinExpr(CIRGenFunction *cgf, unsigned builtinID, const CallExpr *e, ReturnValueSlot &returnValue, llvm::Triple::ArchType arch)
static RValue emitUnaryFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static mlir::Value emitToInt(CIRGenFunction &cgf, mlir::Value v, QualType t, cir::IntType intType)
Emit the conversions required to turn the given value into an integer of the given size.
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static RValue emitBuiltinBitOp(CIRGenFunction &cgf, const CallExpr *e, bool poisonZero=false)
static void emitAtomicFenceOp(CIRGenFunction &cgf, const CallExpr *expr, cir::SyncScopeKind syncScope)
static Address checkAtomicAlignment(CIRGenFunction &cgf, const CallExpr *e)
static bool shouldCIREmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue tryEmitFPMathIntrinsic(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
static RValue emitBinaryAtomicPost(CIRGenFunction &cgf, cir::AtomicFetchKind atomicOpkind, const CallExpr *e, cir::BinOpKind binopKind, bool invert=false)
static RValue emitBuiltinAlloca(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static RValue errorBuiltinNYI(CIRGenFunction &cgf, const CallExpr *e, unsigned builtinID)
static mlir::Value emitFromInt(CIRGenFunction &cgf, mlir::Value v, QualType t, mlir::Type resultType)
Defines an enumeration for C++ overloaded operators.
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ int min(int __a, int __b)
mlir::Value createIntToPtr(mlir::Value src, mlir::Type newTy)
mlir::Value createPtrToInt(mlir::Value src, mlir::Type newTy)
cir::PointerType getPointerTo(mlir::Type ty)
mlir::Value createIntCast(mlir::Value src, mlir::Type newTy)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
cir::PointerType getVoidPtrTy(clang::LangAS langAS=clang::LangAS::Default)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
llvm::TypeSize getTypeSizeInBits(mlir::Type ty) const
APSInt & getInt()
Definition APValue.h:489
bool isFloat() const
Definition APValue.h:468
bool isInt() const
Definition APValue.h:467
APFloat & getFloat()
Definition APValue.h:503
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
unsigned getIntWidth(QualType T) const
CanQualType VoidPtrTy
Builtin::Context & BuiltinInfo
Definition ASTContext.h:792
TypeInfo getTypeInfo(const Type *T) const
Get the size and alignment of the specified complete type in bits.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
uint64_t getCharWidth() const
Return the size of the character type, in bits.
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isLibFunction(unsigned ID) const
Return true if this is a builtin for a libc/libm function, with a "__builtin_" prefix (e....
Definition Builtins.h:309
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
mlir::Value getPointer() const
Definition Address.h:95
mlir::Type getElementType() const
Definition Address.h:122
clang::CharUnits getAlignment() const
Definition Address.h:135
Address withAlignment(clang::CharUnits newAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:86
mlir::Value emitRawPointer() const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:109
cir::IntType getSIntNTy(int n)
cir::PointerType getUInt8PtrTy()
cir::IntType getUIntNTy(int n)
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:92
mlir::Type convertType(clang::QualType t)
mlir::Value emitCheckedArgForAssume(const Expr *e)
Emits an argument for a call to a __builtin_assume.
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
const clang::LangOptions & getLangOpts() const
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
const TargetInfo & getTarget() const
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
const clang::Decl * curFuncDecl
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitVAStart(mlir::Value vaList, mlir::Value count)
Emits the start of a CIR variable-argument operation (cir.va_start)
mlir::Value evaluateOrEmitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
mlir::Value emitBuiltinObjectSize(const clang::Expr *e, unsigned type, cir::IntType resType, mlir::Value emittedE, bool isDynamic)
Returns a Value corresponding to the size of the given expression by emitting a cir....
std::optional< mlir::Value > emitTargetBuiltinExpr(unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot &returnValue)
clang::SanitizerSet sanOpts
Sanitizers enabled for this function.
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
std::optional< mlir::Value > emitX86BuiltinExpr(unsigned builtinID, const CallExpr *expr)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
std::optional< mlir::Value > emitAArch64BuiltinExpr(unsigned builtinID, const CallExpr *expr, ReturnValueSlot returnValue, llvm::Triple::ArchType arch)
void emitAtomicExprWithMemOrder(const Expr *memOrder, bool isStore, bool isLoad, bool isFence, llvm::function_ref< void(cir::MemOrder)> emitAtomicOp)
void emitVAEnd(mlir::Value vaList)
Emits the end of a CIR variable-argument operation (cir.va_end)
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
CIRGenBuilderTy & getBuilder()
mlir::MLIRContext & getMLIRContext()
mlir::Value emitAlignmentAssumption(mlir::Value ptrValue, QualType ty, SourceLocation loc, SourceLocation assumptionLoc, int64_t alignment, mlir::Value offsetValue=nullptr)
RValue emitNewOrDeleteBuiltinCall(const FunctionProtoType *type, const CallExpr *callExpr, OverloadedOperatorKind op)
clang::ASTContext & getContext() const
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
mlir::Value emitFromMemory(mlir::Value value, clang::QualType ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Address emitVAListRef(const Expr *e)
Build a "reference" to a va_list; this is either the address or the value of the expression,...
mlir::Value emitScalarOrConstFoldImmArg(unsigned iceArguments, unsigned idx, const Expr *argExpr)
mlir::Value emitVAArg(VAArgExpr *ve)
Generate code to get an argument from the passed in pointer and update it accordingly.
RValue emitRotate(const CallExpr *e, bool isRotateLeft)
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
clang::ASTContext & getASTContext() const
mlir::Type convertType(clang::QualType type)
clang::DiagnosticsEngine & getDiags() const
cir::FuncOp getBuiltinLibFunction(const FunctionDecl *fd, unsigned builtinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
const llvm::Triple & getTriple() const
cir::FuncOp getOrCreateCIRFunction(llvm::StringRef mangledName, mlir::Type funcType, clang::GlobalDecl gd, bool forVTable, bool dontDefer=false, bool isThunk=false, ForDefinition_t isForDefinition=NotForDefinition, mlir::ArrayAttr extraAttrs={})
const cir::CIRDataLayout getDataLayout() const
const clang::CodeGenOptions & getCodeGenOpts() const
mlir::Value getPointer() const
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
bool isIgnored() const
Definition CIRGenValue.h:52
static RValue getIgnored()
Definition CIRGenValue.h:78
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:256
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3147
bool hasStoredFPFeatures() const
Definition Expr.h:3102
SourceLocation getBeginLoc() const
Definition Expr.h:3277
Expr * getCallee()
Definition Expr.h:3090
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3242
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3134
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
std::optional< llvm::APSInt > getIntegerConstantExpr(const ASTContext &Ctx) const
isIntegerConstantExpr - Return the value if this expression is a valid integer constant expression.
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3669
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
Represents difference between two FPOptions values.
Represents a function declaration or definition.
Definition Decl.h:2000
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8386
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8428
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:747
bool isBlockPointerType() const
Definition TypeBase.h:8559
bool isPointerType() const
Definition TypeBase.h:8539
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9188
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
bool isIntegralOrEnumerationType() const
Determine whether this type is an integral or enumeration type.
Definition TypeBase.h:9019
bool isObjCObjectPointerType() const
Definition TypeBase.h:8714
bool isFloatingType() const
Definition Type.cpp:2305
bool isUnsignedIntegerType() const
Return true if this is an integer type that is unsigned, according to C99 6.2.5p6 [which returns true...
Definition Type.cpp:2254
Represents a call to the builtin function __builtin_va_arg.
Definition Expr.h:4957
const Expr * getSubExpr() const
Definition Expr.h:4973
QualType getType() const
Definition Decl.h:723
bool isMatchingAddressSpace(cir::TargetAddressSpaceAttr cirAS, clang::LangAS as)
Definition CIRTypes.cpp:944
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
U cast(CodeGen::Address addr)
Definition Address.h:327
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
static bool builtinCheckKind()
static bool asmLabelAttr()
static bool msvcBuiltins()
static bool cgFPOptionsRAII()
static bool builtinCallF128()
static bool fpConstraints()
static bool countedBySize()
static bool opCallImplicitObjectSizeArgs()
static bool fastMathFlags()
static bool builtinCall()
cir::PointerType allocaInt8PtrTy
void* in alloca address space
cir::TargetAddressSpaceAttr getCIRAllocaAddressSpace() const
cir::PointerType voidPtrTy
void* in address space 0
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool hasSideEffects() const
Return true if the evaluated expression has side effects.
Definition Expr.h:639
#define conj(__x)
Definition tgmath.h:1303