clang 23.0.0git
CGBuiltin.cpp
Go to the documentation of this file.
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "ABIInfo.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenCLRuntime.h"
20#include "CGRecordLayout.h"
21#include "CGValue.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "PatternInit.h"
26#include "TargetInfo.h"
27#include "clang/AST/OSLog.h"
31#include "llvm/IR/InlineAsm.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/IntrinsicsX86.h"
35#include "llvm/IR/MatrixBuilder.h"
36#include "llvm/Support/ConvertUTF.h"
37#include "llvm/Support/ScopedPrinter.h"
38#include <optional>
39#include <utility>
40
41using namespace clang;
42using namespace CodeGen;
43using namespace llvm;
44
45/// Some builtins do not have library implementation on some targets and
46/// are instead emitted as LLVM IRs by some target builtin emitters.
47/// FIXME: Remove this when library support is added
// Returns true for logb/logbf/scalbn/scalbnf (and their __builtin_ forms)
// when math-errno is disabled, so the caller lowers them to LLVM IR instead
// of a libcall.
48 static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
49 const Builtin::Context &BI,
50 const CodeGenFunction &CGF) {
51 if (!CGF.CGM.getLangOpts().MathErrno &&
// NOTE(review): source lines 52-54 are missing from this listing, so the
// rest of the guarding condition after the '&&' is not visible here —
// confirm against the upstream file before editing.
55 switch (BuiltinID) {
56 default:
57 return false;
58 case Builtin::BIlogbf:
59 case Builtin::BI__builtin_logbf:
60 case Builtin::BIlogb:
61 case Builtin::BI__builtin_logb:
62 case Builtin::BIscalbnf:
63 case Builtin::BI__builtin_scalbnf:
64 case Builtin::BIscalbn:
65 case Builtin::BI__builtin_scalbn:
66 return true;
67 }
68 }
69 return false;
70}
71
// NOTE(review): the first line of this definition (return type + name) is
// missing from this listing; the call sites below invoke it as
// EmitTargetArchBuiltinExpr(CGF, BuiltinID, E, ReturnValue, Arch).
// Dispatches a target-specific builtin to the per-architecture emitter for
// `Arch`; returns nullptr when no emitter handles the builtin.
73 unsigned BuiltinID, const CallExpr *E,
74 ReturnValueSlot ReturnValue,
75 llvm::Triple::ArchType Arch) {
76 // When compiling in HipStdPar mode we have to be conservative in rejecting
77 // target specific features in the FE, and defer the possible error to the
78 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
79 // referenced by an accelerator executable function, we emit an error.
80 // Returning nullptr here leads to the builtin being handled in
81 // EmitStdParUnsupportedBuiltin.
82 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
83 Arch != CGF->getTarget().getTriple().getArch())
84 return nullptr;
85
// One case per architecture family; each forwards to the corresponding
// CodeGenFunction::Emit<Target>BuiltinExpr.
86 switch (Arch) {
87 case llvm::Triple::arm:
88 case llvm::Triple::armeb:
89 case llvm::Triple::thumb:
90 case llvm::Triple::thumbeb:
91 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
92 case llvm::Triple::aarch64:
93 case llvm::Triple::aarch64_32:
94 case llvm::Triple::aarch64_be:
95 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
96 case llvm::Triple::bpfeb:
97 case llvm::Triple::bpfel:
98 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
99 case llvm::Triple::dxil:
100 return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
101 case llvm::Triple::x86:
102 case llvm::Triple::x86_64:
103 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
104 case llvm::Triple::ppc:
105 case llvm::Triple::ppcle:
106 case llvm::Triple::ppc64:
107 case llvm::Triple::ppc64le:
108 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
109 case llvm::Triple::r600:
110 case llvm::Triple::amdgcn:
111 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
112 case llvm::Triple::systemz:
113 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
114 case llvm::Triple::nvptx:
115 case llvm::Triple::nvptx64:
116 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
117 case llvm::Triple::wasm32:
118 case llvm::Triple::wasm64:
119 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
120 case llvm::Triple::hexagon:
121 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
122 case llvm::Triple::riscv32:
123 case llvm::Triple::riscv64:
124 case llvm::Triple::riscv32be:
125 case llvm::Triple::riscv64be:
126 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
127 case llvm::Triple::spirv32:
128 case llvm::Triple::spirv64:
// SPIR-V for AMDHSA reuses the AMDGPU builtin emitter.
129 if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
130 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
131 [[fallthrough]];
132 case llvm::Triple::spirv:
133 return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
134 default:
135 return nullptr;
136 }
137}
138
// NOTE(review): the signature's first line and parts of this body (source
// lines 139, 141 and 144) are missing from this listing. It is a
// CodeGenFunction member (it calls getContext() and passes `this`),
// presumably CodeGenFunction::EmitTargetBuiltinExpr — confirm upstream.
// Routes aux-target builtins to the aux triple's emitter, otherwise emits
// for the primary target triple.
140 const CallExpr *E,
142 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
143 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
145 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
146 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
147 }
148
149 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
150 getTarget().getTriple().getArch());
151}
152
// Applies -ftrivial-auto-var-init to a fresh alloca: memsets Size bytes with
// either zero or the target's pattern byte, and tags the store with
// "auto-init" metadata so later passes can recognize it.
// NOTE(review): the switch's case labels (source lines 157, 160, 163) are
// missing from this listing — presumably the TrivialAutoVarInitKind
// Uninitialized / Zero / Pattern cases; confirm upstream.
153static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
154 Align AlignmentInBytes) {
155 ConstantInt *Byte;
156 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
158 // Nothing to initialize.
159 return;
161 Byte = CGF.Builder.getInt8(0x00);
162 break;
164 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
165 Byte = llvm::dyn_cast<llvm::ConstantInt>(
166 initializationPatternFor(CGF.CGM, Int8));
167 break;
168 }
169 }
// -ftrivial-auto-var-init-stop-after limit reached: emit nothing.
170 if (CGF.CGM.stopAutoInit())
171 return;
172 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
173 I->addAnnotationMetadata("auto-init");
174}
175
176/// getBuiltinLibFunction - Given a builtin id for a function like
177/// "__builtin_fabsf", return a Function* for "fabsf".
// NOTE(review): the signature's first line (source line 178) and the local
// name-buffer declaration (line 185) are missing from this listing; `Name`
// is used below but its declaration is on the missing line. This is a
// CodeGenModule member (it calls getMangledName / GetOrCreateLLVMFunction).
179 unsigned BuiltinID) {
180 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
181
182 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
183 // to build this up so provide a small stack buffer to handle the vast
184 // majority of names.
186 GlobalDecl D(FD);
187
188 // TODO: This list should be expanded or refactored after all GCC-compatible
189 // std libcall builtins are implemented.
// PPC64 with IEEE-quad long double: redirect printf/scanf-family libcalls to
// their *ieee128 variants.
190 static const SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
191 {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
192 {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
193 {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
194 {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
195 {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
196 {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
197 {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
198 {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
199 {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
200 {Builtin::BI__builtin_printf, "__printfieee128"},
201 {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
202 {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
203 {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
204 {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
205 {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
206 {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
207 {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
208 {Builtin::BI__builtin_scanf, "__scanfieee128"},
209 {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
210 {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
211 {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
212 {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
213 {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
214 };
215
216 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
217 // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
218 // if it is 64-bit 'long double' mode.
219 static const SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
220 {Builtin::BI__builtin_frexpl, "frexp"},
221 {Builtin::BI__builtin_ldexpl, "ldexp"},
222 {Builtin::BI__builtin_modfl, "modf"},
223 };
224
225 // If the builtin has been declared explicitly with an assembler label,
226 // use the mangled name. This differs from the plain label on platforms
227 // that prefix labels.
228 if (FD->hasAttr<AsmLabelAttr>())
229 Name = getMangledName(D);
230 else {
231 // TODO: This mutation should also be applied to other targets other than
232 // PPC, after backend supports IEEE 128-bit style libcalls.
233 if (getTriple().isPPC64() &&
234 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
235 F128Builtins.contains(BuiltinID))
236 Name = F128Builtins.lookup(BuiltinID);
237 else if (getTriple().isOSAIX() &&
238 &getTarget().getLongDoubleFormat() ==
239 &llvm::APFloat::IEEEdouble() &&
240 AIXLongDouble64Builtins.contains(BuiltinID))
241 Name = AIXLongDouble64Builtins.lookup(BuiltinID);
242 else
// Default: strip the "__builtin_" prefix (10 characters).
243 Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
244 }
245
246 llvm::FunctionType *Ty =
247 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
248
249 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
250}
251
252/// Emit the conversions required to turn the given value into an
253/// integer of the given size.
254Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
255 QualType T, llvm::IntegerType *IntType) {
256 V = CGF.EmitToMemory(V, T);
257
258 if (V->getType()->isPointerTy())
259 return CGF.Builder.CreatePtrToInt(V, IntType);
260
261 assert(V->getType() == IntType);
262 return V;
263}
264
265Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
266 QualType T, llvm::Type *ResultType) {
267 V = CGF.EmitFromMemory(V, T);
268
269 if (ResultType->isPointerTy())
270 return CGF.Builder.CreateIntToPtr(V, ResultType);
271
272 assert(V->getType() == ResultType);
273 return V;
274}
275
// NOTE(review): the signature line (source line 276) and part of the ternary
// (line 281, the pointer-typed branch of `Bytes`) are missing from this
// listing; callers below use it as CheckAtomicAlignment(CGF, E).
// Returns the address of arg 0, diagnosing and forcing natural alignment
// when the pointer is under-aligned for an atomic access.
277 ASTContext &Ctx = CGF.getContext();
278 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
279 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
280 unsigned Bytes = Ptr.getElementType()->isPointerTy()
282 : DL.getTypeStoreSize(Ptr.getElementType());
283 unsigned Align = Ptr.getAlignment().getQuantity();
284 if (Align % Bytes != 0) {
285 DiagnosticsEngine &Diags = CGF.CGM.getDiags();
286 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
287 // Force address to be at least naturally-aligned.
288 return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
289 }
290 return Ptr;
291}
292
293/// Utility to insert an atomic instruction based on Intrinsic::ID
294/// and the expression node.
// NOTE(review): the signature's first line (source line 295) is missing from
// this listing; callers below use MakeBinaryAtomicValue(CGF, Kind, E, ...).
// Emits an atomicrmw of `Kind` on arg 0 with arg 1, returning the OLD value
// converted back to the expression's type.
296 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
297 AtomicOrdering Ordering) {
298
299 QualType T = E->getType();
300 assert(E->getArg(0)->getType()->isPointerType());
301 assert(CGF.getContext().hasSameUnqualifiedType(T,
302 E->getArg(0)->getType()->getPointeeType()));
303 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
304
305 Address DestAddr = CheckAtomicAlignment(CGF, E);
306
307 llvm::IntegerType *IntType = llvm::IntegerType::get(
308 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
309
310 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
311 llvm::Type *ValueType = Val->getType();
312 Val = EmitToInt(CGF, Val, T, IntType);
313
314 llvm::Value *Result =
315 CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
316 return EmitFromInt(CGF, Result, T, ValueType);
317}
318
// NOTE(review): the signature line (source line 319) and the declaration of
// `Addr` (line 321, presumably EmitPointerWithAlignment on arg 1 — confirm
// upstream) are missing from this listing.
// Stores arg 0 through arg 1 with the nontemporal hint set; returns nullptr.
320 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
322
323 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
324 LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
325 LV.setNontemporal(true);
326 CGF.EmitStoreOfScalar(Val, LV, false);
327 return nullptr;
329
// NOTE(review): the signature (source line 330) and the declaration of
// `Addr` (line 331) are missing from this listing.
// Loads through an address with the nontemporal hint set and returns the
// scalar value.
332
333 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
334 LV.setNontemporal(true);
335 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
336}
337
// NOTE(review): the signature's first line (source line 338) is missing from
// this listing. Thin RValue wrapper around MakeBinaryAtomicValue.
339 llvm::AtomicRMWInst::BinOp Kind,
340 const CallExpr *E) {
341 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
342}
343
344/// Utility to insert an atomic instruction based Intrinsic::ID and
345/// the expression node, where the return value is the result of the
346/// operation.
// NOTE(review): the signature's first line (source line 347) is missing from
// this listing. Unlike MakeBinaryAtomicValue, this re-applies `Op` to the
// old value so the NEW value is returned (with an optional bitwise-not for
// nand-style builtins via `Invert`).
348 llvm::AtomicRMWInst::BinOp Kind,
349 const CallExpr *E,
350 Instruction::BinaryOps Op,
351 bool Invert = false) {
352 QualType T = E->getType();
353 assert(E->getArg(0)->getType()->isPointerType());
354 assert(CGF.getContext().hasSameUnqualifiedType(T,
355 E->getArg(0)->getType()->getPointeeType()));
356 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
357
358 Address DestAddr = CheckAtomicAlignment(CGF, E);
359
360 llvm::IntegerType *IntType = llvm::IntegerType::get(
361 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
362
363 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
364 llvm::Type *ValueType = Val->getType();
365 Val = EmitToInt(CGF, Val, T, IntType);
366
// atomicrmw yields the old value; recompute the new value for the caller.
367 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
368 Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
369 Result = CGF.Builder.CreateBinOp(Op, Result, Val);
370 if (Invert)
371 Result =
372 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
373 llvm::ConstantInt::getAllOnesValue(IntType));
374 Result = EmitFromInt(CGF, Result, T, ValueType);
375 return RValue::get(Result);
376}
377
378/// Utility to insert an atomic cmpxchg instruction.
379///
380/// @param CGF The current codegen function.
381/// @param E Builtin call expression to convert to cmpxchg.
382/// arg0 - address to operate on
383/// arg1 - value to compare with
384/// arg2 - new value
385/// @param ReturnBool Specifies whether to return success flag of
386/// cmpxchg result or the old value.
387///
388/// @returns result of cmpxchg, according to ReturnBool
389///
390/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
391/// invoke the function EmitAtomicCmpXchgForMSIntrin.
// NOTE(review): the signature's first line (source line 392) and the
// definition of `Pair` (line 407, the CreateAtomicCmpXchg call whose operand
// list continues on the next line) are missing from this listing.
393 bool ReturnBool,
394 llvm::AtomicOrdering SuccessOrdering,
395 llvm::AtomicOrdering FailureOrdering) {
396 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
397 Address DestAddr = CheckAtomicAlignment(CGF, E);
398
399 llvm::IntegerType *IntType = llvm::IntegerType::get(
400 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
401
402 Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
403 llvm::Type *ValueType = Cmp->getType();
404 Cmp = EmitToInt(CGF, Cmp, T, IntType);
405 Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
406
408 DestAddr, Cmp, New, SuccessOrdering, FailureOrdering);
409 if (ReturnBool)
410 // Extract boolean success flag and zext it to int.
411 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
412 CGF.ConvertType(E->getType()));
413 else
414 // Extract old value and emit it using the same type as compare value.
415 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
416 ValueType);
417}
418
419/// This function should be invoked to emit atomic cmpxchg for Microsoft's
420/// _InterlockedCompareExchange* intrinsics which have the following signature:
421/// T _InterlockedCompareExchange(T volatile *Destination,
422/// T Exchange,
423/// T Comparand);
424///
425/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
426/// cmpxchg *Destination, Comparand, Exchange.
427/// So we need to swap Comparand and Exchange when invoking
428/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
429/// function MakeAtomicCmpXchgValue since it expects the arguments to be
430/// already swapped.
431
// NOTE(review): the line completing this signature (source line 433, the
// name + leading parameters) and a line of the assert beginning at source
// line 436 are missing from this listing.
432static
434 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
435 assert(E->getArg(0)->getType()->isPointerType());
437 E->getType(), E->getArg(0)->getType()->getPointeeType()));
438 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
439 E->getArg(1)->getType()));
440 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
441 E->getArg(2)->getType()));
442
443 Address DestAddr = CheckAtomicAlignment(CGF, E);
444
445 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
446 auto *RTy = Exchange->getType();
447
448 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
449
// cmpxchg only operates on integers: round-trip pointers through IntPtrTy.
450 if (RTy->isPointerTy()) {
451 Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
452 Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
453 }
454
455 // For Release ordering, the failure ordering should be Monotonic.
456 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
457 AtomicOrdering::Monotonic :
458 SuccessOrdering;
459
460 // The atomic instruction is marked volatile for consistency with MSVC. This
461 // blocks the few atomics optimizations that LLVM has. If we want to optimize
462 // _Interlocked* operations in the future, we will have to remove the volatile
463 // marker.
464 auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
465 DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
466 CmpXchg->setVolatile(true);
467
// _InterlockedCompareExchange returns the OLD value (element 0).
468 auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
469 if (RTy->isPointerTy()) {
470 Result = CGF.Builder.CreateIntToPtr(Result, RTy);
471 }
472
473 return Result;
474}
475
476// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
477// prototyped like this:
478//
479// unsigned char _InterlockedCompareExchange128...(
480// __int64 volatile * _Destination,
481// __int64 _ExchangeHigh,
482// __int64 _ExchangeLow,
483// __int64 * _ComparandResult);
484//
485// Note that Destination is assumed to be at least 16-byte aligned, despite
486// being typed int64.
487
// NOTE(review): the signature's first line (source line 488) and the second
// line of the Address constructor (line 510, supplying the 16-byte
// alignment noted above) are missing from this listing.
489 const CallExpr *E,
490 AtomicOrdering SuccessOrdering) {
491 assert(E->getNumArgs() == 4);
492 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
493 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
494 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
495 Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));
496
497 assert(DestPtr->getType()->isPointerTy());
498 assert(!ExchangeHigh->getType()->isPointerTy());
499 assert(!ExchangeLow->getType()->isPointerTy());
500
501 // For Release ordering, the failure ordering should be Monotonic.
502 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
503 ? AtomicOrdering::Monotonic
504 : SuccessOrdering;
505
506 // Convert to i128 pointers and values. Alignment is also overridden for
507 // destination pointer.
508 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
509 Address DestAddr(DestPtr, Int128Ty,
511 ComparandAddr = ComparandAddr.withElementType(Int128Ty);
512
513 // (((i128)hi) << 64) | ((i128)lo)
514 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
515 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
516 ExchangeHigh =
517 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
518 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
519
520 // Load the comparand for the instruction.
521 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);
522
523 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
524 SuccessOrdering, FailureOrdering);
525
526 // The atomic instruction is marked volatile for consistency with MSVC. This
527 // blocks the few atomics optimizations that LLVM has. If we want to optimize
528 // _Interlocked* operations in the future, we will have to remove the volatile
529 // marker.
530 CXI->setVolatile(true);
531
532 // Store the result as an outparameter.
533 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
534 ComparandAddr);
535
536 // Get the success boolean and zero extend it to i8.
537 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
538 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
539}
540
// NOTE(review): the signature's first line (source line 541) is missing from
// this listing; given the atomic Add followed by CreateAdd below, this is
// presumably the _InterlockedIncrement-style helper — confirm upstream.
// Performs an atomic add of 1 and returns the NEW (incremented) value.
542 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
543 assert(E->getArg(0)->getType()->isPointerType());
544
545 auto *IntTy = CGF.ConvertType(E->getType());
546 Address DestAddr = CheckAtomicAlignment(CGF, E);
547 auto *Result = CGF.Builder.CreateAtomicRMW(
548 AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
549 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
550}
551
// NOTE(review): the line with this function's name (source line 552) is
// missing from this listing; given the atomic Sub followed by CreateSub
// below, it is presumably the decrement counterpart of the helper above —
// confirm upstream. Performs an atomic sub of 1 and returns the NEW value.
553 CodeGenFunction &CGF, const CallExpr *E,
554 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
555 assert(E->getArg(0)->getType()->isPointerType());
556
557 auto *IntTy = CGF.ConvertType(E->getType());
558 Address DestAddr = CheckAtomicAlignment(CGF, E);
559 auto *Result = CGF.Builder.CreateAtomicRMW(
560 AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
561 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
562}
563
564// Build a plain volatile load.
// NOTE(review): the signature line (source line 565) is missing from this
// listing. Emits a volatile, monotonic (unordered-atomic) integer load of
// the pointee size of arg 0.
566 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
567 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
568 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
569 llvm::Type *ITy =
570 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
571 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
572 Load->setAtomic(llvm::AtomicOrdering::Monotonic);
573 Load->setVolatile(true);
574 return Load;
575}
576
577// Build a plain volatile store.
// NOTE(review): the signature line (source line 578) is missing from this
// listing. Emits a volatile, monotonic (unordered-atomic) store of arg 1
// through arg 0, sized by the pointee type.
579 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
580 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
581 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
582 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
583 llvm::StoreInst *Store =
584 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
585 Store->setAtomic(llvm::AtomicOrdering::Monotonic);
586 Store->setVolatile(true);
587 return Store;
588}
589
590// Emit a simple mangled intrinsic that has 1 argument and a return type
591// matching the argument type. Depending on mode, this may be a constrained
592// floating-point intrinsic.
// NOTE(review): the signature's first line (source line 593) is missing from
// this listing.
594 const CallExpr *E, unsigned IntrinsicID,
595 unsigned ConstrainedIntrinsicID) {
596 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
597
// Scope the expression's FP options (rounding/exception behavior) around the
// call emission.
598 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
599 if (CGF.Builder.getIsFPConstrained()) {
600 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
601 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
602 } else {
603 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
604 return CGF.Builder.CreateCall(F, Src0);
605 }
606}
607
608// Emit an intrinsic that has 2 operands of the same type as its result.
609// Depending on mode, this may be a constrained floating-point intrinsic.
// NOTE(review): the signature's first line (source line 610) is missing from
// this listing.
611 const CallExpr *E, unsigned IntrinsicID,
612 unsigned ConstrainedIntrinsicID) {
613 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
614 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
615
616 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
617 if (CGF.Builder.getIsFPConstrained()) {
618 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
619 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
620 } else {
621 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
622 return CGF.Builder.CreateCall(F, { Src0, Src1 });
623 }
624}
625
626// Has second type mangled argument.
// NOTE(review): the line with this function's name and leading parameters
// (source line 628) is missing from this listing. The intrinsic is mangled
// on BOTH operand types (e.g. ldexp-style float+int signatures).
627static Value *
629 Intrinsic::ID IntrinsicID,
630 Intrinsic::ID ConstrainedIntrinsicID) {
631 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
632 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
633
634 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
635 if (CGF.Builder.getIsFPConstrained()) {
636 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
637 {Src0->getType(), Src1->getType()});
638 return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
639 }
640
641 Function *F =
642 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
643 return CGF.Builder.CreateCall(F, {Src0, Src1});
644}
645
646// Emit an intrinsic that has 3 operands of the same type as its result.
647// Depending on mode, this may be a constrained floating-point intrinsic.
// NOTE(review): the signature's first line (source line 648) is missing from
// this listing.
649 const CallExpr *E, unsigned IntrinsicID,
650 unsigned ConstrainedIntrinsicID) {
651 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
652 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
653 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
654
655 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
656 if (CGF.Builder.getIsFPConstrained()) {
657 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
658 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
659 } else {
660 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
661 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
662 }
663}
664
665// Emit an intrinsic that has overloaded integer result and fp operand.
// NOTE(review): the line with this function's name and leading parameters
// (source line 667) is missing from this listing.
666static Value *
668 unsigned IntrinsicID,
669 unsigned ConstrainedIntrinsicID) {
670 llvm::Type *ResultType = CGF.ConvertType(E->getType());
671 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
672
673 if (CGF.Builder.getIsFPConstrained()) {
674 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
675 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
676 {ResultType, Src0->getType()});
677 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
678 } else {
679 Function *F =
680 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
681 return CGF.Builder.CreateCall(F, Src0);
682 }
683}
684
// NOTE(review): the signature's first line (source line 685) is missing from
// this listing. Lowers a frexp-style builtin: calls an intrinsic returning a
// {mantissa, exponent} pair, stores the exponent through arg 1, and returns
// the mantissa.
686 Intrinsic::ID IntrinsicID) {
687 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
688 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
689
690 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
691 llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
692 llvm::Function *F =
693 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
694 llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
695
696 llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
697 LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
698 CGF.EmitStoreOfScalar(Exp, LV);
699
700 return CGF.Builder.CreateExtractValue(Call, 0);
701}
702
703static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
704 Intrinsic::ID IntrinsicID) {
705 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
706 llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
707 llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));
708
709 llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
710 llvm::Value *Call = CGF.Builder.CreateCall(F, Val);
711
712 llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
713 llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);
714
715 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
716 LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
717 LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);
718
719 llvm::StoreInst *StoreSin =
720 CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
721 llvm::StoreInst *StoreCos =
722 CGF.Builder.CreateStore(CosResult, CosLV.getAddress());
723
724 // Mark the two stores as non-aliasing with each other. The order of stores
725 // emitted by this builtin is arbitrary, enforcing a particular order will
726 // prevent optimizations later on.
727 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
728 MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
729 MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
730 MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
731 StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
732 StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
733}
734
735static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
736 Intrinsic::ID IntrinsicID) {
737 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
738 llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));
739
740 llvm::Value *Call =
741 CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);
742
743 llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
744 llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);
745
746 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
747 LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
748 CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);
749
750 return FractionalResult;
751}
752
753/// EmitFAbs - Emit a call to @llvm.fabs().
// NOTE(review): the signature line (source line 754) is missing from this
// listing. The fabs call is marked as not accessing memory so it can be
// freely optimized.
755 llvm::CallInst *Call = CGF.Builder.CreateFAbs(V);
756 Call->setDoesNotAccessMemory();
757 return Call;
758}
759
760/// Emit the computation of the sign bit for a floating point value. Returns
761/// the i1 sign bit value.
// NOTE(review): the signature line (source line 762) is missing from this
// listing. Bitcasts the FP value to an integer of the same width and tests
// its sign via a signed-less-than-zero compare.
763 LLVMContext &C = CGF.CGM.getLLVMContext();
764
765 llvm::Type *Ty = V->getType();
766 int Width = Ty->getPrimitiveSizeInBits();
767 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
768 V = CGF.Builder.CreateBitCast(V, IntTy);
769 if (Ty->isPPC_FP128Ty()) {
770 // We want the sign bit of the higher-order double. The bitcast we just
771 // did works as if the double-double was stored to memory and then
772 // read as an i128. The "store" will put the higher-order double in the
773 // lower address in both little- and big-Endian modes, but the "load"
774 // will treat those bits as a different part of the i128: the low bits in
775 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
776 // we need to shift the high bits down to the low before truncating.
777 Width >>= 1;
778 if (CGF.getTarget().isBigEndian()) {
779 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
780 V = CGF.Builder.CreateLShr(V, ShiftCst);
781 }
782 // We are truncating value in order to extract the higher-order
783 // double, which we will be using to extract the sign from.
784 IntTy = llvm::IntegerType::get(C, Width);
785 V = CGF.Builder.CreateTrunc(V, IntTy);
786 }
787 Value *Zero = llvm::Constant::getNullValue(IntTy);
788 return CGF.Builder.CreateICmpSLT(V, Zero);
789}
790
791/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
792/// hidden pointer). This is used to check annotating FP libcalls (that could
793/// set `errno`) with "int" TBAA metadata is safe. If any floating-point
794/// arguments are passed indirectly, setup for the call could be incorrectly
795/// optimized out.
// NOTE(review): the signature line (source line 796) is missing from this
// listing; `FnInfo` is declared on that missing line.
797 auto IsIndirect = [&](ABIArgInfo const &info) {
798 return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
799 };
800 return !IsIndirect(FnInfo.getReturnInfo()) &&
801 llvm::none_of(FnInfo.arguments(),
802 [&](CGFunctionInfoArgInfo const &ArgInfo) {
803 return IsIndirect(ArgInfo.info);
804 });
805}
806
// NOTE(review): the signature's first line (source line 807), the line
// computing ConstWithoutErrnoAndExceptions (line 821), and the tail of the
// `if` condition (line 826) are missing from this listing.
// Emits a plain library call for a builtin, and — for errno-setting FP math
// libcalls in scalar return position — decorates the call with "int" TBAA
// so the errno write does not alias other memory.
808 const CallExpr *E, llvm::Constant *calleeValue) {
809 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
810 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
811 llvm::CallBase *callOrInvoke = nullptr;
812 CGFunctionInfo const *FnInfo = nullptr;
813 RValue Call =
814 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
815 /*Chain=*/nullptr, &callOrInvoke, &FnInfo);
816
817 if (unsigned BuiltinID = FD->getBuiltinID()) {
818 // Check whether a FP math builtin function, such as BI__builtin_expf
819 ASTContext &Context = CGF.getContext();
820 bool ConstWithoutErrnoAndExceptions =
822 // Restrict to target with errno, for example, MacOS doesn't set errno.
823 // TODO: Support builtin function with complex type returned, eg: cacosh
824 if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
825 !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
827 // Emit "int" TBAA metadata on FP math libcalls.
828 clang::QualType IntTy = Context.IntTy;
829 TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
830 CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
831 }
832 }
833 return Call;
834}
835
836/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
837/// depending on IntrinsicID.
838///
839/// \arg CGF The current codegen function.
840/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
841/// \arg X The first argument to the llvm.*.with.overflow.*.
842/// \arg Y The second argument to the llvm.*.with.overflow.*.
843/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
844/// \returns The result (i.e. sum/product) returned by the intrinsic.
// NOTE(review): the signature's first line (source line 845) is missing from
// this listing.
846 const Intrinsic::ID IntrinsicID,
847 llvm::Value *X, llvm::Value *Y,
848 llvm::Value *&Carry) {
849 // Make sure we have integers of the same width.
850 assert(X->getType() == Y->getType() &&
851 "Arguments must be the same type. (Did you forget to make sure both "
852 "arguments have the same integer width?)");
853
854 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
855 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
// Element 1 is the overflow/carry bit; element 0 is the arithmetic result.
856 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
857 return CGF.Builder.CreateExtractValue(Tmp, 0);
858}
859
namespace {
  /// Bit width and signedness of an integer type, used by the overflow
  /// builtin emitters below to pick an encompassing integer type.
  struct WidthAndSignedness {
    unsigned Width; // Width of the type in bits.
    bool Signed;    // True if the type is a signed integer type.
  };
}
866
867static WidthAndSignedness
869 const clang::QualType Type) {
870 assert(Type->isIntegerType() && "Given type is not an integer.");
871 unsigned Width = context.getIntWidth(Type);
873 return {Width, Signed};
874}
875
876// Given one or more integer types, this function produces an integer type that
877// encompasses them: any value in one of the given types could be expressed in
878// the encompassing type.
879static struct WidthAndSignedness
880EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
881 assert(Types.size() > 0 && "Empty list of types.");
882
883 // If any of the given types is signed, we must return a signed type.
884 bool Signed = false;
885 for (const auto &Type : Types) {
886 Signed |= Type.Signed;
887 }
888
889 // The encompassing type must have a width greater than or equal to the width
890 // of the specified types. Additionally, if the encompassing type is signed,
891 // its width must be strictly greater than the width of any unsigned types
892 // given.
893 unsigned Width = 0;
894 for (const auto &Type : Types) {
895 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
896 if (Width < MinWidth) {
897 Width = MinWidth;
898 }
899 }
900
901 return {Width, Signed};
902}
903
904Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
905 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
906 return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
907 ArgValue);
908}
909
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  if (From == To)
    return true;
  // A "maximum" answer (0) may stand in for type 1, and a "minimum" answer
  // with subobject semantics (3) may stand in for type 2.
  return (From == 0 && To == 1) || (From == 3 && To == 2);
}
918
919static llvm::Value *
920getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
921 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
922}
923
/// Try to fold the object size to a compile-time constant; otherwise fall back
/// to emitting a runtime @llvm.objectsize computation via
/// emitBuiltinObjectSize.
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  if (std::optional<uint64_t> ObjectSize =
      // NOTE(review): the initializer expression appears truncated here —
      // expected a call that statically evaluates the object size of E;
      // confirm against the original file.
    return ConstantInt::get(ResType, *ObjectSize, /*isSigned=*/true);
  return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
}
934
935namespace {
936
937/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
938/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
939class StructFieldAccess
940 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
941 bool AddrOfSeen = false;
942
943public:
944 const Expr *ArrayIndex = nullptr;
945 QualType ArrayElementTy;
946
947 const Expr *VisitMemberExpr(const MemberExpr *E) {
948 if (AddrOfSeen && E->getType()->isArrayType())
949 // Avoid forms like '&ptr->array'.
950 return nullptr;
951 return E;
952 }
953
954 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
955 if (ArrayIndex)
956 // We don't support multiple subscripts.
957 return nullptr;
958
959 AddrOfSeen = false; // '&ptr->array[idx]' is okay.
960 ArrayIndex = E->getIdx();
961 ArrayElementTy = E->getBase()->getType();
962 return Visit(E->getBase());
963 }
964 const Expr *VisitCastExpr(const CastExpr *E) {
965 if (E->getCastKind() == CK_LValueToRValue)
966 return E;
967 return Visit(E->getSubExpr());
968 }
969 const Expr *VisitParenExpr(const ParenExpr *E) {
970 return Visit(E->getSubExpr());
971 }
972 const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
973 AddrOfSeen = true;
974 return Visit(E->getSubExpr());
975 }
976 const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
977 AddrOfSeen = false;
978 return Visit(E->getSubExpr());
979 }
980 const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
981 return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
982 }
983};
984
985} // end anonymous namespace
986
987/// Find a struct's flexible array member. It may be embedded inside multiple
988/// sub-structs, but must still be the last field.
990 ASTContext &Ctx,
991 const RecordDecl *RD) {
992 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
993 CGF.getLangOpts().getStrictFlexArraysLevel();
994
995 if (RD->isImplicit())
996 return nullptr;
997
998 for (const FieldDecl *FD : RD->fields()) {
1000 Ctx, FD, FD->getType(), StrictFlexArraysLevel,
1001 /*IgnoreTemplateOrMacroSubstitution=*/true))
1002 return FD;
1003
1004 if (const auto *RD = FD->getType()->getAsRecordDecl())
1005 if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
1006 return FD;
1007 }
1008
1009 return nullptr;
1010}
1011
1012/// Calculate the offset of a struct field. It may be embedded inside multiple
1013/// sub-structs.
1014static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
1015 const FieldDecl *FD, int64_t &Offset) {
1016 if (RD->isImplicit())
1017 return false;
1018
1019 // Keep track of the field number ourselves, because the other methods
1020 // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
1021 // is laid out.
1022 uint32_t FieldNo = 0;
1023 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
1024
1025 for (const FieldDecl *Field : RD->fields()) {
1026 if (Field == FD) {
1027 Offset += Layout.getFieldOffset(FieldNo);
1028 return true;
1029 }
1030
1031 if (const auto *RD = Field->getType()->getAsRecordDecl()) {
1032 if (GetFieldOffset(Ctx, RD, FD, Offset)) {
1033 Offset += Layout.getFieldOffset(FieldNo);
1034 return true;
1035 }
1036 }
1037
1038 if (!RD->isUnion())
1039 ++FieldNo;
1040 }
1041
1042 return false;
1043}
1044
1045static std::optional<int64_t>
1046GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
1047 int64_t Offset = 0;
1048
1049 if (GetFieldOffset(Ctx, RD, FD, Offset))
1050 return std::optional<int64_t>(Offset);
1051
1052 return std::nullopt;
1053}
1054
1055llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
1056 llvm::Value *EmittedE,
1057 unsigned Type,
1058 llvm::IntegerType *ResType) {
1059 // Note: If the whole struct is specificed in the __bdos (i.e. Visitor
1060 // returns a DeclRefExpr). The calculation of the whole size of the structure
1061 // with a flexible array member can be done in two ways:
1062 //
1063 // 1) sizeof(struct S) + count * sizeof(typeof(fam))
1064 // 2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
1065 //
1066 // The first will add additional padding after the end of the array
1067 // allocation while the second method is more precise, but not quite expected
1068 // from programmers. See
1069 // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
1070 // of the topic.
1071 //
1072 // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
1073 // structure. Therefore, because of the above issue, we choose to match what
1074 // GCC does for consistency's sake.
1075
1076 StructFieldAccess Visitor;
1077 E = Visitor.Visit(E);
1078 if (!E)
1079 return nullptr;
1080
1081 const Expr *Idx = Visitor.ArrayIndex;
1082 if (Idx) {
1083 if (Idx->HasSideEffects(getContext()))
1084 // We can't have side-effects.
1085 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1086
1087 if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
1088 int64_t Val = IL->getValue().getSExtValue();
1089 if (Val < 0)
1090 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1091
1092 // The index is 0, so we don't need to take it into account.
1093 if (Val == 0)
1094 Idx = nullptr;
1095 }
1096 }
1097
1098 // __counted_by on either a flexible array member or a pointer into a struct
1099 // with a flexible array member.
1100 if (const auto *ME = dyn_cast<MemberExpr>(E))
1101 return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
1102 Type, ResType);
1103
1104 // __counted_by on a pointer in a struct.
1105 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
1106 ICE && ICE->getCastKind() == CK_LValueToRValue)
1107 return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
1108 Type, ResType);
1109
1110 return nullptr;
1111}
1112
1114 llvm::Value *Res,
1115 llvm::Value *Index,
1116 llvm::IntegerType *ResType,
1117 bool IsSigned) {
1118 // cmp = (array_size >= 0)
1119 Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
1120 if (Index)
1121 // cmp = (cmp && index >= 0)
1122 Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);
1123
1124 // return cmp ? result : 0
1125 return CGF.Builder.CreateSelect(Cmp, Res,
1126 ConstantInt::get(ResType, 0, IsSigned));
1127}
1128
1129static std::pair<llvm::Value *, llvm::Value *>
1131 const FieldDecl *ArrayFD, const FieldDecl *CountFD,
1132 const Expr *Idx, llvm::IntegerType *ResType,
1133 bool IsSigned) {
1134 // count = ptr->count;
1135 Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
1136 if (!Count)
1137 return std::make_pair<Value *>(nullptr, nullptr);
1138 Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");
1139
1140 // index = ptr->index;
1141 Value *Index = nullptr;
1142 if (Idx) {
1143 bool IdxSigned = Idx->getType()->isSignedIntegerType();
1144 Index = CGF.EmitScalarExpr(Idx);
1145 Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
1146 }
1147
1148 return std::make_pair(Count, Index);
1149}
1150
/// Emit the object size for a struct member that is a pointer annotated with
/// __counted_by/__sized_by (e.g. 'ptr->array' where 'array' is a counted_by
/// pointer). Returns nullptr when the expression doesn't match that pattern,
/// so the caller falls back to @llvm.objectsize.
llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  // Only a pointer-typed field carrying a count attribute qualifies.
  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   struct p **array __attribute__((counted_by(count)));
  //   int count;
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    result = array_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //    index_size = index * casted_array_element_size;
  //    result = array_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index)
  //      cmp = (cmp && index > 0)
  //    return cmp ? result : 0;

  // Size of one pointee element, in bytes; a zero-sized pointee means a
  // __sized_by/__counted_by 'void *', which counts bytes.
  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have an CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1281
1282llvm::Value *CodeGenFunction::emitCountedByMemberSize(
1283 const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
1284 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1285 const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1286 if (!FD)
1287 return nullptr;
1288
1289 // Find the flexible array member and check that it has the __counted_by
1290 // attribute.
1291 ASTContext &Ctx = getContext();
1292 const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
1293 const FieldDecl *FlexibleArrayMemberFD = nullptr;
1294
1296 Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
1297 /*IgnoreTemplateOrMacroSubstitution=*/true))
1298 FlexibleArrayMemberFD = FD;
1299 else
1300 FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);
1301
1302 if (!FlexibleArrayMemberFD ||
1303 !FlexibleArrayMemberFD->getType()->isCountAttributedType())
1304 return nullptr;
1305
1306 // Get the 'count' FieldDecl.
1307 const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
1308 if (!CountFD)
1309 // Can't find the field referenced by the "counted_by" attribute.
1310 return nullptr;
1311
1312 // Calculate the flexible array member's object size using these formulae.
1313 // (Note: if the calculation is negative, we return 0.):
1314 //
1315 // struct p;
1316 // struct s {
1317 // /* ... */
1318 // int count;
1319 // struct p *array[] __attribute__((counted_by(count)));
1320 // };
1321 //
1322 // 1) 'ptr->array':
1323 //
1324 // count = ptr->count;
1325 //
1326 // flexible_array_member_element_size = sizeof (*ptr->array);
1327 // flexible_array_member_size =
1328 // count * flexible_array_member_element_size;
1329 //
1330 // result = flexible_array_member_size;
1331 //
1332 // cmp = (result >= 0)
1333 // return cmp ? result : 0;
1334 //
1335 // 2) '&((cast) ptr->array)[idx]':
1336 //
1337 // count = ptr->count;
1338 // index = idx;
1339 //
1340 // flexible_array_member_element_size = sizeof (*ptr->array);
1341 // flexible_array_member_size =
1342 // count * flexible_array_member_element_size;
1343 //
1344 // casted_flexible_array_member_element_size =
1345 // sizeof (*((cast) ptr->array));
1346 // index_size = index * casted_flexible_array_member_element_size;
1347 //
1348 // result = flexible_array_member_size - index_size;
1349 //
1350 // cmp = (result >= 0)
1351 // if (index != 0)
1352 // cmp = (cmp && index >= 0)
1353 // return cmp ? result : 0;
1354 //
1355 // 3) '&ptr->field':
1356 //
1357 // count = ptr->count;
1358 // sizeof_struct = sizeof (struct s);
1359 //
1360 // flexible_array_member_element_size = sizeof (*ptr->array);
1361 // flexible_array_member_size =
1362 // count * flexible_array_member_element_size;
1363 //
1364 // field_offset = offsetof (struct s, field);
1365 // offset_diff = sizeof_struct - field_offset;
1366 //
1367 // result = offset_diff + flexible_array_member_size;
1368 //
1369 // cmp = (result >= 0)
1370 // return cmp ? result : 0;
1371 //
1372 // 4) '&((cast) ptr->field_array)[idx]':
1373 //
1374 // count = ptr->count;
1375 // index = idx;
1376 // sizeof_struct = sizeof (struct s);
1377 //
1378 // flexible_array_member_element_size = sizeof (*ptr->array);
1379 // flexible_array_member_size =
1380 // count * flexible_array_member_element_size;
1381 //
1382 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1383 // field_offset = offsetof (struct s, field)
1384 // field_offset += index * casted_field_element_size;
1385 //
1386 // offset_diff = sizeof_struct - field_offset;
1387 //
1388 // result = offset_diff + flexible_array_member_size;
1389 //
1390 // cmp = (result >= 0)
1391 // if (index != 0)
1392 // cmp = (cmp && index >= 0)
1393 // return cmp ? result : 0;
1394
1395 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1396
1397 QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();
1398
1399 // Explicit cast because otherwise the CharWidth will promote an i32's into
1400 // u64's leading to overflows.
1401 int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());
1402
1403 // field_offset = offsetof (struct s, field);
1404 Value *FieldOffset = nullptr;
1405 if (FlexibleArrayMemberFD != FD) {
1406 std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
1407 if (!Offset)
1408 return nullptr;
1409 FieldOffset =
1410 llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
1411 }
1412
1413 // count = ptr->count;
1414 // index = ptr->index;
1415 Value *Count, *Index;
1416 std::tie(Count, Index) = GetCountFieldAndIndex(
1417 *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
1418 if (!Count)
1419 return nullptr;
1420
1421 // flexible_array_member_element_size = sizeof (*ptr->array);
1422 const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
1423 CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1424 auto *FlexibleArrayMemberElementSize =
1425 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1426
1427 // flexible_array_member_size = count * flexible_array_member_element_size;
1428 Value *FlexibleArrayMemberSize =
1429 Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
1430 "flexible_array_member_size", !IsSigned, IsSigned);
1431
1432 Value *Result = nullptr;
1433 if (FlexibleArrayMemberFD == FD) {
1434 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1435 // casted_flexible_array_member_element_size =
1436 // sizeof (*((cast) ptr->array));
1437 llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
1438 FlexibleArrayMemberElementSize;
1439 if (!CastedArrayElementTy.isNull() &&
1440 CastedArrayElementTy->isPointerType()) {
1441 CharUnits BaseSize =
1442 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1443 CastedFlexibleArrayMemberElementSize =
1444 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1445 }
1446
1447 // index_size = index * casted_flexible_array_member_element_size;
1448 Value *IndexSize =
1449 Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
1450 "index_size", !IsSigned, IsSigned);
1451
1452 // result = flexible_array_member_size - index_size;
1453 Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
1454 !IsSigned, IsSigned);
1455 } else { // Option (1) 'ptr->array'
1456 // result = flexible_array_member_size;
1457 Result = FlexibleArrayMemberSize;
1458 }
1459 } else {
1460 // sizeof_struct = sizeof (struct s);
1461 llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
1462 const llvm::DataLayout &Layout = CGM.getDataLayout();
1463 TypeSize Size = Layout.getTypeSizeInBits(StructTy);
1464 Value *SizeofStruct =
1465 llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);
1466
1467 if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
1468 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1469 CharUnits BaseSize;
1470 if (!CastedArrayElementTy.isNull() &&
1471 CastedArrayElementTy->isPointerType()) {
1472 BaseSize =
1473 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1474 } else {
1475 const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
1476 BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1477 }
1478
1479 llvm::ConstantInt *CastedFieldElementSize =
1480 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1481
1482 // field_offset += index * casted_field_element_size;
1483 Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
1484 "field_offset", !IsSigned, IsSigned);
1485 FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
1486 }
1487 // Option (3) '&ptr->field', and Option (4) continuation.
1488 // offset_diff = flexible_array_member_offset - field_offset;
1489 Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
1490 "offset_diff", !IsSigned, IsSigned);
1491
1492 // result = offset_diff + flexible_array_member_size;
1493 Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
1494 }
1495
1496 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1497}
1498
1499/// Returns a Value corresponding to the size of the given expression.
1500/// This Value may be either of the following:
1501/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1502/// it)
1503/// - A call to the @llvm.objectsize intrinsic
1504///
1505/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1506/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1507/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1508llvm::Value *
1509CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1510 llvm::IntegerType *ResType,
1511 llvm::Value *EmittedE, bool IsDynamic) {
1512 // We need to reference an argument if the pointer is a parameter with the
1513 // pass_object_size attribute.
1514 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1515 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1516 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1517 if (Param != nullptr && PS != nullptr &&
1518 areBOSTypesCompatible(PS->getType(), Type)) {
1519 auto Iter = SizeArguments.find(Param);
1520 assert(Iter != SizeArguments.end());
1521
1522 const ImplicitParamDecl *D = Iter->second;
1523 auto DIter = LocalDeclMap.find(D);
1524 assert(DIter != LocalDeclMap.end());
1525
1526 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1527 getContext().getSizeType(), E->getBeginLoc());
1528 }
1529 }
1530
1531 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1532 // evaluate E for side-effects. In either case, we shouldn't lower to
1533 // @llvm.objectsize.
1534 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1535 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1536
1537 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1538 assert(Ptr->getType()->isPointerTy() &&
1539 "Non-pointer passed to __builtin_object_size?");
1540
1541 if (IsDynamic)
1542 // Emit special code for a flexible array member with the "counted_by"
1543 // attribute.
1544 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1545 return V;
1546
1547 Function *F =
1548 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1549
1550 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
1551 Value *Min = Builder.getInt1((Type & 2) != 0);
1552 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
1553 Value *NullIsUnknown = Builder.getTrue();
1554 Value *Dynamic = Builder.getInt1(IsDynamic);
1555 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1556}
1557
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  // What the builtin does to the addressed bit, beyond reading it.
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  // Atomic-ordering flavor for the _interlocked* variants; Unlocked means a
  // plain (non-atomic) load/store sequence.
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit; // True for the *64 builtin variants.

  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};

} // namespace
1578
1579BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1580 switch (BuiltinID) {
1581 // Main portable variants.
1582 case Builtin::BI_bittest:
1583 return {TestOnly, Unlocked, false};
1584 case Builtin::BI_bittestandcomplement:
1585 return {Complement, Unlocked, false};
1586 case Builtin::BI_bittestandreset:
1587 return {Reset, Unlocked, false};
1588 case Builtin::BI_bittestandset:
1589 return {Set, Unlocked, false};
1590 case Builtin::BI_interlockedbittestandreset:
1591 return {Reset, Sequential, false};
1592 case Builtin::BI_interlockedbittestandset:
1593 return {Set, Sequential, false};
1594
1595 // 64-bit variants.
1596 case Builtin::BI_bittest64:
1597 return {TestOnly, Unlocked, true};
1598 case Builtin::BI_bittestandcomplement64:
1599 return {Complement, Unlocked, true};
1600 case Builtin::BI_bittestandreset64:
1601 return {Reset, Unlocked, true};
1602 case Builtin::BI_bittestandset64:
1603 return {Set, Unlocked, true};
1604 case Builtin::BI_interlockedbittestandreset64:
1605 return {Reset, Sequential, true};
1606 case Builtin::BI_interlockedbittestandset64:
1607 return {Set, Sequential, true};
1608
1609 // ARM/AArch64-specific ordering variants.
1610 case Builtin::BI_interlockedbittestandset_acq:
1611 return {Set, Acquire, false};
1612 case Builtin::BI_interlockedbittestandset_rel:
1613 return {Set, Release, false};
1614 case Builtin::BI_interlockedbittestandset_nf:
1615 return {Set, NoFence, false};
1616 case Builtin::BI_interlockedbittestandreset_acq:
1617 return {Reset, Acquire, false};
1618 case Builtin::BI_interlockedbittestandreset_rel:
1619 return {Reset, Release, false};
1620 case Builtin::BI_interlockedbittestandreset_nf:
1621 return {Reset, NoFence, false};
1622 case Builtin::BI_interlockedbittestandreset64_acq:
1623 return {Reset, Acquire, false};
1624 case Builtin::BI_interlockedbittestandreset64_rel:
1625 return {Reset, Release, false};
1626 case Builtin::BI_interlockedbittestandreset64_nf:
1627 return {Reset, NoFence, false};
1628 case Builtin::BI_interlockedbittestandset64_acq:
1629 return {Set, Acquire, false};
1630 case Builtin::BI_interlockedbittestandset64_rel:
1631 return {Set, Release, false};
1632 case Builtin::BI_interlockedbittestandset64_nf:
1633 return {Set, NoFence, false};
1634 }
1635 llvm_unreachable("expected only bittest intrinsics");
1636}
1637
1638static char bitActionToX86BTCode(BitTest::ActionKind A) {
1639 switch (A) {
1640 case BitTest::TestOnly: return '\0';
1641 case BitTest::Complement: return 'c';
1642 case BitTest::Reset: return 'r';
1643 case BitTest::Set: return 's';
1644 }
1645 llvm_unreachable("invalid action");
1646}
1647
1649 BitTest BT,
1650 const CallExpr *E, Value *BitBase,
1651 Value *BitPos) {
1652 char Action = bitActionToX86BTCode(BT.Action);
1653 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1654
1655 // Build the assembly.
1657 raw_svector_ostream AsmOS(Asm);
1658 if (BT.Interlocking != BitTest::Unlocked)
1659 AsmOS << "lock ";
1660 AsmOS << "bt";
1661 if (Action)
1662 AsmOS << Action;
1663 AsmOS << SizeSuffix << " $2, ($1)";
1664
1665 // Build the constraints. FIXME: We should support immediates when possible.
1666 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1667 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1668 if (!MachineClobbers.empty()) {
1669 Constraints += ',';
1670 Constraints += MachineClobbers;
1671 }
1672 llvm::IntegerType *IntType = llvm::IntegerType::get(
1673 CGF.getLLVMContext(),
1674 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1675 llvm::FunctionType *FTy =
1676 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1677
1678 llvm::InlineAsm *IA =
1679 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1680 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1681}
1682
1683static llvm::AtomicOrdering
1684getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1685 switch (I) {
1686 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1687 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1688 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1689 case BitTest::Release: return llvm::AtomicOrdering::Release;
1690 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1691 }
1692 llvm_unreachable("invalid interlocking");
1693}
1694
1695static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1696 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1697 llvm::Type *ArgType = ArgValue->getType();
1698
1699 // Boolean vectors can be casted directly to its bitfield representation. We
1700 // intentionally do not round up to the next power of two size and let LLVM
1701 // handle the trailing bits.
1702 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1703 VT && VT->getElementType()->isIntegerTy(1)) {
1704 llvm::Type *StorageType =
1705 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1706 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1707 }
1708
1709 return ArgValue;
1710}
1711
/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
/// bits and a bit position and read and optionally modify the bit at that
/// position. The position index can be arbitrarily large, i.e. it can be larger
/// than 31 or 63, so we need an indexed load in the general case.
static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
                                         unsigned BuiltinID,
                                         const CallExpr *E) {
  Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
  Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));

  // Decode which action (test only / set / reset / complement) and which
  // interlocking level this particular builtin encodes.
  BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);

  // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
  // indexing operation internally. Use them if possible.
  if (CGF.getTarget().getTriple().isX86())
    return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);

  // Otherwise, use generic code to load one byte and test the bit. Use all but
  // the bottom three bits as the array index, and the bottom three bits to form
  // a mask.
  // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
  Value *ByteIndex = CGF.Builder.CreateAShr(
      BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
  Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
                                                 "bittest.byteaddr"),
                   CGF.Int8Ty, CharUnits::One());
  // Low three bits of the position select the bit within the loaded byte.
  Value *PosLow =
      CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
                            llvm::ConstantInt::get(CGF.Int8Ty, 0x7));

  // The updating instructions will need a mask.
  Value *Mask = nullptr;
  if (BT.Action != BitTest::TestOnly) {
    Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
                                 "bittest.mask");
  }

  // Check the action and ordering of the interlocked intrinsics.
  llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);

  Value *OldByte = nullptr;
  if (Ordering != llvm::AtomicOrdering::NotAtomic) {
    // Emit a combined atomicrmw load/store operation for the interlocked
    // intrinsics. Reset clears the bit, so it atomically ANDs with the
    // inverted mask; Set/Complement use OR (Complement is not interlocked
    // through this path upstream of the decode).
    llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
    if (BT.Action == BitTest::Reset) {
      Mask = CGF.Builder.CreateNot(Mask);
      RMWOp = llvm::AtomicRMWInst::And;
    }
    OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
  } else {
    // Emit a plain load for the non-interlocked intrinsics.
    OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
    Value *NewByte = nullptr;
    switch (BT.Action) {
    case BitTest::TestOnly:
      // Don't store anything.
      break;
    case BitTest::Complement:
      NewByte = CGF.Builder.CreateXor(OldByte, Mask);
      break;
    case BitTest::Reset:
      NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
      break;
    case BitTest::Set:
      NewByte = CGF.Builder.CreateOr(OldByte, Mask);
      break;
    }
    if (NewByte)
      CGF.Builder.CreateStore(NewByte, ByteAddr);
  }

  // However we loaded the old byte, either by plain load or atomicrmw, shift
  // the bit into the low position and mask it to 0 or 1.
  Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
  return CGF.Builder.CreateAnd(
      ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
}
1790
namespace {
// Which MSVC setjmp flavor to emit; selects the runtime entry point name and
// the extra argument passed alongside the jmp_buf (see EmitMSVCRTSetJmp).
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}
1798
1799/// MSVC handles setjmp a bit differently on different platforms. On every
1800/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1801/// parameters can be passed as variadic arguments, but we always pass none.
1802static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1803 const CallExpr *E) {
1804 llvm::Value *Arg1 = nullptr;
1805 llvm::Type *Arg1Ty = nullptr;
1806 StringRef Name;
1807 bool IsVarArg = false;
1808 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1809 Name = "_setjmp3";
1810 Arg1Ty = CGF.Int32Ty;
1811 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1812 IsVarArg = true;
1813 } else {
1814 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1815 Arg1Ty = CGF.Int8PtrTy;
1816 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1817 Arg1 = CGF.Builder.CreateCall(
1818 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1819 } else
1820 Arg1 = CGF.Builder.CreateCall(
1821 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1822 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1823 }
1824
1825 // Mark the call site and declaration with ReturnsTwice.
1826 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1827 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1828 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1829 llvm::Attribute::ReturnsTwice);
1830 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1831 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1832 ReturnsTwiceAttr, /*Local=*/true);
1833
1834 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1835 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1836 llvm::Value *Args[] = {Buf, Arg1};
1837 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1838 CB->setAttributes(ReturnsTwiceAttr);
1839 return RValue::get(CB);
1840}
1841
// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
// NOTE(review): the defining signature line (a CodeGenFunction member taking
// an MSVCIntrin BuiltinID, per the _BitScanForward comparison below) and the
// `case MSVCIntrin::...:` labels appear to have been lost in extraction. The
// case bodies below are kept verbatim; restore the labels from upstream
// before this can compile.
                                               const CallExpr *E) {
  switch (BuiltinID) {
    // _BitScanForward/_BitScanReverse: write the index of the lowest/highest
    // set bit through arg 0, and return 0 iff the scanned value was zero.
    Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
    Value *ArgValue = EmitScalarExpr(E->getArg(1));

    llvm::Type *ArgType = ArgValue->getType();
    llvm::Type *IndexType = IndexAddress.getElementType();
    llvm::Type *ResultType = ConvertType(E->getType());

    Value *ArgZero = llvm::Constant::getNullValue(ArgType);
    Value *ResZero = llvm::Constant::getNullValue(ResultType);
    Value *ResOne = llvm::ConstantInt::get(ResultType, 1);

    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");

    Builder.SetInsertPoint(Begin);
    // A zero input skips the scan: the index is left unwritten and the
    // result is 0.
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
    BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ResZero, Begin);

    Builder.SetInsertPoint(NotZero);

    if (BuiltinID == MSVCIntrin::_BitScanForward) {
      // Lowest set bit index == cttz (input known non-zero here).
      Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Builder.CreateStore(ZeroCount, IndexAddress, false);
    } else {
      // Highest set bit index == (width - 1) - ctlz.
      unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
      Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);

      Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
      Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
      ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
      Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
      Builder.CreateStore(Index, IndexAddress, false);
    }
    Builder.CreateBr(End);
    Result->addIncoming(ResOne, NotZero);

    Builder.SetInsertPoint(End);
    return Result;
  }
    // Interlocked RMW intrinsics: each pairs an atomicrmw operation with the
    // ordering implied by the (extraction-dropped) case label suffix
    // (_acq / _rel / _nf / default seq_cst).
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Acquire);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Release);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
                                 AtomicOrdering::Monotonic);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Acquire);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Release);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
                                 AtomicOrdering::Monotonic);
    return EmitAtomicCmpXchgForMSIntrin(*this, E);
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
    return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
        *this, E, AtomicOrdering::SequentiallyConsistent);
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
    return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Acquire);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Release);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
                                 AtomicOrdering::Monotonic);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Acquire);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Release);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
                                 AtomicOrdering::Monotonic);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Acquire);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Release);
    return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
                                 AtomicOrdering::Monotonic);
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
    return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
    return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);

    return EmitAtomicDecrementValue(*this, E);
    return EmitAtomicIncrementValue(*this, E);

    // Request immediate process termination from the kernel. The instruction
    // sequences to do this are documented on MSDN:
    // https://msdn.microsoft.com/en-us/library/dn774154.aspx
    llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
    StringRef Asm, Constraints;
    switch (ISA) {
    default:
      ErrorUnsupported(E, "__fastfail call for this architecture");
      break;
    case llvm::Triple::x86:
    case llvm::Triple::x86_64:
      Asm = "int $$0x29";
      Constraints = "{cx}";
      break;
    case llvm::Triple::thumb:
      Asm = "udf #251";
      Constraints = "{r0}";
      break;
    case llvm::Triple::aarch64:
      Asm = "brk #0xF003";
      Constraints = "{w0}";
    }
    // The fail-fast code (arg 0) is pinned to the register the kernel
    // inspects; the call never returns.
    llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
    llvm::InlineAsm *IA =
        llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
    llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
        getLLVMContext(), llvm::AttributeList::FunctionIndex,
        llvm::Attribute::NoReturn);
    llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
    CI->setAttributes(NoReturnAttr);
    return CI;
  }
  }
  llvm_unreachable("Incorrect MSVC intrinsic!");
}
2021
namespace {
// ARC cleanup for __builtin_os_log_format
// When run, emits a clang.arc.use marker (EmitARCIntrinsicUse) so the ARC
// optimizer keeps `object` alive until this cleanup's scope ends.
struct CallObjCArcUse final : EHScopeStack::Cleanup {
  CallObjCArcUse(llvm::Value *object) : object(object) {}
  llvm::Value *object; // value to be kept alive via clang.arc.use

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    CGF.EmitARCIntrinsicUse(object);
  }
};
}
2033
                                               BuiltinCheckKind Kind) {
  // NOTE(review): the defining signature line (a CodeGenFunction member
  // taking `const Expr *E` plus the Kind above) and one argument line of the
  // EmitCheck call appear to have been lost in extraction; the visible body
  // is kept verbatim.
  assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
         "Unsupported builtin check kind");

  // Emit the operand (bool vectors are packed into an iN by
  // EmitBitCountExpr); skip the zero-value check when -fsanitize=builtin is
  // off.
  Value *ArgValue = EmitBitCountExpr(*this, E);
  if (!SanOpts.has(SanitizerKind::Builtin))
    return ArgValue;

  auto CheckOrdinal = SanitizerKind::SO_Builtin;
  auto CheckHandler = SanitizerHandler::InvalidBuiltin;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  // Passing zero to clz/ctz builtins is invalid; check arg != 0.
  Value *Cond = Builder.CreateICmpNE(
      ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
  EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
             llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
            {});
  return ArgValue;
}
2054
  // NOTE(review): the defining signature line appears to have been lost in
  // extraction (a CodeGenFunction member taking `const Expr *E`; the
  // BCK_AssumePassedFalse data below indicates this checks the operand of a
  // __builtin_assume-style construct). One EmitCheck argument line is also
  // missing. Visible body kept verbatim.
  Value *ArgValue = EvaluateExprAsBool(E);
  if (!SanOpts.has(SanitizerKind::Builtin))
    return ArgValue;

  auto CheckOrdinal = SanitizerKind::SO_Builtin;
  auto CheckHandler = SanitizerHandler::InvalidBuiltin;
  SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
  // Diagnose when the assumed condition evaluates to false at runtime.
  EmitCheck(
      std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
       llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
      {});
  return ArgValue;
}
2070
2071static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2072 return CGF.Builder.CreateBinaryIntrinsic(
2073 Intrinsic::abs, ArgValue,
2074 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2075}
2076
                                    bool SanitizeOverflow) {
  // NOTE(review): the defining signature line (and the declaration of the
  // `Ordinals` vector used below, plus two EmitCheck argument lines) appear
  // to have been lost in extraction; the visible body is kept verbatim.
  Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));

  // Try to eliminate overflow check.
  if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
    // Any constant other than INT_MIN cannot overflow abs(); emit the plain
    // intrinsic with the nsw (min-is-poison) flag.
    if (!VCI->isMinSignedValue())
      return EmitAbs(CGF, ArgValue, true);
  }

  SanitizerHandler CheckHandler;
  if (SanitizeOverflow) {
    Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
    CheckHandler = SanitizerHandler::NegateOverflow;
  } else
    CheckHandler = SanitizerHandler::SubOverflow;

  SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);

  // Compute 0 - x with overflow detection; only INT_MIN overflows.
  Constant *Zero = Constant::getNullValue(ArgValue->getType());
  Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
      Intrinsic::ssub_with_overflow, Zero, ArgValue);
  Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
  Value *NotOverflow = CGF.Builder.CreateNot(
      CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));

  // TODO: support -ftrapv-handler.
  if (SanitizeOverflow) {
    CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
                  CheckHandler,
                  {ArgValue});
  } else
    CGF.EmitTrapCheck(NotOverflow, CheckHandler);

  // abs(x) = x < 0 ? (0 - x) : x.
  Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
  return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
}
2117
/// Get the argument type for arguments to os_log_helper.
// NOTE(review): the defining signature line appears to have been lost in
// extraction (per the call sites, a static helper taking an ASTContext and a
// byte Size). Returns the canonical unsigned integer type of Size bytes.
  QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
  return C.getCanonicalType(UnsignedTy);
}
2123
                                         CharUnits BufferAlignment) {
  // NOTE(review): the defining signature lines
  // (CodeGenFunction::generateBuiltinOSLogHelperFunction, per the call in
  // emitBuiltinOSLogFormat) and several declaration lines (the Name buffer,
  // the ArgTys vector, ImplicitParamDecl trailing arguments, the BufAddr
  // declaration, and the final FinishFunction call) appear to have been lost
  // in extraction; the visible body is kept verbatim.
  ASTContext &Ctx = getContext();

  // Mangle the layout into the helper's name so identical layouts share one
  // linkonce_odr helper.
  {
    raw_svector_ostream OS(Name);
    OS << "__os_log_helper";
    OS << "_" << BufferAlignment.getQuantity();
    OS << "_" << int(Layout.getSummaryByte());
    OS << "_" << int(Layout.getNumArgsByte());
    for (const auto &Item : Layout.Items)
      OS << "_" << int(Item.getSizeByte()) << "_"
         << int(Item.getDescriptorByte());
  }

  // Reuse a previously generated helper for the same layout.
  if (llvm::Function *F = CGM.getModule().getFunction(Name))
    return F;

  FunctionArgList Args;
  Args.push_back(ImplicitParamDecl::Create(
      Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
  ArgTys.emplace_back(Ctx.VoidPtrTy);

  // One parameter per non-empty layout item, typed as the unsigned integer
  // of the item's size.
  for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
    char Size = Layout.Items[I].getSizeByte();
    if (!Size)
      continue;

    QualType ArgTy = getOSLogArgType(Ctx, Size);
    Args.push_back(ImplicitParamDecl::Create(
        Ctx, nullptr, SourceLocation(),
        &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
    ArgTys.emplace_back(ArgTy);
  }

  QualType ReturnTy = Ctx.VoidTy;

  // The helper function has linkonce_odr linkage to enable the linker to merge
  // identical functions. To ensure the merging always happens, 'noinline' is
  // attached to the function when compiling with -Oz.
  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
  llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
  llvm::Function *Fn = llvm::Function::Create(
      FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
  Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
  CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
  CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
  Fn->setDoesNotThrow();

  // Attach 'noinline' at -Oz.
  if (CGM.getCodeGenOpts().OptimizeSize == 2)
    Fn->addFnAttr(llvm::Attribute::NoInline);

  auto NL = ApplyDebugLocation::CreateEmpty(*this);
  StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);

  // Create a scope with an artificial location for the body of this function.
  auto AL = ApplyDebugLocation::CreateArtificial(*this);

  // Serialize: summary byte, arg-count byte, then per item a descriptor
  // byte, a size byte, and the argument payload.
  CharUnits Offset;
      Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
      BufferAlignment);
  Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
  Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
                      Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));

  unsigned I = 1;
  for (const auto &Item : Layout.Items) {
    Builder.CreateStore(
        Builder.getInt8(Item.getDescriptorByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
    Builder.CreateStore(
        Builder.getInt8(Item.getSizeByte()),
        Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));

    CharUnits Size = Item.size();
    if (!Size.getQuantity())
      continue;

    // Copy the helper parameter's bytes into the buffer at Offset.
    Address Arg = GetAddrOfLocalVar(Args[I]);
    Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
    Addr = Addr.withElementType(Arg.getElementType());
    Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
    Offset += Size;
    ++I;
  }


  return Fn;
}
2223
  assert(E.getNumArgs() >= 2 &&
         "__builtin_os_log_format takes at least 2 arguments");
  // NOTE(review): the defining signature line (emitBuiltinOSLogFormat, per
  // the comment style and the generateBuiltinOSLogHelperFunction call below),
  // the line computing `Layout`, the temporary-detection condition inside
  // LifetimeExtendObject, the `Alloca` declaration, and the cleanup push for
  // CallObjCArcUse appear to have been lost in extraction; visible body kept
  // verbatim.
  ASTContext &Ctx = getContext();
  Address BufAddr = EmitPointerWithAlignment(E.getArg(0));

  // Ignore argument 1, the format string. It is not currently used.
  CallArgList Args;
  Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);

  for (const auto &Item : Layout.Items) {
    int Size = Item.getSizeByte();
    if (!Size)
      continue;

    llvm::Value *ArgVal;

    if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
      // Pack the mask string's bytes, little-endian, into one i64 constant.
      uint64_t Val = 0;
      for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
        Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
      ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
    } else if (const Expr *TheExpr = Item.getExpr()) {
      ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);

      // If a temporary object that requires destruction after the full
      // expression is passed, push a lifetime-extended cleanup to extend its
      // lifetime to the end of the enclosing block scope.
      auto LifetimeExtendObject = [&](const Expr *E) {
        E = E->IgnoreParenCasts();
        // Extend lifetimes of objects returned by function calls and message
        // sends.

        // FIXME: We should do this in other cases in which temporaries are
        // created including arguments of non-ARC types (e.g., C++
        // temporaries).
          return true;
        return false;
      };

      if (TheExpr->getType()->isObjCRetainableType() &&
          getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
        assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
               "Only scalar can be a ObjC retainable type");
        if (!isa<Constant>(ArgVal)) {
          // Retain into a temporary and register a lifetime-extended destroy
          // so the object survives to the end of the enclosing block scope.
          CleanupKind Cleanup = getARCCleanupKind();
          QualType Ty = TheExpr->getType();
          RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
          ArgVal = EmitARCRetain(Ty, ArgVal);
          Builder.CreateStore(ArgVal, Addr);
          pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
                                      Cleanup & EHCleanup);

          // Push a clang.arc.use call to ensure ARC optimizer knows that the
          // argument has to be alive.
          if (CGM.getCodeGenOpts().OptimizationLevel != 0)
        }
      }
    } else {
      ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
    }

    // Reinterpret the argument as an integer of its in-memory size, then
    // widen/bitcast to the helper parameter's type.
    unsigned ArgValSize =
        CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
    llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
                                                     ArgValSize);
    ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
    CanQualType ArgTy = getOSLogArgType(Ctx, Size);
    // If ArgVal has type x86_fp80, zero-extend ArgVal.
    ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
    Args.add(RValue::get(ArgVal), ArgTy);
  }

  const CGFunctionInfo &FI =
      CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
  llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
      Layout, BufAddr.getAlignment());
  return RValue::get(BufAddr, *this);
}
2310
    unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
    WidthAndSignedness ResultInfo) {
  // NOTE(review): the line naming this predicate appears lost in extraction.
  // It recognizes the __builtin_mul_overflow special case of two unsigned
  // operands with a same-width signed result.
  return BuiltinID == Builtin::BI__builtin_mul_overflow &&
         Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
         !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
}
2318
    CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
    const clang::Expr *Op2, WidthAndSignedness Op2Info,
    const clang::Expr *ResultArg, QualType ResultQTy,
    WidthAndSignedness ResultInfo) {
  // NOTE(review): the line naming this helper and the opening line of the
  // assert appear lost in extraction; visible body kept verbatim. It emits
  // the unsigned*unsigned -> signed-result __builtin_mul_overflow case.
      Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
         "Cannot specialize this multiply");

  llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
  llvm::Value *V2 = CGF.EmitScalarExpr(Op2);

  llvm::Value *HasOverflow;
  llvm::Value *Result = EmitOverflowIntrinsic(
      CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);

  // The intrinsic call will detect overflow when the value is > UINT_MAX,
  // however, since the original builtin had a signed result, we need to report
  // an overflow when the result is greater than INT_MAX.
  auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
  llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);

  llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
  HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);

  // Store the (possibly truncated-to-memory) result through the result
  // pointer, honoring its volatility; the overflow flag is the return value.
  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(HasOverflow);
}
2351
2352/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2353static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2354 WidthAndSignedness Op1Info,
2355 WidthAndSignedness Op2Info,
2356 WidthAndSignedness ResultInfo) {
2357 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2358 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2359 Op1Info.Signed != Op2Info.Signed;
2360}
2361
/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
/// the generic checked-binop irgen.
// NOTE(review): the line carrying this helper's name and first parameter
// appears to have been lost in extraction; body kept verbatim.
static RValue
                             WidthAndSignedness Op1Info, const clang::Expr *Op2,
                             WidthAndSignedness Op2Info,
                             const clang::Expr *ResultArg, QualType ResultQTy,
                             WidthAndSignedness ResultInfo) {
  assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
                                    Op2Info, ResultInfo) &&
         "Not a mixed-sign multipliction we can specialize");

  // Emit the signed and unsigned operands.
  const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
  const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
  llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
  llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
  unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
  unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;

  // One of the operands may be smaller than the other. If so, [s|z]ext it.
  if (SignedOpWidth < UnsignedOpWidth)
    Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
  if (UnsignedOpWidth < SignedOpWidth)
    Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");

  llvm::Type *OpTy = Signed->getType();
  llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
  Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
  llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
  unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);

  // Take the absolute value of the signed operand.
  llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
  llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
  llvm::Value *AbsSigned =
      CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);

  // Perform a checked unsigned multiplication.
  llvm::Value *UnsignedOverflow;
  llvm::Value *UnsignedResult =
      EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
                            Unsigned, UnsignedOverflow);

  llvm::Value *Overflow, *Result;
  if (ResultInfo.Signed) {
    // Signed overflow occurs if the result is greater than INT_MAX or lesser
    // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
    auto IntMax =
        llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
    llvm::Value *MaxResult =
        CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
                              CGF.Builder.CreateZExt(IsNegative, OpTy));
    llvm::Value *SignedOverflow =
        CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);

    // Prepare the signed result (possibly by negating it).
    llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
    llvm::Value *SignedResult =
        CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
    Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
  } else {
    // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
    llvm::Value *Underflow = CGF.Builder.CreateAnd(
        IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
    Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
    if (ResultInfo.Width < OpWidth) {
      // A narrower unsigned result also overflows when the wide product
      // exceeds the narrow type's maximum.
      auto IntMax =
          llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
      llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
          UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
      Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
    }

    // Negate the product if it would be negative in infinite precision.
    Result = CGF.Builder.CreateSelect(
        IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);

    Result = CGF.Builder.CreateTrunc(Result, ResTy);
  }
  assert(Overflow && Result && "Missing overflow or result");

  // Store the result through the (possibly volatile) result pointer; the
  // overflow flag is the builtin's return value.
  bool isVolatile =
      ResultArg->getType()->getPointeeType().isVolatileQualified();
  CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
                          isVolatile);
  return RValue::get(Overflow);
}
2451
static bool
// NOTE(review): the line carrying this helper's name and leading parameters
// appears lost in extraction; per the recursive call below it is
// TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, ...).
                                 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
  // Arrays launder iff their element type does.
  if (const auto *Arr = Ctx.getAsArrayType(Ty))
    Ty = Ctx.getBaseElementType(Arr);

  const auto *Record = Ty->getAsCXXRecordDecl();
  if (!Record)
    return false;

  // We've already checked this type, or are in the process of checking it.
  if (!Seen.insert(Record).second)
    return false;

  assert(Record->hasDefinition() &&
         "Incomplete types should already be diagnosed");

  if (Record->isDynamicClass())
    return true;

  // Otherwise, require laundering iff any field (recursively) does.
  for (FieldDecl *F : Record->fields()) {
    if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
      return true;
  }
  return false;
}
2478
/// Determine if the specified type requires laundering by checking if it is a
/// dynamic class type or contains a subobject which is a dynamic class type.
// NOTE(review): the defining signature line and the declaration of `Seen`
// appear to have been lost in extraction; visible body kept verbatim.
  // Laundering is only meaningful under -fstrict-vtable-pointers.
  if (!CGM.getCodeGenOpts().StrictVTablePointers)
    return false;
  return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
}
2487
2488RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2489 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2490 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2491
2492 // The builtin's shift arg may have a different type than the source arg and
2493 // result, but the LLVM intrinsic uses the same type for all values.
2494 llvm::Type *Ty = Src->getType();
2495 llvm::Type *ShiftTy = ShiftAmt->getType();
2496
2497 unsigned BitWidth = Ty->getIntegerBitWidth();
2498
2499 // Normalize shift amount to [0, BitWidth) range to match runtime behavior.
2500 // This matches the algorithm in ExprConstant.cpp for constant evaluation.
2501 if (BitWidth == 1) {
2502 // Rotating a 1-bit value is always a no-op
2503 ShiftAmt = ConstantInt::get(ShiftTy, 0);
2504 } else if (BitWidth == 2) {
2505 // For 2-bit values: rotation amount is 0 or 1 based on
2506 // whether the amount is even or odd. We can't use srem here because
2507 // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
2508 llvm::Value *One = ConstantInt::get(ShiftTy, 1);
2509 ShiftAmt = Builder.CreateAnd(ShiftAmt, One);
2510 } else {
2511 unsigned ShiftAmtBitWidth = ShiftTy->getIntegerBitWidth();
2512 bool ShiftAmtIsSigned = E->getArg(1)->getType()->isSignedIntegerType();
2513
2514 // Choose the wider type for the divisor to avoid truncation
2515 llvm::Type *DivisorTy = ShiftAmtBitWidth > BitWidth ? ShiftTy : Ty;
2516 llvm::Value *Divisor = ConstantInt::get(DivisorTy, BitWidth);
2517
2518 // Extend ShiftAmt to match Divisor width if needed
2519 if (ShiftAmtBitWidth < DivisorTy->getIntegerBitWidth()) {
2520 ShiftAmt = Builder.CreateIntCast(ShiftAmt, DivisorTy, ShiftAmtIsSigned);
2521 }
2522
2523 // Normalize to [0, BitWidth)
2524 llvm::Value *RemResult;
2525 if (ShiftAmtIsSigned) {
2526 RemResult = Builder.CreateSRem(ShiftAmt, Divisor);
2527 // Signed remainder can be negative, convert to positive equivalent
2528 llvm::Value *Zero = ConstantInt::get(DivisorTy, 0);
2529 llvm::Value *IsNegative = Builder.CreateICmpSLT(RemResult, Zero);
2530 llvm::Value *PositiveShift = Builder.CreateAdd(RemResult, Divisor);
2531 ShiftAmt = Builder.CreateSelect(IsNegative, PositiveShift, RemResult);
2532 } else {
2533 ShiftAmt = Builder.CreateURem(ShiftAmt, Divisor);
2534 }
2535 }
2536
2537 // Convert to the source type if needed
2538 if (ShiftAmt->getType() != Ty) {
2539 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2540 }
2541
2542 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2543 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2544 Function *F = CGM.getIntrinsic(IID, Ty);
2545 return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
2546}
2547
// Map math builtins for long-double to f128 version.
// Returns the BuiltinID unchanged for anything without an f128 counterpart.
// NOTE(review): most of the MUTATE_LDBL(...) entries appear to have been lost
// in extraction; only the few below survive. Restore the full list from
// upstream before relying on this mapping.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
  switch (BuiltinID) {
#define MUTATE_LDBL(func) \
  case Builtin::BI__builtin_##func##l: \
    return Builtin::BI__builtin_##func##f128;
  MUTATE_LDBL(nans)
  MUTATE_LDBL(inf)
  MUTATE_LDBL(huge_val)
#undef MUTATE_LDBL
  default:
    return BuiltinID;
  }
}
2619
2620static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2621 Value *V) {
2622 if (CGF.Builder.getIsFPConstrained() &&
2623 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2624 if (Value *Result =
2625 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2626 return Result;
2627 }
2628 return nullptr;
2629}
2630
                                          const FunctionDecl *FD) {
  // NOTE(review): the defining signature line and the declaration of the
  // `Args` vector appear to have been lost in extraction; visible body kept
  // verbatim. It forwards an unsupported builtin to a synthesized
  // "<name>__hipstdpar_unsupported" runtime function, passing poison for
  // every formal argument.
  auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
  auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
  auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);

  for (auto &&FormalTy : FnTy->params())
    Args.push_back(llvm::PoisonValue::get(FormalTy));

  return RValue::get(CGF->Builder.CreateCall(UBF, Args));
}
2643
// stdc_{leading,trailing}_{zeros,ones} and stdc_count_ones: counts bits using
// ctlz, cttz, or ctpop (IsPop). InvertArg flips the input to count the
// opposite bit value.
// NOTE(review): the first line of this signature was elided by the source
// extraction; the visible parameters are the intrinsic to emit plus the two
// mode flags.
                                               Intrinsic::ID IntID,
                                               bool InvertArg, bool IsPop) {
  // Evaluate the single operand; the intrinsic is overloaded on its type.
  Value *ArgValue = EmitScalarExpr(E->getArg(0));
  llvm::Type *ArgType = ArgValue->getType();
  llvm::Type *ResultType = ConvertType(E->getType());
  // Counting the "ones" variants is done by bit-inverting and counting zeros
  // (and vice versa).
  Value *ActualArg = InvertArg ? Builder.CreateNot(ArgValue) : ArgValue;
  Function *F = CGM.getIntrinsic(IntID, ArgType);
  // ctpop takes one operand; ctlz/cttz take an extra i1 "zero is poison"
  // flag, passed as false so a zero input is well-defined.
  Value *Result = IsPop
                      ? Builder.CreateCall(F, ActualArg)
                      : Builder.CreateCall(F, {ActualArg, Builder.getFalse()});
  // Zero-extend/truncate to the declared result type if they differ.
  if (Result->getType() != ResultType)
    Result = Builder.CreateIntCast(Result, ResultType, false);
  return RValue::get(Result);
}
2662
// stdc_count_zeros (BitWidth - ctpop) and stdc_bit_width (BitWidth - ctlz).
// IsPop selects ctpop; otherwise ctlz is used.
// NOTE(review): the first line of this signature was elided by the source
// extraction; only the trailing parameters are visible.
                                             Intrinsic::ID IntID, bool IsPop) {
  Value *ArgValue = EmitScalarExpr(E->getArg(0));
  llvm::Type *ArgType = ArgValue->getType();
  llvm::Type *ResultType = ConvertType(E->getType());
  unsigned BitWidth = ArgType->getIntegerBitWidth();
  Function *F = CGM.getIntrinsic(IntID, ArgType);
  // ctlz receives an explicit 'false' zero-is-poison flag, so a zero input
  // yields BitWidth and the subtraction below then yields 0.
  Value *Cnt = IsPop ? Builder.CreateCall(F, ArgValue)
                     : Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
  // Result = BitWidth - count (zeros count, or bit width, per the comment).
  Value *Result = Builder.CreateSub(ConstantInt::get(ArgType, BitWidth), Cnt);
  if (Result->getType() != ResultType)
    Result = Builder.CreateIntCast(Result, ResultType, false);
  return RValue::get(Result);
}
2679
// stdc_first_{leading,trailing}_{zero,one}: returns the 1-based position of
// the first matching bit, or 0 if no such bit exists. InvertArg flips the
// input to search for zeros instead of ones.
// NOTE(review): the first line of this signature was elided by the source
// extraction; only the trailing parameter is visible.
                                           bool InvertArg) {
  Value *ArgValue = EmitScalarExpr(E->getArg(0));
  llvm::Type *ArgType = ArgValue->getType();
  llvm::Type *ResultType = ConvertType(E->getType());
  Value *Zero = ConstantInt::get(ArgType, 0);
  Value *One = ConstantInt::get(ArgType, 1);
  // Searching for a zero bit == searching for a one bit in the complement.
  Value *ActualArg = InvertArg ? Builder.CreateNot(ArgValue) : ArgValue;
  Function *F = CGM.getIntrinsic(IntID, ArgType);
  // Count with zero-is-poison == false so a zero input is well-defined.
  Value *Cnt = Builder.CreateCall(F, {ActualArg, Builder.getFalse()});
  // Convert the 0-based count into a 1-based bit position.
  Value *Tmp = Builder.CreateAdd(Cnt, One);
  // If no matching bit exists (operand is all-zero after any inversion),
  // the result is 0 rather than BitWidth + 1.
  Value *IsZero = Builder.CreateICmpEQ(ActualArg, Zero);
  Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp);
  if (Result->getType() != ResultType)
    Result = Builder.CreateIntCast(Result, ResultType, false);
  return RValue::get(Result);
}
2700
2702 const CallExpr *E,
2704 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2705 "Should not codegen for consteval builtins");
2706
2707 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2708 // See if we can constant fold this builtin. If so, don't emit it at all.
2709 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2711 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2712 !Result.hasSideEffects()) {
2713 if (Result.Val.isInt())
2714 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2715 Result.Val.getInt()));
2716 if (Result.Val.isFloat())
2717 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2718 Result.Val.getFloat()));
2719 }
2720
2721 // If current long-double semantics is IEEE 128-bit, replace math builtins
2722 // of long-double with f128 equivalent.
2723 // TODO: This mutation should also be applied to other targets other than PPC,
2724 // after backend supports IEEE 128-bit style libcalls.
2725 if (getTarget().getTriple().isPPC64() &&
2726 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2727 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2728
2729 // If the builtin has been declared explicitly with an assembler label,
2730 // disable the specialized emitting below. Ideally we should communicate the
2731 // rename in IR, or at least avoid generating the intrinsic calls that are
2732 // likely to get lowered to the renamed library functions.
2733 const unsigned BuiltinIDIfNoAsmLabel =
2734 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2735
2736 std::optional<bool> ErrnoOverriden;
2737 // ErrnoOverriden is true if math-errno is overriden via the
2738 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2739 // which implies math-errno.
2740 if (E->hasStoredFPFeatures()) {
2742 if (OP.hasMathErrnoOverride())
2743 ErrnoOverriden = OP.getMathErrnoOverride();
2744 }
2745 // True if 'attribute__((optnone))' is used. This attribute overrides
2746 // fast-math which implies math-errno.
2747 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2748
2749 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2750
2751 bool GenerateFPMathIntrinsics =
2753 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2754 OptNone, IsOptimizationEnabled);
2755
2756 if (GenerateFPMathIntrinsics) {
2757 switch (BuiltinIDIfNoAsmLabel) {
2758 case Builtin::BIacos:
2759 case Builtin::BIacosf:
2760 case Builtin::BIacosl:
2761 case Builtin::BI__builtin_acos:
2762 case Builtin::BI__builtin_acosf:
2763 case Builtin::BI__builtin_acosf16:
2764 case Builtin::BI__builtin_acosl:
2765 case Builtin::BI__builtin_acosf128:
2766 case Builtin::BI__builtin_elementwise_acos:
2768 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2769
2770 case Builtin::BIasin:
2771 case Builtin::BIasinf:
2772 case Builtin::BIasinl:
2773 case Builtin::BI__builtin_asin:
2774 case Builtin::BI__builtin_asinf:
2775 case Builtin::BI__builtin_asinf16:
2776 case Builtin::BI__builtin_asinl:
2777 case Builtin::BI__builtin_asinf128:
2778 case Builtin::BI__builtin_elementwise_asin:
2780 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2781
2782 case Builtin::BIatan:
2783 case Builtin::BIatanf:
2784 case Builtin::BIatanl:
2785 case Builtin::BI__builtin_atan:
2786 case Builtin::BI__builtin_atanf:
2787 case Builtin::BI__builtin_atanf16:
2788 case Builtin::BI__builtin_atanl:
2789 case Builtin::BI__builtin_atanf128:
2790 case Builtin::BI__builtin_elementwise_atan:
2792 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2793
2794 case Builtin::BIatan2:
2795 case Builtin::BIatan2f:
2796 case Builtin::BIatan2l:
2797 case Builtin::BI__builtin_atan2:
2798 case Builtin::BI__builtin_atan2f:
2799 case Builtin::BI__builtin_atan2f16:
2800 case Builtin::BI__builtin_atan2l:
2801 case Builtin::BI__builtin_atan2f128:
2802 case Builtin::BI__builtin_elementwise_atan2:
2804 *this, E, Intrinsic::atan2,
2805 Intrinsic::experimental_constrained_atan2));
2806
2807 case Builtin::BIceil:
2808 case Builtin::BIceilf:
2809 case Builtin::BIceill:
2810 case Builtin::BI__builtin_ceil:
2811 case Builtin::BI__builtin_ceilf:
2812 case Builtin::BI__builtin_ceilf16:
2813 case Builtin::BI__builtin_ceill:
2814 case Builtin::BI__builtin_ceilf128:
2815 case Builtin::BI__builtin_elementwise_ceil:
2817 Intrinsic::ceil,
2818 Intrinsic::experimental_constrained_ceil));
2819
2820 case Builtin::BIcopysign:
2821 case Builtin::BIcopysignf:
2822 case Builtin::BIcopysignl:
2823 case Builtin::BI__builtin_copysign:
2824 case Builtin::BI__builtin_copysignf:
2825 case Builtin::BI__builtin_copysignf16:
2826 case Builtin::BI__builtin_copysignl:
2827 case Builtin::BI__builtin_copysignf128:
2828 return RValue::get(
2829 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2830
2831 case Builtin::BIcos:
2832 case Builtin::BIcosf:
2833 case Builtin::BIcosl:
2834 case Builtin::BI__builtin_cos:
2835 case Builtin::BI__builtin_cosf:
2836 case Builtin::BI__builtin_cosf16:
2837 case Builtin::BI__builtin_cosl:
2838 case Builtin::BI__builtin_cosf128:
2839 case Builtin::BI__builtin_elementwise_cos:
2841 Intrinsic::cos,
2842 Intrinsic::experimental_constrained_cos));
2843
2844 case Builtin::BIcosh:
2845 case Builtin::BIcoshf:
2846 case Builtin::BIcoshl:
2847 case Builtin::BI__builtin_cosh:
2848 case Builtin::BI__builtin_coshf:
2849 case Builtin::BI__builtin_coshf16:
2850 case Builtin::BI__builtin_coshl:
2851 case Builtin::BI__builtin_coshf128:
2852 case Builtin::BI__builtin_elementwise_cosh:
2854 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2855
2856 case Builtin::BIexp:
2857 case Builtin::BIexpf:
2858 case Builtin::BIexpl:
2859 case Builtin::BI__builtin_exp:
2860 case Builtin::BI__builtin_expf:
2861 case Builtin::BI__builtin_expf16:
2862 case Builtin::BI__builtin_expl:
2863 case Builtin::BI__builtin_expf128:
2864 case Builtin::BI__builtin_elementwise_exp:
2866 Intrinsic::exp,
2867 Intrinsic::experimental_constrained_exp));
2868
2869 case Builtin::BIexp2:
2870 case Builtin::BIexp2f:
2871 case Builtin::BIexp2l:
2872 case Builtin::BI__builtin_exp2:
2873 case Builtin::BI__builtin_exp2f:
2874 case Builtin::BI__builtin_exp2f16:
2875 case Builtin::BI__builtin_exp2l:
2876 case Builtin::BI__builtin_exp2f128:
2877 case Builtin::BI__builtin_elementwise_exp2:
2879 Intrinsic::exp2,
2880 Intrinsic::experimental_constrained_exp2));
2881 case Builtin::BI__builtin_exp10:
2882 case Builtin::BI__builtin_exp10f:
2883 case Builtin::BI__builtin_exp10f16:
2884 case Builtin::BI__builtin_exp10l:
2885 case Builtin::BI__builtin_exp10f128:
2886 case Builtin::BI__builtin_elementwise_exp10: {
2887 // TODO: strictfp support
2888 if (Builder.getIsFPConstrained())
2889 break;
2890 return RValue::get(
2891 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2892 }
2893 case Builtin::BIfabs:
2894 case Builtin::BIfabsf:
2895 case Builtin::BIfabsl:
2896 case Builtin::BI__builtin_fabs:
2897 case Builtin::BI__builtin_fabsf:
2898 case Builtin::BI__builtin_fabsf16:
2899 case Builtin::BI__builtin_fabsl:
2900 case Builtin::BI__builtin_fabsf128:
2901 return RValue::get(
2902 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2903
2904 case Builtin::BIfloor:
2905 case Builtin::BIfloorf:
2906 case Builtin::BIfloorl:
2907 case Builtin::BI__builtin_floor:
2908 case Builtin::BI__builtin_floorf:
2909 case Builtin::BI__builtin_floorf16:
2910 case Builtin::BI__builtin_floorl:
2911 case Builtin::BI__builtin_floorf128:
2912 case Builtin::BI__builtin_elementwise_floor:
2914 Intrinsic::floor,
2915 Intrinsic::experimental_constrained_floor));
2916
2917 case Builtin::BIfma:
2918 case Builtin::BIfmaf:
2919 case Builtin::BIfmal:
2920 case Builtin::BI__builtin_fma:
2921 case Builtin::BI__builtin_fmaf:
2922 case Builtin::BI__builtin_fmaf16:
2923 case Builtin::BI__builtin_fmal:
2924 case Builtin::BI__builtin_fmaf128:
2925 case Builtin::BI__builtin_elementwise_fma:
2927 Intrinsic::fma,
2928 Intrinsic::experimental_constrained_fma));
2929
2930 case Builtin::BIfmax:
2931 case Builtin::BIfmaxf:
2932 case Builtin::BIfmaxl:
2933 case Builtin::BI__builtin_fmax:
2934 case Builtin::BI__builtin_fmaxf:
2935 case Builtin::BI__builtin_fmaxf16:
2936 case Builtin::BI__builtin_fmaxl:
2937 case Builtin::BI__builtin_fmaxf128: {
2938 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2939 Builder.getFastMathFlags().setNoSignedZeros();
2941 *this, E, Intrinsic::maxnum,
2942 Intrinsic::experimental_constrained_maxnum));
2943 }
2944
2945 case Builtin::BIfmin:
2946 case Builtin::BIfminf:
2947 case Builtin::BIfminl:
2948 case Builtin::BI__builtin_fmin:
2949 case Builtin::BI__builtin_fminf:
2950 case Builtin::BI__builtin_fminf16:
2951 case Builtin::BI__builtin_fminl:
2952 case Builtin::BI__builtin_fminf128: {
2953 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2954 Builder.getFastMathFlags().setNoSignedZeros();
2956 *this, E, Intrinsic::minnum,
2957 Intrinsic::experimental_constrained_minnum));
2958 }
2959
2960 case Builtin::BIfmaximum_num:
2961 case Builtin::BIfmaximum_numf:
2962 case Builtin::BIfmaximum_numl:
2963 case Builtin::BI__builtin_fmaximum_num:
2964 case Builtin::BI__builtin_fmaximum_numf:
2965 case Builtin::BI__builtin_fmaximum_numf16:
2966 case Builtin::BI__builtin_fmaximum_numl:
2967 case Builtin::BI__builtin_fmaximum_numf128:
2968 return RValue::get(
2969 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2970
2971 case Builtin::BIfminimum_num:
2972 case Builtin::BIfminimum_numf:
2973 case Builtin::BIfminimum_numl:
2974 case Builtin::BI__builtin_fminimum_num:
2975 case Builtin::BI__builtin_fminimum_numf:
2976 case Builtin::BI__builtin_fminimum_numf16:
2977 case Builtin::BI__builtin_fminimum_numl:
2978 case Builtin::BI__builtin_fminimum_numf128:
2979 return RValue::get(
2980 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2981
2982 // fmod() is a special-case. It maps to the frem instruction rather than an
2983 // LLVM intrinsic.
2984 case Builtin::BIfmod:
2985 case Builtin::BIfmodf:
2986 case Builtin::BIfmodl:
2987 case Builtin::BI__builtin_fmod:
2988 case Builtin::BI__builtin_fmodf:
2989 case Builtin::BI__builtin_fmodf16:
2990 case Builtin::BI__builtin_fmodl:
2991 case Builtin::BI__builtin_fmodf128:
2992 case Builtin::BI__builtin_elementwise_fmod: {
2993 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2994 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2995 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2996 if (Builder.getIsFPConstrained()) {
2997 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2998 Arg1->getType());
2999 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
3000 } else {
3001 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
3002 }
3003 }
3004
3005 case Builtin::BIlog:
3006 case Builtin::BIlogf:
3007 case Builtin::BIlogl:
3008 case Builtin::BI__builtin_log:
3009 case Builtin::BI__builtin_logf:
3010 case Builtin::BI__builtin_logf16:
3011 case Builtin::BI__builtin_logl:
3012 case Builtin::BI__builtin_logf128:
3013 case Builtin::BI__builtin_elementwise_log:
3015 Intrinsic::log,
3016 Intrinsic::experimental_constrained_log));
3017
3018 case Builtin::BIlog10:
3019 case Builtin::BIlog10f:
3020 case Builtin::BIlog10l:
3021 case Builtin::BI__builtin_log10:
3022 case Builtin::BI__builtin_log10f:
3023 case Builtin::BI__builtin_log10f16:
3024 case Builtin::BI__builtin_log10l:
3025 case Builtin::BI__builtin_log10f128:
3026 case Builtin::BI__builtin_elementwise_log10:
3028 Intrinsic::log10,
3029 Intrinsic::experimental_constrained_log10));
3030
3031 case Builtin::BIlog2:
3032 case Builtin::BIlog2f:
3033 case Builtin::BIlog2l:
3034 case Builtin::BI__builtin_log2:
3035 case Builtin::BI__builtin_log2f:
3036 case Builtin::BI__builtin_log2f16:
3037 case Builtin::BI__builtin_log2l:
3038 case Builtin::BI__builtin_log2f128:
3039 case Builtin::BI__builtin_elementwise_log2:
3041 Intrinsic::log2,
3042 Intrinsic::experimental_constrained_log2));
3043
3044 case Builtin::BInearbyint:
3045 case Builtin::BInearbyintf:
3046 case Builtin::BInearbyintl:
3047 case Builtin::BI__builtin_nearbyint:
3048 case Builtin::BI__builtin_nearbyintf:
3049 case Builtin::BI__builtin_nearbyintl:
3050 case Builtin::BI__builtin_nearbyintf128:
3051 case Builtin::BI__builtin_elementwise_nearbyint:
3053 Intrinsic::nearbyint,
3054 Intrinsic::experimental_constrained_nearbyint));
3055
3056 case Builtin::BIpow:
3057 case Builtin::BIpowf:
3058 case Builtin::BIpowl:
3059 case Builtin::BI__builtin_pow:
3060 case Builtin::BI__builtin_powf:
3061 case Builtin::BI__builtin_powf16:
3062 case Builtin::BI__builtin_powl:
3063 case Builtin::BI__builtin_powf128:
3064 case Builtin::BI__builtin_elementwise_pow:
3066 Intrinsic::pow,
3067 Intrinsic::experimental_constrained_pow));
3068
3069 case Builtin::BIrint:
3070 case Builtin::BIrintf:
3071 case Builtin::BIrintl:
3072 case Builtin::BI__builtin_rint:
3073 case Builtin::BI__builtin_rintf:
3074 case Builtin::BI__builtin_rintf16:
3075 case Builtin::BI__builtin_rintl:
3076 case Builtin::BI__builtin_rintf128:
3077 case Builtin::BI__builtin_elementwise_rint:
3079 Intrinsic::rint,
3080 Intrinsic::experimental_constrained_rint));
3081
3082 case Builtin::BIround:
3083 case Builtin::BIroundf:
3084 case Builtin::BIroundl:
3085 case Builtin::BI__builtin_round:
3086 case Builtin::BI__builtin_roundf:
3087 case Builtin::BI__builtin_roundf16:
3088 case Builtin::BI__builtin_roundl:
3089 case Builtin::BI__builtin_roundf128:
3090 case Builtin::BI__builtin_elementwise_round:
3092 Intrinsic::round,
3093 Intrinsic::experimental_constrained_round));
3094
3095 case Builtin::BIroundeven:
3096 case Builtin::BIroundevenf:
3097 case Builtin::BIroundevenl:
3098 case Builtin::BI__builtin_roundeven:
3099 case Builtin::BI__builtin_roundevenf:
3100 case Builtin::BI__builtin_roundevenf16:
3101 case Builtin::BI__builtin_roundevenl:
3102 case Builtin::BI__builtin_roundevenf128:
3103 case Builtin::BI__builtin_elementwise_roundeven:
3105 Intrinsic::roundeven,
3106 Intrinsic::experimental_constrained_roundeven));
3107
3108 case Builtin::BIsin:
3109 case Builtin::BIsinf:
3110 case Builtin::BIsinl:
3111 case Builtin::BI__builtin_sin:
3112 case Builtin::BI__builtin_sinf:
3113 case Builtin::BI__builtin_sinf16:
3114 case Builtin::BI__builtin_sinl:
3115 case Builtin::BI__builtin_sinf128:
3116 case Builtin::BI__builtin_elementwise_sin:
3118 Intrinsic::sin,
3119 Intrinsic::experimental_constrained_sin));
3120
3121 case Builtin::BIsinh:
3122 case Builtin::BIsinhf:
3123 case Builtin::BIsinhl:
3124 case Builtin::BI__builtin_sinh:
3125 case Builtin::BI__builtin_sinhf:
3126 case Builtin::BI__builtin_sinhf16:
3127 case Builtin::BI__builtin_sinhl:
3128 case Builtin::BI__builtin_sinhf128:
3129 case Builtin::BI__builtin_elementwise_sinh:
3131 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3132
3133 case Builtin::BI__builtin_sincospi:
3134 case Builtin::BI__builtin_sincospif:
3135 case Builtin::BI__builtin_sincospil:
3136 if (Builder.getIsFPConstrained())
3137 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3138 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3139 return RValue::get(nullptr);
3140
3141 case Builtin::BIsincos:
3142 case Builtin::BIsincosf:
3143 case Builtin::BIsincosl:
3144 case Builtin::BI__builtin_sincos:
3145 case Builtin::BI__builtin_sincosf:
3146 case Builtin::BI__builtin_sincosf16:
3147 case Builtin::BI__builtin_sincosl:
3148 case Builtin::BI__builtin_sincosf128:
3149 if (Builder.getIsFPConstrained())
3150 break; // TODO: Emit constrained sincos intrinsic once one exists.
3151 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3152 return RValue::get(nullptr);
3153
3154 case Builtin::BIsqrt:
3155 case Builtin::BIsqrtf:
3156 case Builtin::BIsqrtl:
3157 case Builtin::BI__builtin_sqrt:
3158 case Builtin::BI__builtin_sqrtf:
3159 case Builtin::BI__builtin_sqrtf16:
3160 case Builtin::BI__builtin_sqrtl:
3161 case Builtin::BI__builtin_sqrtf128:
3162 case Builtin::BI__builtin_elementwise_sqrt: {
3164 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3166 return RValue::get(Call);
3167 }
3168
3169 case Builtin::BItan:
3170 case Builtin::BItanf:
3171 case Builtin::BItanl:
3172 case Builtin::BI__builtin_tan:
3173 case Builtin::BI__builtin_tanf:
3174 case Builtin::BI__builtin_tanf16:
3175 case Builtin::BI__builtin_tanl:
3176 case Builtin::BI__builtin_tanf128:
3177 case Builtin::BI__builtin_elementwise_tan:
3179 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3180
3181 case Builtin::BItanh:
3182 case Builtin::BItanhf:
3183 case Builtin::BItanhl:
3184 case Builtin::BI__builtin_tanh:
3185 case Builtin::BI__builtin_tanhf:
3186 case Builtin::BI__builtin_tanhf16:
3187 case Builtin::BI__builtin_tanhl:
3188 case Builtin::BI__builtin_tanhf128:
3189 case Builtin::BI__builtin_elementwise_tanh:
3191 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3192
3193 case Builtin::BItrunc:
3194 case Builtin::BItruncf:
3195 case Builtin::BItruncl:
3196 case Builtin::BI__builtin_trunc:
3197 case Builtin::BI__builtin_truncf:
3198 case Builtin::BI__builtin_truncf16:
3199 case Builtin::BI__builtin_truncl:
3200 case Builtin::BI__builtin_truncf128:
3201 case Builtin::BI__builtin_elementwise_trunc:
3203 Intrinsic::trunc,
3204 Intrinsic::experimental_constrained_trunc));
3205
3206 case Builtin::BIlround:
3207 case Builtin::BIlroundf:
3208 case Builtin::BIlroundl:
3209 case Builtin::BI__builtin_lround:
3210 case Builtin::BI__builtin_lroundf:
3211 case Builtin::BI__builtin_lroundl:
3212 case Builtin::BI__builtin_lroundf128:
3214 *this, E, Intrinsic::lround,
3215 Intrinsic::experimental_constrained_lround));
3216
3217 case Builtin::BIllround:
3218 case Builtin::BIllroundf:
3219 case Builtin::BIllroundl:
3220 case Builtin::BI__builtin_llround:
3221 case Builtin::BI__builtin_llroundf:
3222 case Builtin::BI__builtin_llroundl:
3223 case Builtin::BI__builtin_llroundf128:
3225 *this, E, Intrinsic::llround,
3226 Intrinsic::experimental_constrained_llround));
3227
3228 case Builtin::BIlrint:
3229 case Builtin::BIlrintf:
3230 case Builtin::BIlrintl:
3231 case Builtin::BI__builtin_lrint:
3232 case Builtin::BI__builtin_lrintf:
3233 case Builtin::BI__builtin_lrintl:
3234 case Builtin::BI__builtin_lrintf128:
3236 *this, E, Intrinsic::lrint,
3237 Intrinsic::experimental_constrained_lrint));
3238
3239 case Builtin::BIllrint:
3240 case Builtin::BIllrintf:
3241 case Builtin::BIllrintl:
3242 case Builtin::BI__builtin_llrint:
3243 case Builtin::BI__builtin_llrintf:
3244 case Builtin::BI__builtin_llrintl:
3245 case Builtin::BI__builtin_llrintf128:
3247 *this, E, Intrinsic::llrint,
3248 Intrinsic::experimental_constrained_llrint));
3249 case Builtin::BI__builtin_ldexp:
3250 case Builtin::BI__builtin_ldexpf:
3251 case Builtin::BI__builtin_ldexpl:
3252 case Builtin::BI__builtin_ldexpf16:
3253 case Builtin::BI__builtin_ldexpf128:
3254 case Builtin::BI__builtin_elementwise_ldexp:
3256 *this, E, Intrinsic::ldexp,
3257 Intrinsic::experimental_constrained_ldexp));
3258 default:
3259 break;
3260 }
3261 }
3262
3263 // Check NonnullAttribute/NullabilityArg and Alignment.
3264 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3265 unsigned ParmNum) {
3266 Value *Val = A.emitRawPointer(*this);
3267 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3268 ParmNum);
3269
3270 if (SanOpts.has(SanitizerKind::Alignment)) {
3271 SanitizerSet SkippedChecks;
3272 SkippedChecks.set(SanitizerKind::All);
3273 SkippedChecks.clear(SanitizerKind::Alignment);
3274 SourceLocation Loc = Arg->getExprLoc();
3275 // Strip an implicit cast.
3276 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3277 if (CE->getCastKind() == CK_BitCast)
3278 Arg = CE->getSubExpr();
3279 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3280 SkippedChecks);
3281 }
3282 };
3283
3284 switch (BuiltinIDIfNoAsmLabel) {
3285 default: break;
3286 case Builtin::BI__builtin___CFStringMakeConstantString:
3287 case Builtin::BI__builtin___NSStringMakeConstantString:
3288 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3289 case Builtin::BI__builtin_stdarg_start:
3290 case Builtin::BI__builtin_va_start:
3291 case Builtin::BI__va_start:
3292 case Builtin::BI__builtin_c23_va_start:
3293 case Builtin::BI__builtin_va_end:
3294 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3295 ? EmitScalarExpr(E->getArg(0))
3296 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3297 BuiltinID != Builtin::BI__builtin_va_end);
3298 return RValue::get(nullptr);
3299 case Builtin::BI__builtin_va_copy: {
3300 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3301 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3302 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3303 {DstPtr, SrcPtr});
3304 return RValue::get(nullptr);
3305 }
3306 case Builtin::BIabs:
3307 case Builtin::BIlabs:
3308 case Builtin::BIllabs:
3309 case Builtin::BI__builtin_abs:
3310 case Builtin::BI__builtin_labs:
3311 case Builtin::BI__builtin_llabs: {
3312 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3313
3314 Value *Result;
3315 switch (getLangOpts().getSignedOverflowBehavior()) {
3317 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3318 break;
3320 if (!SanitizeOverflow) {
3321 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3322 break;
3323 }
3324 [[fallthrough]];
3326 // TODO: Somehow handle the corner case when the address of abs is taken.
3327 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3328 break;
3329 }
3330 return RValue::get(Result);
3331 }
3332 case Builtin::BI__builtin_complex: {
3333 Value *Real = EmitScalarExpr(E->getArg(0));
3334 Value *Imag = EmitScalarExpr(E->getArg(1));
3335 return RValue::getComplex({Real, Imag});
3336 }
3337 case Builtin::BI__builtin_conj:
3338 case Builtin::BI__builtin_conjf:
3339 case Builtin::BI__builtin_conjl:
3340 case Builtin::BIconj:
3341 case Builtin::BIconjf:
3342 case Builtin::BIconjl: {
3343 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3344 Value *Real = ComplexVal.first;
3345 Value *Imag = ComplexVal.second;
3346 Imag = Builder.CreateFNeg(Imag, "neg");
3347 return RValue::getComplex(std::make_pair(Real, Imag));
3348 }
3349 case Builtin::BI__builtin_creal:
3350 case Builtin::BI__builtin_crealf:
3351 case Builtin::BI__builtin_creall:
3352 case Builtin::BIcreal:
3353 case Builtin::BIcrealf:
3354 case Builtin::BIcreall: {
3355 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3356 return RValue::get(ComplexVal.first);
3357 }
3358
3359 case Builtin::BI__builtin_preserve_access_index: {
3360 // Only enabled preserved access index region when debuginfo
3361 // is available as debuginfo is needed to preserve user-level
3362 // access pattern.
3363 if (!getDebugInfo()) {
3364 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3365 return RValue::get(EmitScalarExpr(E->getArg(0)));
3366 }
3367
3368 // Nested builtin_preserve_access_index() not supported
3370 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3371 return RValue::get(EmitScalarExpr(E->getArg(0)));
3372 }
3373
3374 IsInPreservedAIRegion = true;
3375 Value *Res = EmitScalarExpr(E->getArg(0));
3376 IsInPreservedAIRegion = false;
3377 return RValue::get(Res);
3378 }
3379
3380 case Builtin::BI__builtin_cimag:
3381 case Builtin::BI__builtin_cimagf:
3382 case Builtin::BI__builtin_cimagl:
3383 case Builtin::BIcimag:
3384 case Builtin::BIcimagf:
3385 case Builtin::BIcimagl: {
3386 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3387 return RValue::get(ComplexVal.second);
3388 }
3389
3390 case Builtin::BI__builtin_clrsb:
3391 case Builtin::BI__builtin_clrsbl:
3392 case Builtin::BI__builtin_clrsbll: {
3393 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or
3394 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3395
3396 llvm::Type *ArgType = ArgValue->getType();
3397 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3398
3399 llvm::Type *ResultType = ConvertType(E->getType());
3400 Value *Zero = llvm::Constant::getNullValue(ArgType);
3401 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3402 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3403 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3404 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3405 Value *Result =
3406 Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3407 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3408 "cast");
3409 return RValue::get(Result);
3410 }
3411 case Builtin::BI__builtin_ctzs:
3412 case Builtin::BI__builtin_ctz:
3413 case Builtin::BI__builtin_ctzl:
3414 case Builtin::BI__builtin_ctzll:
3415 case Builtin::BI__builtin_ctzg:
3416 case Builtin::BI__builtin_elementwise_ctzg: {
3417 bool HasFallback =
3418 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3419 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3420 E->getNumArgs() > 1;
3421
3422 Value *ArgValue =
3423 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3425
3426 llvm::Type *ArgType = ArgValue->getType();
3427 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3428
3429 llvm::Type *ResultType = ConvertType(E->getType());
3430 // The elementwise builtins always exhibit zero-is-undef behaviour
3431 Value *ZeroUndef = Builder.getInt1(
3432 HasFallback || getTarget().isCLZForZeroUndef() ||
3433 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3434 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3435 if (Result->getType() != ResultType)
3436 Result =
3437 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3438 if (!HasFallback)
3439 return RValue::get(Result);
3440
3441 Value *Zero = Constant::getNullValue(ArgType);
3442 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3443 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3444 Value *ResultOrFallback =
3445 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3446 return RValue::get(ResultOrFallback);
3447 }
3448 case Builtin::BI__builtin_clzs:
3449 case Builtin::BI__builtin_clz:
3450 case Builtin::BI__builtin_clzl:
3451 case Builtin::BI__builtin_clzll:
3452 case Builtin::BI__builtin_clzg:
3453 case Builtin::BI__builtin_elementwise_clzg: {
3454 bool HasFallback =
3455 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3456 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3457 E->getNumArgs() > 1;
3458
3459 Value *ArgValue =
3460 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3462
3463 llvm::Type *ArgType = ArgValue->getType();
3464 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3465
3466 llvm::Type *ResultType = ConvertType(E->getType());
3467 // The elementwise builtins always exhibit zero-is-undef behaviour
3468 Value *ZeroUndef = Builder.getInt1(
3469 HasFallback || getTarget().isCLZForZeroUndef() ||
3470 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3471 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3472 if (Result->getType() != ResultType)
3473 Result =
3474 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3475 if (!HasFallback)
3476 return RValue::get(Result);
3477
3478 Value *Zero = Constant::getNullValue(ArgType);
3479 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3480 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3481 Value *ResultOrFallback =
3482 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3483 return RValue::get(ResultOrFallback);
3484 }
3485 case Builtin::BI__builtin_ffs:
3486 case Builtin::BI__builtin_ffsl:
3487 case Builtin::BI__builtin_ffsll: {
3488 // ffs(x) -> x ? cttz(x) + 1 : 0
3489 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3490
3491 llvm::Type *ArgType = ArgValue->getType();
3492 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3493
3494 llvm::Type *ResultType = ConvertType(E->getType());
3495 Value *Tmp =
3496 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3497 llvm::ConstantInt::get(ArgType, 1));
3498 Value *Zero = llvm::Constant::getNullValue(ArgType);
3499 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3500 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3501 if (Result->getType() != ResultType)
3502 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3503 "cast");
3504 return RValue::get(Result);
3505 }
3506 case Builtin::BI__builtin_parity:
3507 case Builtin::BI__builtin_parityl:
3508 case Builtin::BI__builtin_parityll: {
3509 // parity(x) -> ctpop(x) & 1
3510 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3511
3512 llvm::Type *ArgType = ArgValue->getType();
3513 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3514
3515 llvm::Type *ResultType = ConvertType(E->getType());
3516 Value *Tmp = Builder.CreateCall(F, ArgValue);
3517 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3518 if (Result->getType() != ResultType)
3519 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3520 "cast");
3521 return RValue::get(Result);
3522 }
3523 case Builtin::BI__lzcnt16:
3524 case Builtin::BI__lzcnt:
3525 case Builtin::BI__lzcnt64: {
3526 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3527
3528 llvm::Type *ArgType = ArgValue->getType();
3529 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3530
3531 llvm::Type *ResultType = ConvertType(E->getType());
3532 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3533 if (Result->getType() != ResultType)
3534 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3535 "cast");
3536 return RValue::get(Result);
3537 }
3538 case Builtin::BI__popcnt16:
3539 case Builtin::BI__popcnt:
3540 case Builtin::BI__popcnt64:
3541 case Builtin::BI__builtin_popcount:
3542 case Builtin::BI__builtin_popcountl:
3543 case Builtin::BI__builtin_popcountll:
3544 case Builtin::BI__builtin_popcountg: {
3545 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3546
3547 llvm::Type *ArgType = ArgValue->getType();
3548 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3549
3550 llvm::Type *ResultType = ConvertType(E->getType());
3551 Value *Result = Builder.CreateCall(F, ArgValue);
3552 if (Result->getType() != ResultType)
3553 Result =
3554 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3555 return RValue::get(Result);
3556 }
3557 case Builtin::BI__builtin_unpredictable: {
3558 // Always return the argument of __builtin_unpredictable. LLVM does not
3559 // handle this builtin. Metadata for this builtin should be added directly
3560 // to instructions such as branches or switches that use it.
3561 return RValue::get(EmitScalarExpr(E->getArg(0)));
3562 }
3563 case Builtin::BI__builtin_expect: {
3564 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3565 llvm::Type *ArgType = ArgValue->getType();
3566
3567 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3568 // Don't generate llvm.expect on -O0 as the backend won't use it for
3569 // anything.
3570 // Note, we still IRGen ExpectedValue because it could have side-effects.
3571 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3572 return RValue::get(ArgValue);
3573
3574 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3575 Value *Result =
3576 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3577 return RValue::get(Result);
3578 }
3579 case Builtin::BI__builtin_expect_with_probability: {
3580 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3581 llvm::Type *ArgType = ArgValue->getType();
3582
3583 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3584 llvm::APFloat Probability(0.0);
3585 const Expr *ProbArg = E->getArg(2);
3586 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3587 assert(EvalSucceed && "probability should be able to evaluate as float");
3588 (void)EvalSucceed;
3589 bool LoseInfo = false;
3590 Probability.convert(llvm::APFloat::IEEEdouble(),
3591 llvm::RoundingMode::Dynamic, &LoseInfo);
3592 llvm::Type *Ty = ConvertType(ProbArg->getType());
3593 Constant *Confidence = ConstantFP::get(Ty, Probability);
3594 // Don't generate llvm.expect.with.probability on -O0 as the backend
3595 // won't use it for anything.
3596 // Note, we still IRGen ExpectedValue because it could have side-effects.
3597 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3598 return RValue::get(ArgValue);
3599
3600 Function *FnExpect =
3601 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3602 Value *Result = Builder.CreateCall(
3603 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3604 return RValue::get(Result);
3605 }
3606 case Builtin::BI__builtin_assume_aligned: {
3607 const Expr *Ptr = E->getArg(0);
3608 Value *PtrValue = EmitScalarExpr(Ptr);
3609 Value *OffsetValue =
3610 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3611
3612 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3613 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3614 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3615 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3616 llvm::Value::MaximumAlignment);
3617
3618 emitAlignmentAssumption(PtrValue, Ptr,
3619 /*The expr loc is sufficient.*/ SourceLocation(),
3620 AlignmentCI, OffsetValue);
3621 return RValue::get(PtrValue);
3622 }
3623 case Builtin::BI__builtin_assume_dereferenceable: {
3624 const Expr *Ptr = E->getArg(0);
3625 const Expr *Size = E->getArg(1);
3626 Value *PtrValue = EmitScalarExpr(Ptr);
3627 Value *SizeValue = EmitScalarExpr(Size);
3628 if (SizeValue->getType() != IntPtrTy)
3629 SizeValue =
3630 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3631 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3632 return RValue::get(nullptr);
3633 }
3634 case Builtin::BI__assume:
3635 case Builtin::BI__builtin_assume: {
3636 if (E->getArg(0)->HasSideEffects(getContext()))
3637 return RValue::get(nullptr);
3638
3639 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3640 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3641 Builder.CreateCall(FnAssume, ArgValue);
3642 return RValue::get(nullptr);
3643 }
3644 case Builtin::BI__builtin_assume_separate_storage: {
3645 const Expr *Arg0 = E->getArg(0);
3646 const Expr *Arg1 = E->getArg(1);
3647
3648 Value *Value0 = EmitScalarExpr(Arg0);
3649 Value *Value1 = EmitScalarExpr(Arg1);
3650
3651 Value *Values[] = {Value0, Value1};
3652 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3653 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3654 return RValue::get(nullptr);
3655 }
3656 case Builtin::BI__builtin_allow_runtime_check: {
3657 StringRef Kind =
3658 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3659 LLVMContext &Ctx = CGM.getLLVMContext();
3660 llvm::Value *Allow = Builder.CreateCall(
3661 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3662 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3663 return RValue::get(Allow);
3664 }
3665 case Builtin::BI__builtin_allow_sanitize_check: {
3666 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
3667 StringRef Name =
3668 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3669
3670 // We deliberately allow the use of kernel- and non-kernel names
3671 // interchangably, even when one or the other is enabled. This is consistent
3672 // with the no_sanitize-attribute, which allows either kernel- or non-kernel
3673 // name to disable instrumentation (see CodeGenFunction::StartFunction).
3674 if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
3675 SanitizerKind::KernelAddress) &&
3676 (Name == "address" || Name == "kernel-address")) {
3677 IntrID = Intrinsic::allow_sanitize_address;
3678 } else if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
3679 Name == "thread") {
3680 IntrID = Intrinsic::allow_sanitize_thread;
3681 } else if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Memory |
3682 SanitizerKind::KernelMemory) &&
3683 (Name == "memory" || Name == "kernel-memory")) {
3684 IntrID = Intrinsic::allow_sanitize_memory;
3685 } else if (getLangOpts().Sanitize.hasOneOf(
3686 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress) &&
3687 (Name == "hwaddress" || Name == "kernel-hwaddress")) {
3688 IntrID = Intrinsic::allow_sanitize_hwaddress;
3689 }
3690
3691 if (IntrID != Intrinsic::not_intrinsic) {
3692 llvm::Value *Allow = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3693 return RValue::get(Allow);
3694 }
3695 // If the checked sanitizer is not enabled, we can safely lower to false
3696 // right away. This is also more efficient, since the LowerAllowCheckPass
3697 // must not always be enabled if none of the above sanitizers are enabled.
3698 return RValue::get(Builder.getFalse());
3699 }
3700 case Builtin::BI__arithmetic_fence: {
3701 // Create the builtin call if FastMath is selected, and the target
3702 // supports the builtin, otherwise just return the argument.
3703 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3704 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3705 bool isArithmeticFenceEnabled =
3706 FMF.allowReassoc() &&
3708 QualType ArgType = E->getArg(0)->getType();
3709 if (ArgType->isComplexType()) {
3710 if (isArithmeticFenceEnabled) {
3711 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3712 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3713 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3714 ConvertType(ElementType));
3715 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3716 ConvertType(ElementType));
3717 return RValue::getComplex(std::make_pair(Real, Imag));
3718 }
3719 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3720 Value *Real = ComplexVal.first;
3721 Value *Imag = ComplexVal.second;
3722 return RValue::getComplex(std::make_pair(Real, Imag));
3723 }
3724 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3725 if (isArithmeticFenceEnabled)
3726 return RValue::get(
3727 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3728 return RValue::get(ArgValue);
3729 }
3730 case Builtin::BI__builtin_bswapg: {
3731 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3732 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3733 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3734 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3735 return RValue::get(ArgValue);
3736 assert(((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0)) &&
3737 "LLVM's __builtin_bswapg only supports integer variants that has a "
3738 "multiple of 16 bits as well as a single byte");
3739 return RValue::get(
3740 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3741 }
3742 case Builtin::BI__builtin_bswap16:
3743 case Builtin::BI__builtin_bswap32:
3744 case Builtin::BI__builtin_bswap64:
3745 case Builtin::BI_byteswap_ushort:
3746 case Builtin::BI_byteswap_ulong:
3747 case Builtin::BI_byteswap_uint64: {
3748 return RValue::get(
3749 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3750 }
3751 case Builtin::BI__builtin_bitreverseg: {
3752 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3753 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3754 assert(IntTy &&
3755 "LLVM's __builtin_bitreverseg only support integer variants");
3756 if (IntTy->getBitWidth() == 1)
3757 return RValue::get(ArgValue);
3758 return RValue::get(
3759 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3760 }
3761 case Builtin::BI__builtin_bitreverse8:
3762 case Builtin::BI__builtin_bitreverse16:
3763 case Builtin::BI__builtin_bitreverse32:
3764 case Builtin::BI__builtin_bitreverse64: {
3765 return RValue::get(
3766 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3767 }
3768 case Builtin::BI__builtin_rotateleft8:
3769 case Builtin::BI__builtin_rotateleft16:
3770 case Builtin::BI__builtin_rotateleft32:
3771 case Builtin::BI__builtin_rotateleft64:
3772 case Builtin::BI__builtin_stdc_rotate_left:
3773 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3774 case Builtin::BI_rotl16:
3775 case Builtin::BI_rotl:
3776 case Builtin::BI_lrotl:
3777 case Builtin::BI_rotl64:
3778 return emitRotate(E, false);
3779
3780 case Builtin::BI__builtin_rotateright8:
3781 case Builtin::BI__builtin_rotateright16:
3782 case Builtin::BI__builtin_rotateright32:
3783 case Builtin::BI__builtin_rotateright64:
3784 case Builtin::BI__builtin_stdc_rotate_right:
3785 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3786 case Builtin::BI_rotr16:
3787 case Builtin::BI_rotr:
3788 case Builtin::BI_lrotr:
3789 case Builtin::BI_rotr64:
3790 return emitRotate(E, true);
3791
3792 case Builtin::BIstdc_leading_zeros_uc:
3793 case Builtin::BIstdc_leading_zeros_us:
3794 case Builtin::BIstdc_leading_zeros_ui:
3795 case Builtin::BIstdc_leading_zeros_ul:
3796 case Builtin::BIstdc_leading_zeros_ull:
3797 case Builtin::BIstdc_leading_zeros:
3798 case Builtin::BI__builtin_stdc_leading_zeros:
3799 return emitStdcCountIntrinsic(E, Intrinsic::ctlz, /*InvertArg=*/false);
3800 case Builtin::BIstdc_leading_ones_uc:
3801 case Builtin::BIstdc_leading_ones_us:
3802 case Builtin::BIstdc_leading_ones_ui:
3803 case Builtin::BIstdc_leading_ones_ul:
3804 case Builtin::BIstdc_leading_ones_ull:
3805 case Builtin::BIstdc_leading_ones:
3806 case Builtin::BI__builtin_stdc_leading_ones:
3807 return emitStdcCountIntrinsic(E, Intrinsic::ctlz, /*InvertArg=*/true);
3808 case Builtin::BIstdc_trailing_zeros_uc:
3809 case Builtin::BIstdc_trailing_zeros_us:
3810 case Builtin::BIstdc_trailing_zeros_ui:
3811 case Builtin::BIstdc_trailing_zeros_ul:
3812 case Builtin::BIstdc_trailing_zeros_ull:
3813 case Builtin::BIstdc_trailing_zeros:
3814 case Builtin::BI__builtin_stdc_trailing_zeros:
3815 return emitStdcCountIntrinsic(E, Intrinsic::cttz, /*InvertArg=*/false);
3816 case Builtin::BIstdc_trailing_ones_uc:
3817 case Builtin::BIstdc_trailing_ones_us:
3818 case Builtin::BIstdc_trailing_ones_ui:
3819 case Builtin::BIstdc_trailing_ones_ul:
3820 case Builtin::BIstdc_trailing_ones_ull:
3821 case Builtin::BIstdc_trailing_ones:
3822 case Builtin::BI__builtin_stdc_trailing_ones:
3823 return emitStdcCountIntrinsic(E, Intrinsic::cttz, /*InvertArg=*/true);
3824 case Builtin::BIstdc_first_leading_zero_uc:
3825 case Builtin::BIstdc_first_leading_zero_us:
3826 case Builtin::BIstdc_first_leading_zero_ui:
3827 case Builtin::BIstdc_first_leading_zero_ul:
3828 case Builtin::BIstdc_first_leading_zero_ull:
3829 case Builtin::BIstdc_first_leading_zero:
3830 case Builtin::BI__builtin_stdc_first_leading_zero:
3831 return emitStdcFirstBit(E, Intrinsic::ctlz, /*InvertArg=*/true);
3832 case Builtin::BIstdc_first_leading_one_uc:
3833 case Builtin::BIstdc_first_leading_one_us:
3834 case Builtin::BIstdc_first_leading_one_ui:
3835 case Builtin::BIstdc_first_leading_one_ul:
3836 case Builtin::BIstdc_first_leading_one_ull:
3837 case Builtin::BIstdc_first_leading_one:
3838 case Builtin::BI__builtin_stdc_first_leading_one:
3839 return emitStdcFirstBit(E, Intrinsic::ctlz, /*InvertArg=*/false);
3840 case Builtin::BIstdc_first_trailing_zero_uc:
3841 case Builtin::BIstdc_first_trailing_zero_us:
3842 case Builtin::BIstdc_first_trailing_zero_ui:
3843 case Builtin::BIstdc_first_trailing_zero_ul:
3844 case Builtin::BIstdc_first_trailing_zero_ull:
3845 case Builtin::BIstdc_first_trailing_zero:
3846 case Builtin::BI__builtin_stdc_first_trailing_zero:
3847 return emitStdcFirstBit(E, Intrinsic::cttz, /*InvertArg=*/true);
3848 case Builtin::BIstdc_first_trailing_one_uc:
3849 case Builtin::BIstdc_first_trailing_one_us:
3850 case Builtin::BIstdc_first_trailing_one_ui:
3851 case Builtin::BIstdc_first_trailing_one_ul:
3852 case Builtin::BIstdc_first_trailing_one_ull:
3853 case Builtin::BIstdc_first_trailing_one:
3854 case Builtin::BI__builtin_stdc_first_trailing_one:
3855 return emitStdcFirstBit(E, Intrinsic::cttz, /*InvertArg=*/false);
3856 case Builtin::BIstdc_count_zeros_uc:
3857 case Builtin::BIstdc_count_zeros_us:
3858 case Builtin::BIstdc_count_zeros_ui:
3859 case Builtin::BIstdc_count_zeros_ul:
3860 case Builtin::BIstdc_count_zeros_ull:
3861 case Builtin::BIstdc_count_zeros:
3862 case Builtin::BI__builtin_stdc_count_zeros:
3863 return emitStdcBitWidthMinus(E, Intrinsic::ctpop, /*IsPop=*/true);
3864 case Builtin::BIstdc_count_ones_uc:
3865 case Builtin::BIstdc_count_ones_us:
3866 case Builtin::BIstdc_count_ones_ui:
3867 case Builtin::BIstdc_count_ones_ul:
3868 case Builtin::BIstdc_count_ones_ull:
3869 case Builtin::BIstdc_count_ones:
3870 case Builtin::BI__builtin_stdc_count_ones:
3871 return emitStdcCountIntrinsic(E, Intrinsic::ctpop, /*InvertArg=*/false,
3872 /*IsPop=*/true);
3873 case Builtin::BIstdc_has_single_bit_uc:
3874 case Builtin::BIstdc_has_single_bit_us:
3875 case Builtin::BIstdc_has_single_bit_ui:
3876 case Builtin::BIstdc_has_single_bit_ul:
3877 case Builtin::BIstdc_has_single_bit_ull:
3878 case Builtin::BIstdc_has_single_bit:
3879 case Builtin::BI__builtin_stdc_has_single_bit: {
3880 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3881 llvm::Type *ArgType = ArgValue->getType();
3882 Value *One = ConstantInt::get(ArgType, 1);
3883 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3884 Value *PopCnt = Builder.CreateCall(F, ArgValue);
3885 return RValue::get(Builder.CreateICmpEQ(PopCnt, One));
3886 }
3887 case Builtin::BIstdc_bit_width_uc:
3888 case Builtin::BIstdc_bit_width_us:
3889 case Builtin::BIstdc_bit_width_ui:
3890 case Builtin::BIstdc_bit_width_ul:
3891 case Builtin::BIstdc_bit_width_ull:
3892 case Builtin::BIstdc_bit_width:
3893 case Builtin::BI__builtin_stdc_bit_width:
3894 return emitStdcBitWidthMinus(E, Intrinsic::ctlz, /*IsPop=*/false);
3895 case Builtin::BIstdc_bit_floor_uc:
3896 case Builtin::BIstdc_bit_floor_us:
3897 case Builtin::BIstdc_bit_floor_ui:
3898 case Builtin::BIstdc_bit_floor_ul:
3899 case Builtin::BIstdc_bit_floor_ull:
3900 case Builtin::BIstdc_bit_floor:
3901 case Builtin::BI__builtin_stdc_bit_floor: {
3902 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3903 llvm::Type *ArgType = ArgValue->getType();
3904 unsigned BitWidth = ArgType->getIntegerBitWidth();
3905 Value *Zero = ConstantInt::get(ArgType, 0);
3906 Value *One = ConstantInt::get(ArgType, 1);
3907 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3908 Value *LZ = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
3909 Value *ShiftAmt =
3910 Builder.CreateSub(ConstantInt::get(ArgType, BitWidth - 1), LZ);
3911 Value *Shifted = Builder.CreateShl(One, ShiftAmt);
3912 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero);
3913 Value *Result = Builder.CreateSelect(IsZero, Zero, Shifted);
3914 return RValue::get(Result);
3915 }
3916 case Builtin::BIstdc_bit_ceil_uc:
3917 case Builtin::BIstdc_bit_ceil_us:
3918 case Builtin::BIstdc_bit_ceil_ui:
3919 case Builtin::BIstdc_bit_ceil_ul:
3920 case Builtin::BIstdc_bit_ceil_ull:
3921 case Builtin::BIstdc_bit_ceil:
3922 case Builtin::BI__builtin_stdc_bit_ceil: {
3923 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3924 llvm::Type *ArgType = ArgValue->getType();
3925 unsigned BitWidth = ArgType->getIntegerBitWidth();
3926 Value *One = ConstantInt::get(ArgType, 1);
3927 Value *Two = ConstantInt::get(ArgType, 2);
3928
3929 Value *IsLEOne = Builder.CreateICmpULE(ArgValue, One, "isleone");
3930
3931 BasicBlock *EntryBB = Builder.GetInsertBlock();
3932 BasicBlock *CalcBB = createBasicBlock("bitceil.calc", CurFn);
3933 BasicBlock *MergeBB = createBasicBlock("bitceil.merge", CurFn);
3934
3935 Builder.CreateCondBr(IsLEOne, MergeBB, CalcBB);
3936
3937 Builder.SetInsertPoint(CalcBB);
3938 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3939 Value *ArgMinusOne = Builder.CreateSub(ArgValue, One);
3940 Value *LZ = Builder.CreateCall(F, {ArgMinusOne, Builder.getFalse()});
3941 // 2<<(BitWidth-1-LZ) to get the next power of two. The shift
3942 // amount is always in [0, BitWidth-1], so when LZ==0 (argument has its MSB
3943 // set), the result wraps to 0
3944 Value *ShiftAmt =
3945 Builder.CreateSub(ConstantInt::get(ArgType, BitWidth - 1), LZ);
3946 Value *Tmp = Builder.CreateShl(Two, ShiftAmt);
3947 Builder.CreateBr(MergeBB);
3948
3949 Builder.SetInsertPoint(MergeBB);
3950 PHINode *Phi = Builder.CreatePHI(ArgType, 2);
3951 Phi->addIncoming(One, EntryBB);
3952 Phi->addIncoming(Tmp, CalcBB);
3953 return RValue::get(Phi);
3954 }
3955
3956 case Builtin::BI__builtin_constant_p: {
3957 llvm::Type *ResultType = ConvertType(E->getType());
3958
3959 const Expr *Arg = E->getArg(0);
3960 QualType ArgType = Arg->getType();
3961 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3962 // and likely a mistake.
3963 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3964 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3965 // Per the GCC documentation, only numeric constants are recognized after
3966 // inlining.
3967 return RValue::get(ConstantInt::get(ResultType, 0));
3968
3969 if (Arg->HasSideEffects(getContext()))
3970 // The argument is unevaluated, so be conservative if it might have
3971 // side-effects.
3972 return RValue::get(ConstantInt::get(ResultType, 0));
3973
3974 Value *ArgValue = EmitScalarExpr(Arg);
3975 if (ArgType->isObjCObjectPointerType()) {
3976 // Convert Objective-C objects to id because we cannot distinguish between
3977 // LLVM types for Obj-C classes as they are opaque.
3978 ArgType = CGM.getContext().getObjCIdType();
3979 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3980 }
3981 Function *F =
3982 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3983 Value *Result = Builder.CreateCall(F, ArgValue);
3984 if (Result->getType() != ResultType)
3985 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3986 return RValue::get(Result);
3987 }
3988 case Builtin::BI__builtin_dynamic_object_size:
3989 case Builtin::BI__builtin_object_size: {
3990 unsigned Type =
3991 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3992 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3993
3994 // We pass this builtin onto the optimizer so that it can figure out the
3995 // object size in more complex cases.
3996 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3997 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3998 /*EmittedE=*/nullptr, IsDynamic));
3999 }
4000 case Builtin::BI__builtin_counted_by_ref: {
4001 // Default to returning '(void *) 0'.
4002 llvm::Value *Result = llvm::ConstantPointerNull::get(
4003 llvm::PointerType::getUnqual(getLLVMContext()));
4004
4005 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
4006
4007 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
4008 UO && UO->getOpcode() == UO_AddrOf) {
4009 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
4010
4011 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
4012 Arg = ASE->getBase()->IgnoreParenImpCasts();
4013 }
4014
4015 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
4016 if (auto *CATy =
4018 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
4019 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
4020 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
4021 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
4022 else
4023 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
4024 }
4025 }
4026
4027 return RValue::get(Result);
4028 }
4029 case Builtin::BI__builtin_prefetch: {
4030 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
4031 // FIXME: Technically these constants should of type 'int', yes?
4032 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
4033 llvm::ConstantInt::get(Int32Ty, 0);
4034 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
4035 llvm::ConstantInt::get(Int32Ty, 3);
4036 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
4037 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
4038 Builder.CreateCall(F, {Address, RW, Locality, Data});
4039 return RValue::get(nullptr);
4040 }
4041 case Builtin::BI__builtin_readcyclecounter: {
4042 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
4043 return RValue::get(Builder.CreateCall(F));
4044 }
4045 case Builtin::BI__builtin_readsteadycounter: {
4046 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
4047 return RValue::get(Builder.CreateCall(F));
4048 }
4049 case Builtin::BI__builtin___clear_cache: {
4050 Value *Begin = EmitScalarExpr(E->getArg(0));
4051 Value *End = EmitScalarExpr(E->getArg(1));
4052 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache, {CGM.DefaultPtrTy});
4053 return RValue::get(Builder.CreateCall(F, {Begin, End}));
4054 }
4055 case Builtin::BI__builtin_trap:
4056 EmitTrapCall(Intrinsic::trap);
4057 return RValue::get(nullptr);
4058 case Builtin::BI__builtin_verbose_trap: {
4059 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4060 if (getDebugInfo()) {
4061 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4062 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
4064 }
4065 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
4066 // Currently no attempt is made to prevent traps from being merged.
4067 EmitTrapCall(Intrinsic::trap);
4068 return RValue::get(nullptr);
4069 }
4070 case Builtin::BI__debugbreak:
4071 EmitTrapCall(Intrinsic::debugtrap);
4072 return RValue::get(nullptr);
4073 case Builtin::BI__builtin_unreachable: {
4075
4076 // We do need to preserve an insertion point.
4077 EmitBlock(createBasicBlock("unreachable.cont"));
4078
4079 return RValue::get(nullptr);
4080 }
4081
4082 case Builtin::BI__builtin_powi:
4083 case Builtin::BI__builtin_powif:
4084 case Builtin::BI__builtin_powil: {
4085 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
4086 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
4087
4088 if (Builder.getIsFPConstrained()) {
4089 // FIXME: llvm.powi has 2 mangling types,
4090 // llvm.experimental.constrained.powi has one.
4091 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4092 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
4093 Src0->getType());
4094 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
4095 }
4096
4097 Function *F = CGM.getIntrinsic(Intrinsic::powi,
4098 { Src0->getType(), Src1->getType() });
4099 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
4100 }
4101 case Builtin::BI__builtin_frexpl: {
4102 // Linux PPC will not be adding additional PPCDoubleDouble support.
4103 // WIP to switch default to IEEE long double. Will emit libcall for
4104 // frexpl instead of legalizing this type in the BE.
4105 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
4106 break;
4107 [[fallthrough]];
4108 }
4109 case Builtin::BI__builtin_frexp:
4110 case Builtin::BI__builtin_frexpf:
4111 case Builtin::BI__builtin_frexpf128:
4112 case Builtin::BI__builtin_frexpf16:
4113 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
4114 case Builtin::BImodf:
4115 case Builtin::BImodff:
4116 case Builtin::BImodfl:
4117 case Builtin::BI__builtin_modf:
4118 case Builtin::BI__builtin_modff:
4119 case Builtin::BI__builtin_modfl:
4120 if (Builder.getIsFPConstrained())
4121 break; // TODO: Emit constrained modf intrinsic once one exists.
4122 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
4123 case Builtin::BI__builtin_isgreater:
4124 case Builtin::BI__builtin_isgreaterequal:
4125 case Builtin::BI__builtin_isless:
4126 case Builtin::BI__builtin_islessequal:
4127 case Builtin::BI__builtin_islessgreater:
4128 case Builtin::BI__builtin_isunordered: {
4129 // Ordered comparisons: we know the arguments to these are matching scalar
4130 // floating point values.
4131 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4132 Value *LHS = EmitScalarExpr(E->getArg(0));
4133 Value *RHS = EmitScalarExpr(E->getArg(1));
4134
4135 switch (BuiltinID) {
4136 default: llvm_unreachable("Unknown ordered comparison");
4137 case Builtin::BI__builtin_isgreater:
4138 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
4139 break;
4140 case Builtin::BI__builtin_isgreaterequal:
4141 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
4142 break;
4143 case Builtin::BI__builtin_isless:
4144 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
4145 break;
4146 case Builtin::BI__builtin_islessequal:
4147 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
4148 break;
4149 case Builtin::BI__builtin_islessgreater:
4150 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
4151 break;
4152 case Builtin::BI__builtin_isunordered:
4153 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
4154 break;
4155 }
4156 // ZExt bool to int type.
4157 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
4158 }
4159
4160 case Builtin::BI__builtin_isnan: {
4161 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4162 Value *V = EmitScalarExpr(E->getArg(0));
4163 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
4164 return RValue::get(Result);
4165 return RValue::get(
4166 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
4167 ConvertType(E->getType())));
4168 }
4169
4170 case Builtin::BI__builtin_issignaling: {
4171 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4172 Value *V = EmitScalarExpr(E->getArg(0));
4173 return RValue::get(
4174 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
4175 ConvertType(E->getType())));
4176 }
4177
4178 case Builtin::BI__builtin_isinf: {
4179 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4180 Value *V = EmitScalarExpr(E->getArg(0));
4181 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
4182 return RValue::get(Result);
4183 return RValue::get(
4184 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
4185 ConvertType(E->getType())));
4186 }
4187
4188 case Builtin::BIfinite:
4189 case Builtin::BI__finite:
4190 case Builtin::BIfinitef:
4191 case Builtin::BI__finitef:
4192 case Builtin::BIfinitel:
4193 case Builtin::BI__finitel:
4194 case Builtin::BI__builtin_isfinite: {
4195 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4196 Value *V = EmitScalarExpr(E->getArg(0));
4197 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
4198 return RValue::get(Result);
4199 return RValue::get(
4200 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
4201 ConvertType(E->getType())));
4202 }
4203
4204 case Builtin::BI__builtin_isnormal: {
4205 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4206 Value *V = EmitScalarExpr(E->getArg(0));
4207 return RValue::get(
4208 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
4209 ConvertType(E->getType())));
4210 }
4211
4212 case Builtin::BI__builtin_issubnormal: {
4213 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4214 Value *V = EmitScalarExpr(E->getArg(0));
4215 return RValue::get(
4216 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
4217 ConvertType(E->getType())));
4218 }
4219
4220 case Builtin::BI__builtin_iszero: {
4221 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4222 Value *V = EmitScalarExpr(E->getArg(0));
4223 return RValue::get(
4224 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
4225 ConvertType(E->getType())));
4226 }
4227
4228 case Builtin::BI__builtin_isfpclass: {
4230 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
4231 break;
4232 uint64_t Test = Result.Val.getInt().getLimitedValue();
4233 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4234 Value *V = EmitScalarExpr(E->getArg(0));
4235 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
4236 ConvertType(E->getType())));
4237 }
4238
4239 case Builtin::BI__builtin_nondeterministic_value: {
4240 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
4241
4242 Value *Result = PoisonValue::get(Ty);
4243 Result = Builder.CreateFreeze(Result);
4244
4245 return RValue::get(Result);
4246 }
4247
4248 case Builtin::BI__builtin_elementwise_abs: {
4249 Value *Result;
4250 QualType QT = E->getArg(0)->getType();
4251
4252 if (auto *VecTy = QT->getAs<VectorType>())
4253 QT = VecTy->getElementType();
4254 if (QT->isIntegerType())
4255 Result = Builder.CreateBinaryIntrinsic(
4256 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
4257 nullptr, "elt.abs");
4258 else
4259 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
4260 "elt.abs");
4261
4262 return RValue::get(Result);
4263 }
4264 case Builtin::BI__builtin_elementwise_bitreverse:
4266 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
4267 case Builtin::BI__builtin_elementwise_popcount:
4269 *this, E, Intrinsic::ctpop, "elt.ctpop"));
4270 case Builtin::BI__builtin_elementwise_canonicalize:
4272 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
4273 case Builtin::BI__builtin_elementwise_copysign:
4274 return RValue::get(
4275 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
4276 case Builtin::BI__builtin_elementwise_fshl:
4277 return RValue::get(
4278 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
4279 case Builtin::BI__builtin_elementwise_fshr:
4280 return RValue::get(
4281 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
4282
4283 case Builtin::BI__builtin_elementwise_add_sat:
4284 case Builtin::BI__builtin_elementwise_sub_sat: {
4285 Value *Op0 = EmitScalarExpr(E->getArg(0));
4286 Value *Op1 = EmitScalarExpr(E->getArg(1));
4287 Value *Result;
4288 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4289 QualType Ty = E->getArg(0)->getType();
4290 if (auto *VecTy = Ty->getAs<VectorType>())
4291 Ty = VecTy->getElementType();
4292 bool IsSigned = Ty->isSignedIntegerType();
4293 unsigned Opc;
4294 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4295 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4296 else
4297 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4298 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4299 return RValue::get(Result);
4300 }
4301
4302 case Builtin::BI__builtin_elementwise_max: {
4303 Value *Op0 = EmitScalarExpr(E->getArg(0));
4304 Value *Op1 = EmitScalarExpr(E->getArg(1));
4305 Value *Result;
4306 if (Op0->getType()->isIntOrIntVectorTy()) {
4307 QualType Ty = E->getArg(0)->getType();
4308 if (auto *VecTy = Ty->getAs<VectorType>())
4309 Ty = VecTy->getElementType();
4310 Result = Builder.CreateBinaryIntrinsic(
4311 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4312 Op1, nullptr, "elt.max");
4313 } else
4314 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4315 return RValue::get(Result);
4316 }
4317 case Builtin::BI__builtin_elementwise_min: {
4318 Value *Op0 = EmitScalarExpr(E->getArg(0));
4319 Value *Op1 = EmitScalarExpr(E->getArg(1));
4320 Value *Result;
4321 if (Op0->getType()->isIntOrIntVectorTy()) {
4322 QualType Ty = E->getArg(0)->getType();
4323 if (auto *VecTy = Ty->getAs<VectorType>())
4324 Ty = VecTy->getElementType();
4325 Result = Builder.CreateBinaryIntrinsic(
4326 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4327 Op1, nullptr, "elt.min");
4328 } else
4329 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4330 return RValue::get(Result);
4331 }
4332
4333 case Builtin::BI__builtin_elementwise_maxnum: {
4334 Value *Op0 = EmitScalarExpr(E->getArg(0));
4335 Value *Op1 = EmitScalarExpr(E->getArg(1));
4336 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4337 Op1, nullptr, "elt.maxnum");
4338 return RValue::get(Result);
4339 }
4340
4341 case Builtin::BI__builtin_elementwise_minnum: {
4342 Value *Op0 = EmitScalarExpr(E->getArg(0));
4343 Value *Op1 = EmitScalarExpr(E->getArg(1));
4344 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4345 Op1, nullptr, "elt.minnum");
4346 return RValue::get(Result);
4347 }
4348
4349 case Builtin::BI__builtin_elementwise_maximum: {
4350 Value *Op0 = EmitScalarExpr(E->getArg(0));
4351 Value *Op1 = EmitScalarExpr(E->getArg(1));
4352 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4353 nullptr, "elt.maximum");
4354 return RValue::get(Result);
4355 }
4356
4357 case Builtin::BI__builtin_elementwise_minimum: {
4358 Value *Op0 = EmitScalarExpr(E->getArg(0));
4359 Value *Op1 = EmitScalarExpr(E->getArg(1));
4360 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4361 nullptr, "elt.minimum");
4362 return RValue::get(Result);
4363 }
4364
4365 case Builtin::BI__builtin_elementwise_maximumnum: {
4366 Value *Op0 = EmitScalarExpr(E->getArg(0));
4367 Value *Op1 = EmitScalarExpr(E->getArg(1));
4368 Value *Result = Builder.CreateBinaryIntrinsic(
4369 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4370 return RValue::get(Result);
4371 }
4372
4373 case Builtin::BI__builtin_elementwise_minimumnum: {
4374 Value *Op0 = EmitScalarExpr(E->getArg(0));
4375 Value *Op1 = EmitScalarExpr(E->getArg(1));
4376 Value *Result = Builder.CreateBinaryIntrinsic(
4377 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4378 return RValue::get(Result);
4379 }
4380
4381 case Builtin::BI__builtin_reduce_max: {
4382 auto GetIntrinsicID = [this](QualType QT) {
4383 if (auto *VecTy = QT->getAs<VectorType>())
4384 QT = VecTy->getElementType();
4385 else if (QT->isSizelessVectorType())
4386 QT = QT->getSizelessVectorEltType(CGM.getContext());
4387
4388 if (QT->isSignedIntegerType())
4389 return Intrinsic::vector_reduce_smax;
4390 if (QT->isUnsignedIntegerType())
4391 return Intrinsic::vector_reduce_umax;
4392 assert(QT->isFloatingType() && "must have a float here");
4393 return Intrinsic::vector_reduce_fmax;
4394 };
4396 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4397 }
4398
4399 case Builtin::BI__builtin_reduce_min: {
4400 auto GetIntrinsicID = [this](QualType QT) {
4401 if (auto *VecTy = QT->getAs<VectorType>())
4402 QT = VecTy->getElementType();
4403 else if (QT->isSizelessVectorType())
4404 QT = QT->getSizelessVectorEltType(CGM.getContext());
4405
4406 if (QT->isSignedIntegerType())
4407 return Intrinsic::vector_reduce_smin;
4408 if (QT->isUnsignedIntegerType())
4409 return Intrinsic::vector_reduce_umin;
4410 assert(QT->isFloatingType() && "must have a float here");
4411 return Intrinsic::vector_reduce_fmin;
4412 };
4413
4415 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4416 }
4417
4418 case Builtin::BI__builtin_reduce_add:
4420 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4421 case Builtin::BI__builtin_reduce_mul:
4423 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4424 case Builtin::BI__builtin_reduce_xor:
4426 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4427 case Builtin::BI__builtin_reduce_or:
4429 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4430 case Builtin::BI__builtin_reduce_and:
4432 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4433 case Builtin::BI__builtin_reduce_maximum:
4435 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4436 case Builtin::BI__builtin_reduce_minimum:
4438 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4439 case Builtin::BI__builtin_reduce_assoc_fadd:
4440 case Builtin::BI__builtin_reduce_in_order_fadd: {
4441 llvm::Value *Vector = EmitScalarExpr(E->getArg(0));
4442 llvm::Type *ScalarTy = Vector->getType()->getScalarType();
4443 llvm::Value *StartValue = nullptr;
4444 if (E->getNumArgs() == 2)
4445 StartValue = Builder.CreateFPCast(EmitScalarExpr(E->getArg(1)), ScalarTy);
4446 llvm::Value *Args[] = {/*start_value=*/StartValue
4447 ? StartValue
4448 : llvm::ConstantFP::get(ScalarTy, -0.0F),
4449 /*vector=*/Vector};
4450 llvm::Function *F =
4451 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Vector->getType());
4452 llvm::CallBase *Reduce = Builder.CreateCall(F, Args, "rdx.addf");
4453 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_reduce_assoc_fadd) {
4454 // `__builtin_reduce_assoc_fadd` is an associative reduction which
4455 // requires the reassoc FMF flag.
4456 llvm::FastMathFlags FMF;
4457 FMF.setAllowReassoc();
4458 cast<llvm::CallBase>(Reduce)->setFastMathFlags(FMF);
4459 }
4460 return RValue::get(Reduce);
4461 }
4462
4463 case Builtin::BI__builtin_matrix_transpose: {
4464 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4465 Value *MatValue = EmitScalarExpr(E->getArg(0));
4466 MatrixBuilder MB(Builder);
4467 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4468 MatrixTy->getNumColumns());
4469 return RValue::get(Result);
4470 }
4471
4472 case Builtin::BI__builtin_matrix_column_major_load: {
4473 MatrixBuilder MB(Builder);
4474 // Emit everything that isn't dependent on the first parameter type
4475 Value *Stride = EmitScalarExpr(E->getArg(3));
4476 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4477 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4478 assert(PtrTy && "arg0 must be of pointer type");
4479 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4480
4483 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4484 0);
4485 Value *Result = MB.CreateColumnMajorLoad(
4486 Src.getElementType(), Src.emitRawPointer(*this),
4487 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4488 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4489 return RValue::get(Result);
4490 }
4491
4492 case Builtin::BI__builtin_matrix_column_major_store: {
4493 MatrixBuilder MB(Builder);
4494 Value *Matrix = EmitScalarExpr(E->getArg(0));
4496 Value *Stride = EmitScalarExpr(E->getArg(2));
4497
4498 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4499 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4500 assert(PtrTy && "arg1 must be of pointer type");
4501 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4502
4504 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4505 0);
4506 Value *Result = MB.CreateColumnMajorStore(
4507 Matrix, Dst.emitRawPointer(*this),
4508 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4509 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4511 return RValue::get(Result);
4512 }
4513
4514 case Builtin::BI__builtin_masked_load:
4515 case Builtin::BI__builtin_masked_expand_load: {
4516 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4517 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4518
4519 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4520 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4521 if (E->getNumArgs() > 2)
4522 PassThru = EmitScalarExpr(E->getArg(2));
4523
4524 CharUnits Align = CGM.getNaturalTypeAlignment(
4525 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4526
4527 llvm::Value *Result;
4528 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4529 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4530 PassThru, "masked_load");
4531 } else {
4532 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4533 Result =
4534 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4535 }
4536 return RValue::get(Result);
4537 };
4538 case Builtin::BI__builtin_masked_gather: {
4539 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4540 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4541 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4542
4543 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4544 CharUnits Align = CGM.getNaturalTypeAlignment(
4545 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4546
4547 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4548 if (E->getNumArgs() > 3)
4549 PassThru = EmitScalarExpr(E->getArg(3));
4550
4551 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4553 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4554
4555 llvm::Value *Result = Builder.CreateMaskedGather(
4556 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4557 return RValue::get(Result);
4558 }
4559 case Builtin::BI__builtin_masked_store:
4560 case Builtin::BI__builtin_masked_compress_store: {
4561 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4562 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4563 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4564
4565 QualType ValTy = E->getArg(1)->getType();
4566 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4567
4568 CharUnits Align = CGM.getNaturalTypeAlignment(
4570 nullptr);
4571
4572 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4573 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4574 } else {
4575 llvm::Function *F =
4576 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4577 Builder.CreateCall(F, {Val, Ptr, Mask});
4578 }
4579 return RValue::get(nullptr);
4580 }
4581 case Builtin::BI__builtin_masked_scatter: {
4582 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4583 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4584 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4585 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4586
4587 CharUnits Align = CGM.getNaturalTypeAlignment(
4589 nullptr);
4590
4591 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4592 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4593 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4594
4595 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4596 return RValue();
4597 }
4598 case Builtin::BI__builtin_isinf_sign: {
4599 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4600 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4601 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4602 Value *Arg = EmitScalarExpr(E->getArg(0));
4603 Value *AbsArg = EmitFAbs(*this, Arg);
4604 Value *IsInf = Builder.CreateFCmpOEQ(
4605 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4606 Value *IsNeg = EmitSignBit(*this, Arg);
4607
4608 llvm::Type *IntTy = ConvertType(E->getType());
4609 Value *Zero = Constant::getNullValue(IntTy);
4610 Value *One = ConstantInt::get(IntTy, 1);
4611 Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
4612 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4613 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4614 return RValue::get(Result);
4615 }
4616
4617 case Builtin::BI__builtin_flt_rounds: {
4618 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4619
4620 llvm::Type *ResultType = ConvertType(E->getType());
4621 Value *Result = Builder.CreateCall(F);
4622 if (Result->getType() != ResultType)
4623 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4624 "cast");
4625 return RValue::get(Result);
4626 }
4627
4628 case Builtin::BI__builtin_set_flt_rounds: {
4629 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4630
4631 Value *V = EmitScalarExpr(E->getArg(0));
4632 Builder.CreateCall(F, V);
4633 return RValue::get(nullptr);
4634 }
4635
4636 case Builtin::BI__builtin_fpclassify: {
4637 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4638 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4639 Value *V = EmitScalarExpr(E->getArg(5));
4640 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4641
4642 // Create Result
4643 BasicBlock *Begin = Builder.GetInsertBlock();
4644 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4645 Builder.SetInsertPoint(End);
4646 PHINode *Result =
4647 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4648 "fpclassify_result");
4649
4650 // if (V==0) return FP_ZERO
4651 Builder.SetInsertPoint(Begin);
4652 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4653 "iszero");
4654 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4655 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4656 Builder.CreateCondBr(IsZero, End, NotZero);
4657 Result->addIncoming(ZeroLiteral, Begin);
4658
4659 // if (V != V) return FP_NAN
4660 Builder.SetInsertPoint(NotZero);
4661 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4662 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4663 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4664 Builder.CreateCondBr(IsNan, End, NotNan);
4665 Result->addIncoming(NanLiteral, NotZero);
4666
4667 // if (fabs(V) == infinity) return FP_INFINITY
4668 Builder.SetInsertPoint(NotNan);
4669 Value *VAbs = EmitFAbs(*this, V);
4670 Value *IsInf =
4671 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4672 "isinf");
4673 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4674 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4675 Builder.CreateCondBr(IsInf, End, NotInf);
4676 Result->addIncoming(InfLiteral, NotNan);
4677
4678 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4679 Builder.SetInsertPoint(NotInf);
4680 APFloat Smallest = APFloat::getSmallestNormalized(
4681 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4682 Value *IsNormal =
4683 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4684 "isnormal");
4685 Value *NormalResult =
4686 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4687 EmitScalarExpr(E->getArg(3)));
4688 Builder.CreateBr(End);
4689 Result->addIncoming(NormalResult, NotInf);
4690
4691 // return Result
4692 Builder.SetInsertPoint(End);
4693 return RValue::get(Result);
4694 }
4695
4696 // An alloca will always return a pointer to the alloca (stack) address
4697 // space. This address space need not be the same as the AST / Language
4698 // default (e.g. in C / C++ auto vars are in the generic address space). At
4699 // the AST level this is handled within CreateTempAlloca et al., but for the
4700 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4701 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4702 case Builtin::BIalloca:
4703 case Builtin::BI_alloca:
4704 case Builtin::BI__builtin_alloca_uninitialized:
4705 case Builtin::BI__builtin_alloca: {
4706 Value *Size = EmitScalarExpr(E->getArg(0));
4707 const TargetInfo &TI = getContext().getTargetInfo();
4708 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4709 const Align SuitableAlignmentInBytes =
4710 CGM.getContext()
4711 .toCharUnitsFromBits(TI.getSuitableAlign())
4712 .getAsAlign();
4713 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4714 AI->setAlignment(SuitableAlignmentInBytes);
4715 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4716 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4717 if (AI->getAddressSpace() !=
4718 CGM.getContext().getTargetAddressSpace(
4720 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4721 return RValue::get(performAddrSpaceCast(AI, Ty));
4722 }
4723 return RValue::get(AI);
4724 }
4725
4726 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4727 case Builtin::BI__builtin_alloca_with_align: {
4728 Value *Size = EmitScalarExpr(E->getArg(0));
4729 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4730 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4731 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4732 const Align AlignmentInBytes =
4733 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4734 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4735 AI->setAlignment(AlignmentInBytes);
4736 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4737 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4738 if (AI->getAddressSpace() !=
4739 CGM.getContext().getTargetAddressSpace(
4741 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4742 return RValue::get(performAddrSpaceCast(AI, Ty));
4743 }
4744 return RValue::get(AI);
4745 }
4746
4747 case Builtin::BI__builtin_infer_alloc_token: {
4748 llvm::MDNode *MDN = buildAllocToken(E);
4749 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4750 llvm::Function *F =
4751 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4752 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4753 return RValue::get(TokenID);
4754 }
4755
4756 case Builtin::BIbzero:
4757 case Builtin::BI__builtin_bzero: {
4759 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4760 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4761 E->getArg(0)->getExprLoc(), FD, 0);
4762 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4763 addInstToNewSourceAtom(I, nullptr);
4764 return RValue::get(nullptr);
4765 }
4766
4767 case Builtin::BIbcopy:
4768 case Builtin::BI__builtin_bcopy: {
4771 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4773 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4774 0);
4776 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4777 0);
4778 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4779 addInstToNewSourceAtom(I, nullptr);
4780 return RValue::get(nullptr);
4781 }
4782
4783 case Builtin::BImemcpy:
4784 case Builtin::BI__builtin_memcpy:
4785 case Builtin::BImempcpy:
4786 case Builtin::BI__builtin_mempcpy: {
4789 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4790 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4791 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4792 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4793 addInstToNewSourceAtom(I, nullptr);
4794 if (BuiltinID == Builtin::BImempcpy ||
4795 BuiltinID == Builtin::BI__builtin_mempcpy)
4796 return RValue::get(Builder.CreateInBoundsGEP(
4797 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4798 else
4799 return RValue::get(Dest, *this);
4800 }
4801
4802 case Builtin::BI__builtin_memcpy_inline: {
4805 uint64_t Size =
4806 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4807 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4808 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4809 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4810 addInstToNewSourceAtom(I, nullptr);
4811 return RValue::get(nullptr);
4812 }
4813
4814 case Builtin::BI__builtin_char_memchr:
4815 BuiltinID = Builtin::BI__builtin_memchr;
4816 break;
4817
4818 case Builtin::BI__builtin___memcpy_chk: {
4819 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4820 Expr::EvalResult SizeResult, DstSizeResult;
4821 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4822 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4823 break;
4824 llvm::APSInt Size = SizeResult.Val.getInt();
4825 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4826 if (Size.ugt(DstSize))
4827 break;
4830 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4831 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4832 addInstToNewSourceAtom(I, nullptr);
4833 return RValue::get(Dest, *this);
4834 }
4835
4836 case Builtin::BI__builtin_objc_memmove_collectable: {
4837 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4838 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4839 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4840 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4841 DestAddr, SrcAddr, SizeVal);
4842 return RValue::get(DestAddr, *this);
4843 }
4844
4845 case Builtin::BI__builtin___memmove_chk: {
4846 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4847 Expr::EvalResult SizeResult, DstSizeResult;
4848 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4849 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4850 break;
4851 llvm::APSInt Size = SizeResult.Val.getInt();
4852 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4853 if (Size.ugt(DstSize))
4854 break;
4857 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4858 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4859 addInstToNewSourceAtom(I, nullptr);
4860 return RValue::get(Dest, *this);
4861 }
4862
4863 case Builtin::BI__builtin_trivially_relocate:
4864 case Builtin::BImemmove:
4865 case Builtin::BI__builtin_memmove: {
4868 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4869 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4870 SizeVal = Builder.CreateMul(
4871 SizeVal,
4872 ConstantInt::get(
4873 SizeVal->getType(),
4874 getContext()
4875 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4876 .getQuantity()));
4877 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4878 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4879 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4880 addInstToNewSourceAtom(I, nullptr);
4881 return RValue::get(Dest, *this);
4882 }
4883 case Builtin::BImemset:
4884 case Builtin::BI__builtin_memset: {
4886 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4887 Builder.getInt8Ty());
4888 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4889 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4890 E->getArg(0)->getExprLoc(), FD, 0);
4891 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4892 addInstToNewSourceAtom(I, ByteVal);
4893 return RValue::get(Dest, *this);
4894 }
4895 case Builtin::BI__builtin_memset_inline: {
4897 Value *ByteVal =
4898 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4899 uint64_t Size =
4900 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4902 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4903 0);
4904 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4905 addInstToNewSourceAtom(I, nullptr);
4906 return RValue::get(nullptr);
4907 }
4908 case Builtin::BI__builtin___memset_chk: {
4909 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4910 Expr::EvalResult SizeResult, DstSizeResult;
4911 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4912 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4913 break;
4914 llvm::APSInt Size = SizeResult.Val.getInt();
4915 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4916 if (Size.ugt(DstSize))
4917 break;
4919 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4920 Builder.getInt8Ty());
4921 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4922 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4923 addInstToNewSourceAtom(I, nullptr);
4924 return RValue::get(Dest, *this);
4925 }
4926 case Builtin::BI__builtin_wmemchr: {
4927 // The MSVC runtime library does not provide a definition of wmemchr, so we
4928 // need an inline implementation.
4929 if (!getTarget().getTriple().isOSMSVCRT())
4930 break;
4931
4932 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4933 Value *Str = EmitScalarExpr(E->getArg(0));
4934 Value *Chr = EmitScalarExpr(E->getArg(1));
4935 Value *Size = EmitScalarExpr(E->getArg(2));
4936
4937 BasicBlock *Entry = Builder.GetInsertBlock();
4938 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4939 BasicBlock *Next = createBasicBlock("wmemchr.next");
4940 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4941 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4942 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4943
4944 EmitBlock(CmpEq);
4945 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4946 StrPhi->addIncoming(Str, Entry);
4947 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4948 SizePhi->addIncoming(Size, Entry);
4949 CharUnits WCharAlign =
4951 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4952 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4953 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4954 Builder.CreateCondBr(StrEqChr, Exit, Next);
4955
4956 EmitBlock(Next);
4957 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4958 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4959 Value *NextSizeEq0 =
4960 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4961 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4962 StrPhi->addIncoming(NextStr, Next);
4963 SizePhi->addIncoming(NextSize, Next);
4964
4965 EmitBlock(Exit);
4966 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4967 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4968 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4969 Ret->addIncoming(FoundChr, CmpEq);
4970 return RValue::get(Ret);
4971 }
4972 case Builtin::BI__builtin_wmemcmp: {
4973 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4974 // need an inline implementation.
4975 if (!getTarget().getTriple().isOSMSVCRT())
4976 break;
4977
4978 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4979
4980 Value *Dst = EmitScalarExpr(E->getArg(0));
4981 Value *Src = EmitScalarExpr(E->getArg(1));
4982 Value *Size = EmitScalarExpr(E->getArg(2));
4983
4984 BasicBlock *Entry = Builder.GetInsertBlock();
4985 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4986 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4987 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4988 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4989 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4990 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4991
4992 EmitBlock(CmpGT);
4993 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4994 DstPhi->addIncoming(Dst, Entry);
4995 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4996 SrcPhi->addIncoming(Src, Entry);
4997 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4998 SizePhi->addIncoming(Size, Entry);
4999 CharUnits WCharAlign =
5001 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
5002 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
5003 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
5004 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
5005
5006 EmitBlock(CmpLT);
5007 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
5008 Builder.CreateCondBr(DstLtSrc, Exit, Next);
5009
5010 EmitBlock(Next);
5011 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
5012 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
5013 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
5014 Value *NextSizeEq0 =
5015 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
5016 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
5017 DstPhi->addIncoming(NextDst, Next);
5018 SrcPhi->addIncoming(NextSrc, Next);
5019 SizePhi->addIncoming(NextSize, Next);
5020
5021 EmitBlock(Exit);
5022 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
5023 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
5024 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
5025 Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
5026 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
5027 return RValue::get(Ret);
5028 }
5029 case Builtin::BI__builtin_dwarf_cfa: {
5030 // The offset in bytes from the first argument to the CFA.
5031 //
5032 // Why on earth is this in the frontend? Is there any reason at
5033 // all that the backend can't reasonably determine this while
5034 // lowering llvm.eh.dwarf.cfa()?
5035 //
5036 // TODO: If there's a satisfactory reason, add a target hook for
5037 // this instead of hard-coding 0, which is correct for most targets.
5038 int32_t Offset = 0;
5039
5040 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
5041 return RValue::get(Builder.CreateCall(F,
5042 llvm::ConstantInt::get(Int32Ty, Offset)));
5043 }
5044 case Builtin::BI__builtin_return_address: {
5045 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
5046 getContext().UnsignedIntTy);
5047 Function *F =
5048 CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramPtrTy});
5049 return RValue::get(Builder.CreateCall(F, Depth));
5050 }
5051 case Builtin::BI_ReturnAddress: {
5052 Function *F =
5053 CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramPtrTy});
5054 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
5055 }
5056 case Builtin::BI__builtin_frame_address: {
5057 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
5058 getContext().UnsignedIntTy);
5059 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
5060 return RValue::get(Builder.CreateCall(F, Depth));
5061 }
5062 case Builtin::BI__builtin_stack_address: {
5063 return RValue::get(Builder.CreateCall(
5064 CGM.getIntrinsic(Intrinsic::stackaddress, AllocaInt8PtrTy)));
5065 }
5066 case Builtin::BI__builtin_extract_return_addr: {
5069 return RValue::get(Result);
5070 }
5071 case Builtin::BI__builtin_frob_return_addr: {
5074 return RValue::get(Result);
5075 }
5076 case Builtin::BI__builtin_dwarf_sp_column: {
5077 llvm::IntegerType *Ty
5080 if (Column == -1) {
5081 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
5082 return RValue::get(llvm::UndefValue::get(Ty));
5083 }
5084 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
5085 }
5086 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
5088 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
5089 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
5090 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
5091 }
5092 case Builtin::BI__builtin_eh_return: {
5093 Value *Int = EmitScalarExpr(E->getArg(0));
5094 Value *Ptr = EmitScalarExpr(E->getArg(1));
5095
5096 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
5097 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
5098 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
5099 Function *F =
5100 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
5101 : Intrinsic::eh_return_i64);
5102 Builder.CreateCall(F, {Int, Ptr});
5103 Builder.CreateUnreachable();
5104
5105 // We do need to preserve an insertion point.
5106 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
5107
5108 return RValue::get(nullptr);
5109 }
5110 case Builtin::BI__builtin_unwind_init: {
5111 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
5112 Builder.CreateCall(F);
5113 return RValue::get(nullptr);
5114 }
5115 case Builtin::BI__builtin_extend_pointer: {
5116 // Extends a pointer to the size of an _Unwind_Word, which is
5117 // uint64_t on all platforms. Generally this gets poked into a
5118 // register and eventually used as an address, so if the
5119 // addressing registers are wider than pointers and the platform
5120 // doesn't implicitly ignore high-order bits when doing
5121 // addressing, we need to make sure we zext / sext based on
5122 // the platform's expectations.
5123 //
5124 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
5125
5126 // Cast the pointer to intptr_t.
5127 Value *Ptr = EmitScalarExpr(E->getArg(0));
5128 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
5129
5130 // If that's 64 bits, we're done.
5131 if (IntPtrTy->getBitWidth() == 64)
5132 return RValue::get(Result);
5133
5134 // Otherwise, ask the codegen data what to do.
5135 if (getTargetHooks().extendPointerWithSExt())
5136 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
5137 else
5138 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
5139 }
5140 case Builtin::BI__builtin_setjmp: {
5141 // Buffer is a void**.
5143
5144 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
5145 // On this target, the back end fills in the context buffer completely.
5146 // It doesn't really matter if the frontend stores to the buffer before
5147 // calling setjmp, the back-end is going to overwrite them anyway.
5148 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
5149 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
5150 }
5151
5152 // Store the frame pointer to the setjmp buffer.
5153 Value *FrameAddr = Builder.CreateCall(
5154 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
5155 ConstantInt::get(Int32Ty, 0));
5156 Builder.CreateStore(FrameAddr, Buf);
5157
5158 // Store the stack pointer to the setjmp buffer.
5159 Value *StackAddr = Builder.CreateStackSave();
5160 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
5161
5162 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
5163 Builder.CreateStore(StackAddr, StackSaveSlot);
5164
5165 // Call LLVM's EH setjmp, which is lightweight.
5166 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
5167 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
5168 }
5169 case Builtin::BI__builtin_longjmp: {
5170 Value *Buf = EmitScalarExpr(E->getArg(0));
5171
5172 // Call LLVM's EH longjmp, which is lightweight.
5173 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
5174
5175 // longjmp doesn't return; mark this as unreachable.
5176 Builder.CreateUnreachable();
5177
5178 // We do need to preserve an insertion point.
5179 EmitBlock(createBasicBlock("longjmp.cont"));
5180
5181 return RValue::get(nullptr);
5182 }
5183 case Builtin::BI__builtin_launder: {
5184 const Expr *Arg = E->getArg(0);
5185 QualType ArgTy = Arg->getType()->getPointeeType();
5186 Value *Ptr = EmitScalarExpr(Arg);
5187 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
5188 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
5189
5190 return RValue::get(Ptr);
5191 }
5192 case Builtin::BI__sync_fetch_and_add:
5193 case Builtin::BI__sync_fetch_and_sub:
5194 case Builtin::BI__sync_fetch_and_or:
5195 case Builtin::BI__sync_fetch_and_and:
5196 case Builtin::BI__sync_fetch_and_xor:
5197 case Builtin::BI__sync_fetch_and_nand:
5198 case Builtin::BI__sync_add_and_fetch:
5199 case Builtin::BI__sync_sub_and_fetch:
5200 case Builtin::BI__sync_and_and_fetch:
5201 case Builtin::BI__sync_or_and_fetch:
5202 case Builtin::BI__sync_xor_and_fetch:
5203 case Builtin::BI__sync_nand_and_fetch:
5204 case Builtin::BI__sync_val_compare_and_swap:
5205 case Builtin::BI__sync_bool_compare_and_swap:
5206 case Builtin::BI__sync_lock_test_and_set:
5207 case Builtin::BI__sync_lock_release:
5208 case Builtin::BI__sync_swap:
5209 llvm_unreachable("Shouldn't make it through sema");
5210 case Builtin::BI__sync_fetch_and_add_1:
5211 case Builtin::BI__sync_fetch_and_add_2:
5212 case Builtin::BI__sync_fetch_and_add_4:
5213 case Builtin::BI__sync_fetch_and_add_8:
5214 case Builtin::BI__sync_fetch_and_add_16:
5215 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
5216 case Builtin::BI__sync_fetch_and_sub_1:
5217 case Builtin::BI__sync_fetch_and_sub_2:
5218 case Builtin::BI__sync_fetch_and_sub_4:
5219 case Builtin::BI__sync_fetch_and_sub_8:
5220 case Builtin::BI__sync_fetch_and_sub_16:
5221 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
5222 case Builtin::BI__sync_fetch_and_or_1:
5223 case Builtin::BI__sync_fetch_and_or_2:
5224 case Builtin::BI__sync_fetch_and_or_4:
5225 case Builtin::BI__sync_fetch_and_or_8:
5226 case Builtin::BI__sync_fetch_and_or_16:
5227 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
5228 case Builtin::BI__sync_fetch_and_and_1:
5229 case Builtin::BI__sync_fetch_and_and_2:
5230 case Builtin::BI__sync_fetch_and_and_4:
5231 case Builtin::BI__sync_fetch_and_and_8:
5232 case Builtin::BI__sync_fetch_and_and_16:
5233 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
5234 case Builtin::BI__sync_fetch_and_xor_1:
5235 case Builtin::BI__sync_fetch_and_xor_2:
5236 case Builtin::BI__sync_fetch_and_xor_4:
5237 case Builtin::BI__sync_fetch_and_xor_8:
5238 case Builtin::BI__sync_fetch_and_xor_16:
5239 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
5240 case Builtin::BI__sync_fetch_and_nand_1:
5241 case Builtin::BI__sync_fetch_and_nand_2:
5242 case Builtin::BI__sync_fetch_and_nand_4:
5243 case Builtin::BI__sync_fetch_and_nand_8:
5244 case Builtin::BI__sync_fetch_and_nand_16:
5245 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
5246
5247 // Clang extensions: not overloaded yet.
5248 case Builtin::BI__sync_fetch_and_min:
5249 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
5250 case Builtin::BI__sync_fetch_and_max:
5251 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
5252 case Builtin::BI__sync_fetch_and_umin:
5253 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
5254 case Builtin::BI__sync_fetch_and_umax:
5255 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
5256
5257 case Builtin::BI__sync_add_and_fetch_1:
5258 case Builtin::BI__sync_add_and_fetch_2:
5259 case Builtin::BI__sync_add_and_fetch_4:
5260 case Builtin::BI__sync_add_and_fetch_8:
5261 case Builtin::BI__sync_add_and_fetch_16:
5262 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
5263 llvm::Instruction::Add);
5264 case Builtin::BI__sync_sub_and_fetch_1:
5265 case Builtin::BI__sync_sub_and_fetch_2:
5266 case Builtin::BI__sync_sub_and_fetch_4:
5267 case Builtin::BI__sync_sub_and_fetch_8:
5268 case Builtin::BI__sync_sub_and_fetch_16:
5269 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
5270 llvm::Instruction::Sub);
5271 case Builtin::BI__sync_and_and_fetch_1:
5272 case Builtin::BI__sync_and_and_fetch_2:
5273 case Builtin::BI__sync_and_and_fetch_4:
5274 case Builtin::BI__sync_and_and_fetch_8:
5275 case Builtin::BI__sync_and_and_fetch_16:
5276 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
5277 llvm::Instruction::And);
5278 case Builtin::BI__sync_or_and_fetch_1:
5279 case Builtin::BI__sync_or_and_fetch_2:
5280 case Builtin::BI__sync_or_and_fetch_4:
5281 case Builtin::BI__sync_or_and_fetch_8:
5282 case Builtin::BI__sync_or_and_fetch_16:
5283 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
5284 llvm::Instruction::Or);
5285 case Builtin::BI__sync_xor_and_fetch_1:
5286 case Builtin::BI__sync_xor_and_fetch_2:
5287 case Builtin::BI__sync_xor_and_fetch_4:
5288 case Builtin::BI__sync_xor_and_fetch_8:
5289 case Builtin::BI__sync_xor_and_fetch_16:
5290 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
5291 llvm::Instruction::Xor);
5292 case Builtin::BI__sync_nand_and_fetch_1:
5293 case Builtin::BI__sync_nand_and_fetch_2:
5294 case Builtin::BI__sync_nand_and_fetch_4:
5295 case Builtin::BI__sync_nand_and_fetch_8:
5296 case Builtin::BI__sync_nand_and_fetch_16:
5297 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
5298 llvm::Instruction::And, true);
5299
5300 case Builtin::BI__sync_val_compare_and_swap_1:
5301 case Builtin::BI__sync_val_compare_and_swap_2:
5302 case Builtin::BI__sync_val_compare_and_swap_4:
5303 case Builtin::BI__sync_val_compare_and_swap_8:
5304 case Builtin::BI__sync_val_compare_and_swap_16:
5306 *this, E, false, AtomicOrdering::SequentiallyConsistent,
5307 AtomicOrdering::SequentiallyConsistent));
5308
5309 case Builtin::BI__sync_bool_compare_and_swap_1:
5310 case Builtin::BI__sync_bool_compare_and_swap_2:
5311 case Builtin::BI__sync_bool_compare_and_swap_4:
5312 case Builtin::BI__sync_bool_compare_and_swap_8:
5313 case Builtin::BI__sync_bool_compare_and_swap_16:
5315 *this, E, true, AtomicOrdering::SequentiallyConsistent,
5316 AtomicOrdering::SequentiallyConsistent));
5317
5318 case Builtin::BI__sync_swap_1:
5319 case Builtin::BI__sync_swap_2:
5320 case Builtin::BI__sync_swap_4:
5321 case Builtin::BI__sync_swap_8:
5322 case Builtin::BI__sync_swap_16:
5323 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5324
5325 case Builtin::BI__sync_lock_test_and_set_1:
5326 case Builtin::BI__sync_lock_test_and_set_2:
5327 case Builtin::BI__sync_lock_test_and_set_4:
5328 case Builtin::BI__sync_lock_test_and_set_8:
5329 case Builtin::BI__sync_lock_test_and_set_16:
5330 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5331
5332 case Builtin::BI__sync_lock_release_1:
5333 case Builtin::BI__sync_lock_release_2:
5334 case Builtin::BI__sync_lock_release_4:
5335 case Builtin::BI__sync_lock_release_8:
5336 case Builtin::BI__sync_lock_release_16: {
5337 Address Ptr = CheckAtomicAlignment(*this, E);
5338 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
5339
5340 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
5341 getContext().getTypeSize(ElTy));
5342 llvm::StoreInst *Store =
5343 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
5344 Store->setAtomic(llvm::AtomicOrdering::Release);
5345 return RValue::get(nullptr);
5346 }
5347
5348 case Builtin::BI__sync_synchronize: {
5349 // We assume this is supposed to correspond to a C++0x-style
5350 // sequentially-consistent fence (i.e. this is only usable for
5351 // synchronization, not device I/O or anything like that). This intrinsic
5352 // is really badly designed in the sense that in theory, there isn't
5353 // any way to safely use it... but in practice, it mostly works
5354 // to use it with non-atomic loads and stores to get acquire/release
5355 // semantics.
5356 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5357 return RValue::get(nullptr);
5358 }
5359
5360 case Builtin::BI__builtin_nontemporal_load:
5361 return RValue::get(EmitNontemporalLoad(*this, E));
5362 case Builtin::BI__builtin_nontemporal_store:
5363 return RValue::get(EmitNontemporalStore(*this, E));
5364 case Builtin::BI__c11_atomic_is_lock_free:
5365 case Builtin::BI__atomic_is_lock_free: {
5366 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5367 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5368 // _Atomic(T) is always properly-aligned.
5369 const char *LibCallName = "__atomic_is_lock_free";
5370 CallArgList Args;
5371 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5372 getContext().getSizeType());
5373 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5374 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5376 else
5377 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5379 const CGFunctionInfo &FuncInfo =
5380 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5381 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5382 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5383 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5384 ReturnValueSlot(), Args);
5385 }
5386
5387 case Builtin::BI__atomic_thread_fence:
5388 case Builtin::BI__atomic_signal_fence:
5389 case Builtin::BI__c11_atomic_thread_fence:
5390 case Builtin::BI__c11_atomic_signal_fence: {
5391 llvm::SyncScope::ID SSID;
5392 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5393 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5394 SSID = llvm::SyncScope::SingleThread;
5395 else
5396 SSID = llvm::SyncScope::System;
5397 Value *Order = EmitScalarExpr(E->getArg(0));
5398 if (isa<llvm::ConstantInt>(Order)) {
5399 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5400 switch (ord) {
5401 case 0: // memory_order_relaxed
5402 default: // invalid order
5403 break;
5404 case 1: // memory_order_consume
5405 case 2: // memory_order_acquire
5406 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5407 break;
5408 case 3: // memory_order_release
5409 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5410 break;
5411 case 4: // memory_order_acq_rel
5412 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5413 break;
5414 case 5: // memory_order_seq_cst
5415 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5416 break;
5417 }
5418 return RValue::get(nullptr);
5419 }
5420
5421 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5422 AcquireBB = createBasicBlock("acquire", CurFn);
5423 ReleaseBB = createBasicBlock("release", CurFn);
5424 AcqRelBB = createBasicBlock("acqrel", CurFn);
5425 SeqCstBB = createBasicBlock("seqcst", CurFn);
5426 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5427
5428 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5429 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5430
5431 Builder.SetInsertPoint(AcquireBB);
5432 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5433 Builder.CreateBr(ContBB);
5434 SI->addCase(Builder.getInt32(1), AcquireBB);
5435 SI->addCase(Builder.getInt32(2), AcquireBB);
5436
5437 Builder.SetInsertPoint(ReleaseBB);
5438 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5439 Builder.CreateBr(ContBB);
5440 SI->addCase(Builder.getInt32(3), ReleaseBB);
5441
5442 Builder.SetInsertPoint(AcqRelBB);
5443 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5444 Builder.CreateBr(ContBB);
5445 SI->addCase(Builder.getInt32(4), AcqRelBB);
5446
5447 Builder.SetInsertPoint(SeqCstBB);
5448 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5449 Builder.CreateBr(ContBB);
5450 SI->addCase(Builder.getInt32(5), SeqCstBB);
5451
5452 Builder.SetInsertPoint(ContBB);
5453 return RValue::get(nullptr);
5454 }
5455 case Builtin::BI__scoped_atomic_thread_fence: {
5457
5458 Value *Order = EmitScalarExpr(E->getArg(0));
5459 Value *Scope = EmitScalarExpr(E->getArg(1));
5460 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5461 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5462 if (Ord && Scp) {
5463 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5464 ? ScopeModel->map(Scp->getZExtValue())
5465 : ScopeModel->map(ScopeModel->getFallBackValue());
5466 switch (Ord->getZExtValue()) {
5467 case 0: // memory_order_relaxed
5468 default: // invalid order
5469 break;
5470 case 1: // memory_order_consume
5471 case 2: // memory_order_acquire
5472 Builder.CreateFence(
5473 llvm::AtomicOrdering::Acquire,
5474 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5475 llvm::AtomicOrdering::Acquire,
5476 getLLVMContext()));
5477 break;
5478 case 3: // memory_order_release
5479 Builder.CreateFence(
5480 llvm::AtomicOrdering::Release,
5481 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5482 llvm::AtomicOrdering::Release,
5483 getLLVMContext()));
5484 break;
5485 case 4: // memory_order_acq_rel
5486 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5487 getTargetHooks().getLLVMSyncScopeID(
5488 getLangOpts(), SS,
5489 llvm::AtomicOrdering::AcquireRelease,
5490 getLLVMContext()));
5491 break;
5492 case 5: // memory_order_seq_cst
5493 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5494 getTargetHooks().getLLVMSyncScopeID(
5495 getLangOpts(), SS,
5496 llvm::AtomicOrdering::SequentiallyConsistent,
5497 getLLVMContext()));
5498 break;
5499 }
5500 return RValue::get(nullptr);
5501 }
5502
5503 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5504
5506 OrderBBs;
5507 if (Ord) {
5508 switch (Ord->getZExtValue()) {
5509 case 0: // memory_order_relaxed
5510 default: // invalid order
5511 ContBB->eraseFromParent();
5512 return RValue::get(nullptr);
5513 case 1: // memory_order_consume
5514 case 2: // memory_order_acquire
5515 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5516 llvm::AtomicOrdering::Acquire);
5517 break;
5518 case 3: // memory_order_release
5519 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5520 llvm::AtomicOrdering::Release);
5521 break;
5522 case 4: // memory_order_acq_rel
5523 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5524 llvm::AtomicOrdering::AcquireRelease);
5525 break;
5526 case 5: // memory_order_seq_cst
5527 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5528 llvm::AtomicOrdering::SequentiallyConsistent);
5529 break;
5530 }
5531 } else {
5532 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5533 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5534 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5535 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5536
5537 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5538 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5539 SI->addCase(Builder.getInt32(1), AcquireBB);
5540 SI->addCase(Builder.getInt32(2), AcquireBB);
5541 SI->addCase(Builder.getInt32(3), ReleaseBB);
5542 SI->addCase(Builder.getInt32(4), AcqRelBB);
5543 SI->addCase(Builder.getInt32(5), SeqCstBB);
5544
5545 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5546 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5547 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5548 OrderBBs.emplace_back(SeqCstBB,
5549 llvm::AtomicOrdering::SequentiallyConsistent);
5550 }
5551
5552 for (auto &[OrderBB, Ordering] : OrderBBs) {
5553 Builder.SetInsertPoint(OrderBB);
5554 if (Scp) {
5555 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5556 ? ScopeModel->map(Scp->getZExtValue())
5557 : ScopeModel->map(ScopeModel->getFallBackValue());
5558 Builder.CreateFence(Ordering,
5559 getTargetHooks().getLLVMSyncScopeID(
5560 getLangOpts(), SS, Ordering, getLLVMContext()));
5561 Builder.CreateBr(ContBB);
5562 } else {
5563 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5564 for (unsigned Scp : ScopeModel->getRuntimeValues())
5565 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5566
5567 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5568 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5569 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5570 auto *B = BBs[Scp];
5571 SI->addCase(Builder.getInt32(Scp), B);
5572
5573 Builder.SetInsertPoint(B);
5574 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5575 getLangOpts(), ScopeModel->map(Scp),
5576 Ordering, getLLVMContext()));
5577 Builder.CreateBr(ContBB);
5578 }
5579 }
5580 }
5581
5582 Builder.SetInsertPoint(ContBB);
5583 return RValue::get(nullptr);
5584 }
5585
5586 case Builtin::BI__builtin_signbit:
5587 case Builtin::BI__builtin_signbitf:
5588 case Builtin::BI__builtin_signbitl: {
5589 return RValue::get(
5590 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5591 ConvertType(E->getType())));
5592 }
5593 case Builtin::BI__warn_memset_zero_len:
5594 return RValue::getIgnored();
5595 case Builtin::BI__annotation: {
5596 // Re-encode each wide string to UTF8 and make an MDString.
5598 for (const Expr *Arg : E->arguments()) {
5599 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5600 assert(Str->getCharByteWidth() == 2);
5601 StringRef WideBytes = Str->getBytes();
5602 std::string StrUtf8;
5603 if (!convertUTF16ToUTF8String(
5604 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5605 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5606 continue;
5607 }
5608 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5609 }
5610
5611 // Build and MDTuple of MDStrings and emit the intrinsic call.
5612 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5613 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5614 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5615 return RValue::getIgnored();
5616 }
5617 case Builtin::BI__builtin_annotation: {
5618 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5619 llvm::Function *F = CGM.getIntrinsic(
5620 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5621
5622 // Get the annotation string, go through casts. Sema requires this to be a
5623 // non-wide string literal, potentially casted, so the cast<> is safe.
5624 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5625 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5626 return RValue::get(
5627 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5628 }
5629 case Builtin::BI__builtin_addcb:
5630 case Builtin::BI__builtin_addcs:
5631 case Builtin::BI__builtin_addc:
5632 case Builtin::BI__builtin_addcl:
5633 case Builtin::BI__builtin_addcll:
5634 case Builtin::BI__builtin_subcb:
5635 case Builtin::BI__builtin_subcs:
5636 case Builtin::BI__builtin_subc:
5637 case Builtin::BI__builtin_subcl:
5638 case Builtin::BI__builtin_subcll: {
5639
5640 // We translate all of these builtins from expressions of the form:
5641 // int x = ..., y = ..., carryin = ..., carryout, result;
5642 // result = __builtin_addc(x, y, carryin, &carryout);
5643 //
5644 // to LLVM IR of the form:
5645 //
5646 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5647 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5648 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5649 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5650 // i32 %carryin)
5651 // %result = extractvalue {i32, i1} %tmp2, 0
5652 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5653 // %tmp3 = or i1 %carry1, %carry2
5654 // %tmp4 = zext i1 %tmp3 to i32
5655 // store i32 %tmp4, i32* %carryout
5656
5657 // Scalarize our inputs.
5658 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5659 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5660 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5661 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5662
5663 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5664 Intrinsic::ID IntrinsicId;
5665 switch (BuiltinID) {
5666 default: llvm_unreachable("Unknown multiprecision builtin id.");
5667 case Builtin::BI__builtin_addcb:
5668 case Builtin::BI__builtin_addcs:
5669 case Builtin::BI__builtin_addc:
5670 case Builtin::BI__builtin_addcl:
5671 case Builtin::BI__builtin_addcll:
5672 IntrinsicId = Intrinsic::uadd_with_overflow;
5673 break;
5674 case Builtin::BI__builtin_subcb:
5675 case Builtin::BI__builtin_subcs:
5676 case Builtin::BI__builtin_subc:
5677 case Builtin::BI__builtin_subcl:
5678 case Builtin::BI__builtin_subcll:
5679 IntrinsicId = Intrinsic::usub_with_overflow;
5680 break;
5681 }
5682
5683 // Construct our resulting LLVM IR expression.
5684 llvm::Value *Carry1;
5685 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5686 X, Y, Carry1);
5687 llvm::Value *Carry2;
5688 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5689 Sum1, Carryin, Carry2);
5690 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5691 X->getType());
5692 Builder.CreateStore(CarryOut, CarryOutPtr);
5693 return RValue::get(Sum2);
5694 }
5695
5696 case Builtin::BI__builtin_add_overflow:
5697 case Builtin::BI__builtin_sub_overflow:
5698 case Builtin::BI__builtin_mul_overflow: {
5699 const clang::Expr *LeftArg = E->getArg(0);
5700 const clang::Expr *RightArg = E->getArg(1);
5701 const clang::Expr *ResultArg = E->getArg(2);
5702
5703 clang::QualType ResultQTy =
5704 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5705
5706 WidthAndSignedness LeftInfo =
5707 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5708 WidthAndSignedness RightInfo =
5709 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5710 WidthAndSignedness ResultInfo =
5711 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5712
5713 // Handle mixed-sign multiplication as a special case, because adding
5714 // runtime or backend support for our generic irgen would be too expensive.
5715 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5716 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5717 RightInfo, ResultArg, ResultQTy,
5718 ResultInfo);
5719
5720 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5721 ResultInfo))
5723 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5724 ResultInfo);
5725
5726 WidthAndSignedness EncompassingInfo =
5727 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5728
5729 llvm::Type *EncompassingLLVMTy =
5730 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5731
5732 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5733
5734 Intrinsic::ID IntrinsicId;
5735 switch (BuiltinID) {
5736 default:
5737 llvm_unreachable("Unknown overflow builtin id.");
5738 case Builtin::BI__builtin_add_overflow:
5739 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5740 : Intrinsic::uadd_with_overflow;
5741 break;
5742 case Builtin::BI__builtin_sub_overflow:
5743 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5744 : Intrinsic::usub_with_overflow;
5745 break;
5746 case Builtin::BI__builtin_mul_overflow:
5747 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5748 : Intrinsic::umul_with_overflow;
5749 break;
5750 }
5751
5752 llvm::Value *Left = EmitScalarExpr(LeftArg);
5753 llvm::Value *Right = EmitScalarExpr(RightArg);
5754 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5755
5756 // Extend each operand to the encompassing type.
5757 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5758 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5759
5760 // Perform the operation on the extended values.
5761 llvm::Value *Overflow, *Result;
5762 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5763
5764 if (EncompassingInfo.Width > ResultInfo.Width) {
5765 // The encompassing type is wider than the result type, so we need to
5766 // truncate it.
5767 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5768
5769 // To see if the truncation caused an overflow, we will extend
5770 // the result and then compare it to the original result.
5771 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5772 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5773 llvm::Value *TruncationOverflow =
5774 Builder.CreateICmpNE(Result, ResultTruncExt);
5775
5776 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5777 Result = ResultTrunc;
5778 }
5779
5780 // Finally, store the result using the pointer.
5781 bool isVolatile =
5782 ResultArg->getType()->getPointeeType().isVolatileQualified();
5783 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5784
5785 return RValue::get(Overflow);
5786 }
5787
5788 case Builtin::BI__builtin_uadd_overflow:
5789 case Builtin::BI__builtin_uaddl_overflow:
5790 case Builtin::BI__builtin_uaddll_overflow:
5791 case Builtin::BI__builtin_usub_overflow:
5792 case Builtin::BI__builtin_usubl_overflow:
5793 case Builtin::BI__builtin_usubll_overflow:
5794 case Builtin::BI__builtin_umul_overflow:
5795 case Builtin::BI__builtin_umull_overflow:
5796 case Builtin::BI__builtin_umulll_overflow:
5797 case Builtin::BI__builtin_sadd_overflow:
5798 case Builtin::BI__builtin_saddl_overflow:
5799 case Builtin::BI__builtin_saddll_overflow:
5800 case Builtin::BI__builtin_ssub_overflow:
5801 case Builtin::BI__builtin_ssubl_overflow:
5802 case Builtin::BI__builtin_ssubll_overflow:
5803 case Builtin::BI__builtin_smul_overflow:
5804 case Builtin::BI__builtin_smull_overflow:
5805 case Builtin::BI__builtin_smulll_overflow: {
5806
5807 // We translate all of these builtins directly to the relevant llvm IR node.
5808
5809 // Scalarize our inputs.
5810 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5811 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5812 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5813
5814 // Decide which of the overflow intrinsics we are lowering to:
5815 Intrinsic::ID IntrinsicId;
5816 switch (BuiltinID) {
5817 default: llvm_unreachable("Unknown overflow builtin id.");
5818 case Builtin::BI__builtin_uadd_overflow:
5819 case Builtin::BI__builtin_uaddl_overflow:
5820 case Builtin::BI__builtin_uaddll_overflow:
5821 IntrinsicId = Intrinsic::uadd_with_overflow;
5822 break;
5823 case Builtin::BI__builtin_usub_overflow:
5824 case Builtin::BI__builtin_usubl_overflow:
5825 case Builtin::BI__builtin_usubll_overflow:
5826 IntrinsicId = Intrinsic::usub_with_overflow;
5827 break;
5828 case Builtin::BI__builtin_umul_overflow:
5829 case Builtin::BI__builtin_umull_overflow:
5830 case Builtin::BI__builtin_umulll_overflow:
5831 IntrinsicId = Intrinsic::umul_with_overflow;
5832 break;
5833 case Builtin::BI__builtin_sadd_overflow:
5834 case Builtin::BI__builtin_saddl_overflow:
5835 case Builtin::BI__builtin_saddll_overflow:
5836 IntrinsicId = Intrinsic::sadd_with_overflow;
5837 break;
5838 case Builtin::BI__builtin_ssub_overflow:
5839 case Builtin::BI__builtin_ssubl_overflow:
5840 case Builtin::BI__builtin_ssubll_overflow:
5841 IntrinsicId = Intrinsic::ssub_with_overflow;
5842 break;
5843 case Builtin::BI__builtin_smul_overflow:
5844 case Builtin::BI__builtin_smull_overflow:
5845 case Builtin::BI__builtin_smulll_overflow:
5846 IntrinsicId = Intrinsic::smul_with_overflow;
5847 break;
5848 }
5849
5850
5851 llvm::Value *Carry;
5852 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5853 Builder.CreateStore(Sum, SumOutPtr);
5854
5855 return RValue::get(Carry);
5856 }
5857 case Builtin::BIaddressof:
5858 case Builtin::BI__addressof:
5859 case Builtin::BI__builtin_addressof:
5860 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5861 case Builtin::BI__builtin_function_start:
5862 return RValue::get(CGM.GetFunctionStart(
5863 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5864 case Builtin::BI__builtin_operator_new:
5866 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5867 case Builtin::BI__builtin_operator_delete:
5869 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5870 return RValue::get(nullptr);
5871
5872 case Builtin::BI__builtin_is_aligned:
5873 return EmitBuiltinIsAligned(E);
5874 case Builtin::BI__builtin_align_up:
5875 return EmitBuiltinAlignTo(E, true);
5876 case Builtin::BI__builtin_align_down:
5877 return EmitBuiltinAlignTo(E, false);
5878
5879 case Builtin::BI__noop:
5880 // __noop always evaluates to an integer literal zero.
5881 return RValue::get(ConstantInt::get(IntTy, 0));
5882 case Builtin::BI__builtin_call_with_static_chain: {
5883 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5884 const Expr *Chain = E->getArg(1);
5885 return EmitCall(Call->getCallee()->getType(),
5886 EmitCallee(Call->getCallee()), Call, ReturnValue,
5887 EmitScalarExpr(Chain));
5888 }
5889 case Builtin::BI_InterlockedExchange8:
5890 case Builtin::BI_InterlockedExchange16:
5891 case Builtin::BI_InterlockedExchange:
5892 case Builtin::BI_InterlockedExchangePointer:
5893 return RValue::get(
5895 case Builtin::BI_InterlockedCompareExchangePointer:
5896 return RValue::get(
5898 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5899 return RValue::get(
5901 case Builtin::BI_InterlockedCompareExchange8:
5902 case Builtin::BI_InterlockedCompareExchange16:
5903 case Builtin::BI_InterlockedCompareExchange:
5904 case Builtin::BI_InterlockedCompareExchange64:
5905 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5906 case Builtin::BI_InterlockedIncrement16:
5907 case Builtin::BI_InterlockedIncrement:
5908 return RValue::get(
5910 case Builtin::BI_InterlockedDecrement16:
5911 case Builtin::BI_InterlockedDecrement:
5912 return RValue::get(
5914 case Builtin::BI_InterlockedAnd8:
5915 case Builtin::BI_InterlockedAnd16:
5916 case Builtin::BI_InterlockedAnd:
5918 case Builtin::BI_InterlockedExchangeAdd8:
5919 case Builtin::BI_InterlockedExchangeAdd16:
5920 case Builtin::BI_InterlockedExchangeAdd:
5921 return RValue::get(
5923 case Builtin::BI_InterlockedExchangeSub8:
5924 case Builtin::BI_InterlockedExchangeSub16:
5925 case Builtin::BI_InterlockedExchangeSub:
5926 return RValue::get(
5928 case Builtin::BI_InterlockedOr8:
5929 case Builtin::BI_InterlockedOr16:
5930 case Builtin::BI_InterlockedOr:
5932 case Builtin::BI_InterlockedXor8:
5933 case Builtin::BI_InterlockedXor16:
5934 case Builtin::BI_InterlockedXor:
5936
5937 case Builtin::BI_bittest64:
5938 case Builtin::BI_bittest:
5939 case Builtin::BI_bittestandcomplement64:
5940 case Builtin::BI_bittestandcomplement:
5941 case Builtin::BI_bittestandreset64:
5942 case Builtin::BI_bittestandreset:
5943 case Builtin::BI_bittestandset64:
5944 case Builtin::BI_bittestandset:
5945 case Builtin::BI_interlockedbittestandreset:
5946 case Builtin::BI_interlockedbittestandreset64:
5947 case Builtin::BI_interlockedbittestandreset64_acq:
5948 case Builtin::BI_interlockedbittestandreset64_rel:
5949 case Builtin::BI_interlockedbittestandreset64_nf:
5950 case Builtin::BI_interlockedbittestandset64:
5951 case Builtin::BI_interlockedbittestandset64_acq:
5952 case Builtin::BI_interlockedbittestandset64_rel:
5953 case Builtin::BI_interlockedbittestandset64_nf:
5954 case Builtin::BI_interlockedbittestandset:
5955 case Builtin::BI_interlockedbittestandset_acq:
5956 case Builtin::BI_interlockedbittestandset_rel:
5957 case Builtin::BI_interlockedbittestandset_nf:
5958 case Builtin::BI_interlockedbittestandreset_acq:
5959 case Builtin::BI_interlockedbittestandreset_rel:
5960 case Builtin::BI_interlockedbittestandreset_nf:
5961 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5962
5963 // These builtins exist to emit regular volatile loads and stores not
5964 // affected by the -fms-volatile setting.
5965 case Builtin::BI__iso_volatile_load8:
5966 case Builtin::BI__iso_volatile_load16:
5967 case Builtin::BI__iso_volatile_load32:
5968 case Builtin::BI__iso_volatile_load64:
5969 return RValue::get(EmitISOVolatileLoad(*this, E));
5970 case Builtin::BI__iso_volatile_store8:
5971 case Builtin::BI__iso_volatile_store16:
5972 case Builtin::BI__iso_volatile_store32:
5973 case Builtin::BI__iso_volatile_store64:
5974 return RValue::get(EmitISOVolatileStore(*this, E));
5975
5976 case Builtin::BI__builtin_ptrauth_sign_constant:
5977 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5978
5979 case Builtin::BI__builtin_ptrauth_auth:
5980 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5981 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5982 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5983 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5984 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5985 case Builtin::BI__builtin_ptrauth_strip: {
5986 // Emit the arguments.
5988 for (auto argExpr : E->arguments())
5989 Args.push_back(EmitScalarExpr(argExpr));
5990
5991 // Cast the value to intptr_t, saving its original type.
5992 llvm::Type *OrigValueType = Args[0]->getType();
5993 if (OrigValueType->isPointerTy())
5994 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5995
5996 switch (BuiltinID) {
5997 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5998 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5999 if (Args[4]->getType()->isPointerTy())
6000 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
6001 [[fallthrough]];
6002
6003 case Builtin::BI__builtin_ptrauth_auth:
6004 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
6005 if (Args[2]->getType()->isPointerTy())
6006 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
6007 break;
6008
6009 case Builtin::BI__builtin_ptrauth_sign_generic_data:
6010 if (Args[1]->getType()->isPointerTy())
6011 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
6012 break;
6013
6014 case Builtin::BI__builtin_ptrauth_blend_discriminator:
6015 case Builtin::BI__builtin_ptrauth_strip:
6016 break;
6017 }
6018
6019 // Call the intrinsic.
6020 auto IntrinsicID = [&]() -> unsigned {
6021 switch (BuiltinID) {
6022 case Builtin::BI__builtin_ptrauth_auth:
6023 return Intrinsic::ptrauth_auth;
6024 case Builtin::BI__builtin_ptrauth_auth_and_resign:
6025 return Intrinsic::ptrauth_resign;
6026 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
6027 return Intrinsic::ptrauth_resign_load_relative;
6028 case Builtin::BI__builtin_ptrauth_blend_discriminator:
6029 return Intrinsic::ptrauth_blend;
6030 case Builtin::BI__builtin_ptrauth_sign_generic_data:
6031 return Intrinsic::ptrauth_sign_generic;
6032 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
6033 return Intrinsic::ptrauth_sign;
6034 case Builtin::BI__builtin_ptrauth_strip:
6035 return Intrinsic::ptrauth_strip;
6036 }
6037 llvm_unreachable("bad ptrauth intrinsic");
6038 }();
6039 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
6040 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
6041
6042 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
6043 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
6044 OrigValueType->isPointerTy()) {
6045 Result = Builder.CreateIntToPtr(Result, OrigValueType);
6046 }
6047 return RValue::get(Result);
6048 }
6049
6050 case Builtin::BI__builtin_get_vtable_pointer: {
6051 const Expr *Target = E->getArg(0);
6052 QualType TargetType = Target->getType();
6053 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
6054 assert(Decl);
6055 auto ThisAddress = EmitPointerWithAlignment(Target);
6056 assert(ThisAddress.isValid());
6057 llvm::Value *VTablePointer =
6059 return RValue::get(VTablePointer);
6060 }
6061
6062 case Builtin::BI__exception_code:
6063 case Builtin::BI_exception_code:
6065 case Builtin::BI__exception_info:
6066 case Builtin::BI_exception_info:
6068 case Builtin::BI__abnormal_termination:
6069 case Builtin::BI_abnormal_termination:
6071 case Builtin::BI_setjmpex:
6072 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
6073 E->getArg(0)->getType()->isPointerType())
6074 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
6075 break;
6076 case Builtin::BI_setjmp:
6077 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
6078 E->getArg(0)->getType()->isPointerType()) {
6079 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
6080 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
6081 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
6082 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
6083 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
6084 }
6085 break;
6086
6087 // C++ std:: builtins.
6088 case Builtin::BImove:
6089 case Builtin::BImove_if_noexcept:
6090 case Builtin::BIforward:
6091 case Builtin::BIforward_like:
6092 case Builtin::BIas_const:
6093 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
6094 case Builtin::BI__GetExceptionInfo: {
6095 if (llvm::GlobalVariable *GV =
6096 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
6097 return RValue::get(GV);
6098 break;
6099 }
6100
6101 case Builtin::BI__fastfail:
6103
6104 case Builtin::BI__builtin_coro_id:
6105 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
6106 case Builtin::BI__builtin_coro_promise:
6107 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
6108 case Builtin::BI__builtin_coro_resume:
6109 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
6110 return RValue::get(nullptr);
6111 case Builtin::BI__builtin_coro_frame:
6112 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
6113 case Builtin::BI__builtin_coro_noop:
6114 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
6115 case Builtin::BI__builtin_coro_free:
6116 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
6117 case Builtin::BI__builtin_coro_destroy:
6118 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
6119 return RValue::get(nullptr);
6120 case Builtin::BI__builtin_coro_done:
6121 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
6122 case Builtin::BI__builtin_coro_alloc:
6123 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
6124 case Builtin::BI__builtin_coro_begin:
6125 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
6126 case Builtin::BI__builtin_coro_end:
6127 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
6128 case Builtin::BI__builtin_coro_suspend:
6129 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
6130 case Builtin::BI__builtin_coro_size:
6131 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
6132 case Builtin::BI__builtin_coro_align:
6133 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
6134
6135 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
6136 case Builtin::BIread_pipe:
6137 case Builtin::BIwrite_pipe: {
6138 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6139 *Arg1 = EmitScalarExpr(E->getArg(1));
6140 CGOpenCLRuntime OpenCLRT(CGM);
6141 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6142 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6143
6144 // Type of the generic packet parameter.
6145 unsigned GenericAS =
6147 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
6148
6149 // Testing which overloaded version we should generate the call for.
6150 if (2U == E->getNumArgs()) {
6151 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
6152 : "__write_pipe_2";
6153 // Creating a generic function type to be able to call with any builtin or
6154 // user defined type.
6155 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
6156 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6157 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
6158 return RValue::get(
6159 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6160 {Arg0, ACast, PacketSize, PacketAlign}));
6161 } else {
6162 assert(4 == E->getNumArgs() &&
6163 "Illegal number of parameters to pipe function");
6164 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
6165 : "__write_pipe_4";
6166
6167 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
6168 Int32Ty, Int32Ty};
6169 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
6170 *Arg3 = EmitScalarExpr(E->getArg(3));
6171 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6172 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
6173 // We know the third argument is an integer type, but we may need to cast
6174 // it to i32.
6175 if (Arg2->getType() != Int32Ty)
6176 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
6177 return RValue::get(
6178 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6179 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
6180 }
6181 }
6182 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
6183 // functions
6184 case Builtin::BIreserve_read_pipe:
6185 case Builtin::BIreserve_write_pipe:
6186 case Builtin::BIwork_group_reserve_read_pipe:
6187 case Builtin::BIwork_group_reserve_write_pipe:
6188 case Builtin::BIsub_group_reserve_read_pipe:
6189 case Builtin::BIsub_group_reserve_write_pipe: {
6190 // Composing the mangled name for the function.
6191 const char *Name;
6192 if (BuiltinID == Builtin::BIreserve_read_pipe)
6193 Name = "__reserve_read_pipe";
6194 else if (BuiltinID == Builtin::BIreserve_write_pipe)
6195 Name = "__reserve_write_pipe";
6196 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
6197 Name = "__work_group_reserve_read_pipe";
6198 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
6199 Name = "__work_group_reserve_write_pipe";
6200 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
6201 Name = "__sub_group_reserve_read_pipe";
6202 else
6203 Name = "__sub_group_reserve_write_pipe";
6204
6205 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6206 *Arg1 = EmitScalarExpr(E->getArg(1));
6207 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
6208 CGOpenCLRuntime OpenCLRT(CGM);
6209 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6210 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6211
6212 // Building the generic function prototype.
6213 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
6214 llvm::FunctionType *FTy =
6215 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
6216 // We know the second argument is an integer type, but we may need to cast
6217 // it to i32.
6218 if (Arg1->getType() != Int32Ty)
6219 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
6220 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6221 {Arg0, Arg1, PacketSize, PacketAlign}));
6222 }
6223 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
6224 // functions
6225 case Builtin::BIcommit_read_pipe:
6226 case Builtin::BIcommit_write_pipe:
6227 case Builtin::BIwork_group_commit_read_pipe:
6228 case Builtin::BIwork_group_commit_write_pipe:
6229 case Builtin::BIsub_group_commit_read_pipe:
6230 case Builtin::BIsub_group_commit_write_pipe: {
6231 const char *Name;
6232 if (BuiltinID == Builtin::BIcommit_read_pipe)
6233 Name = "__commit_read_pipe";
6234 else if (BuiltinID == Builtin::BIcommit_write_pipe)
6235 Name = "__commit_write_pipe";
6236 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
6237 Name = "__work_group_commit_read_pipe";
6238 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
6239 Name = "__work_group_commit_write_pipe";
6240 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
6241 Name = "__sub_group_commit_read_pipe";
6242 else
6243 Name = "__sub_group_commit_write_pipe";
6244
6245 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6246 *Arg1 = EmitScalarExpr(E->getArg(1));
6247 CGOpenCLRuntime OpenCLRT(CGM);
6248 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6249 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6250
6251 // Building the generic function prototype.
6252 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
6253 llvm::FunctionType *FTy = llvm::FunctionType::get(
6254 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
6255
6256 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6257 {Arg0, Arg1, PacketSize, PacketAlign}));
6258 }
6259 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
6260 case Builtin::BIget_pipe_num_packets:
6261 case Builtin::BIget_pipe_max_packets: {
6262 const char *BaseName;
6263 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
6264 if (BuiltinID == Builtin::BIget_pipe_num_packets)
6265 BaseName = "__get_pipe_num_packets";
6266 else
6267 BaseName = "__get_pipe_max_packets";
6268 std::string Name = std::string(BaseName) +
6269 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
6270
6271 // Building the generic function prototype.
6272 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6273 CGOpenCLRuntime OpenCLRT(CGM);
6274 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6275 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6276 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
6277 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6278
6279 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6280 {Arg0, PacketSize, PacketAlign}));
6281 }
6282
6283 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
6284 case Builtin::BIto_global:
6285 case Builtin::BIto_local:
6286 case Builtin::BIto_private: {
6287 auto Arg0 = EmitScalarExpr(E->getArg(0));
6288 auto NewArgT = llvm::PointerType::get(
6290 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6291 auto NewRetT = llvm::PointerType::get(
6293 CGM.getContext().getTargetAddressSpace(
6295 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
6296 llvm::Value *NewArg;
6297 if (Arg0->getType()->getPointerAddressSpace() !=
6298 NewArgT->getPointerAddressSpace())
6299 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
6300 else
6301 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
6302 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
6303 auto NewCall =
6304 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
6305 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
6306 ConvertType(E->getType())));
6307 }
6308
6309 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
6310 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
6311 // The code below expands the builtin call to a call to one of the following
6312 // functions that an OpenCL runtime library will have to provide:
6313 // __enqueue_kernel_basic
6314 // __enqueue_kernel_varargs
6315 // __enqueue_kernel_basic_events
6316 // __enqueue_kernel_events_varargs
6317 case Builtin::BIenqueue_kernel: {
6318 StringRef Name; // Generated function call name
6319 unsigned NumArgs = E->getNumArgs();
6320
6321 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6322 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6323 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6324
6325 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6326 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6327 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6328 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6329
6330 // FIXME: Look through the addrspacecast which may exist to the stack
6331 // temporary as a hack.
6332 //
6333 // This is hardcoding the assumed ABI of the target function. This assumes
6334 // direct passing for every argument except NDRange, which is assumed to be
6335 // byval or byref indirect passed.
6336 //
6337 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6338 // through EmitCallArgs to get the correct target ABI.
6339 Range = Range->stripPointerCasts();
6340
6341 llvm::Type *RangePtrTy = Range->getType();
6342
6343 if (NumArgs == 4) {
6344 // The most basic form of the call with parameters:
6345 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6346 Name = "__enqueue_kernel_basic";
6347 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6348 GenericVoidPtrTy};
6349 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6350
6351 auto Info =
6352 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6353 llvm::Value *Kernel =
6354 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6355 llvm::Value *Block =
6356 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6357
6358 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6359 {Queue, Flags, Range, Kernel, Block});
6360 return RValue::get(RTCall);
6361 }
6362 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6363
6364 // Create a temporary array to hold the sizes of local pointer arguments
6365 // for the block. \p First is the position of the first size argument.
6366 auto CreateArrayForSizeVar =
6367 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6368 llvm::APInt ArraySize(32, NumArgs - First);
6370 getContext().getSizeType(), ArraySize, nullptr,
6372 /*IndexTypeQuals=*/0);
6373 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6374 llvm::Value *TmpPtr = Tmp.getPointer();
6375 // The EmitLifetime* pair expect a naked Alloca as their last argument,
6376 // however for cases where the default AS is not the Alloca AS, Tmp is
6377 // actually the Alloca ascasted to the default AS, hence the
6378 // stripPointerCasts()
6379 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6380 llvm::Value *ElemPtr;
6381 EmitLifetimeStart(Alloca);
6382 // Each of the following arguments specifies the size of the corresponding
6383 // argument passed to the enqueued block.
6384 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6385 for (unsigned I = First; I < NumArgs; ++I) {
6386 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6387 auto *GEP =
6388 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6389 if (I == First)
6390 ElemPtr = GEP;
6391 auto *V =
6392 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6393 Builder.CreateAlignedStore(
6394 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6395 }
6396 // Return the Alloca itself rather than a potential ascast as this is only
6397 // used by the paired EmitLifetimeEnd.
6398 return {ElemPtr, Alloca};
6399 };
6400
6401 // Could have events and/or varargs.
6402 if (E->getArg(3)->getType()->isBlockPointerType()) {
6403 // No events passed, but has variadic arguments.
6404 Name = "__enqueue_kernel_varargs";
6405 auto Info =
6406 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6407 llvm::Value *Kernel =
6408 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6409 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6410 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6411
6412 // Create a vector of the arguments, as well as a constant value to
6413 // express to the runtime the number of variadic arguments.
6414 llvm::Value *const Args[] = {Queue, Flags,
6415 Range, Kernel,
6416 Block, ConstantInt::get(IntTy, NumArgs - 4),
6417 ElemPtr};
6418 llvm::Type *const ArgTys[] = {
6419 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6420 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6421
6422 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6423 auto Call = RValue::get(
6424 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6425 EmitLifetimeEnd(TmpPtr);
6426 return Call;
6427 }
6428 // Any calls now have event arguments passed.
6429 if (NumArgs >= 7) {
6430 llvm::PointerType *PtrTy = llvm::PointerType::get(
6431 CGM.getLLVMContext(),
6432 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6433
6434 llvm::Value *NumEvents =
6435 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6436
6437 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
6438 // to be a null pointer constant (including `0` literal), we can take it
6439 // into account and emit null pointer directly.
6440 llvm::Value *EventWaitList = nullptr;
6441 if (E->getArg(4)->isNullPointerConstant(
6443 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6444 } else {
6445 EventWaitList =
6446 E->getArg(4)->getType()->isArrayType()
6448 : EmitScalarExpr(E->getArg(4));
6449 // Convert to generic address space.
6450 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6451 }
6452 llvm::Value *EventRet = nullptr;
6453 if (E->getArg(5)->isNullPointerConstant(
6455 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6456 } else {
6457 EventRet =
6458 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6459 }
6460
6461 auto Info =
6462 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6463 llvm::Value *Kernel =
6464 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6465 llvm::Value *Block =
6466 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6467
6468 std::vector<llvm::Type *> ArgTys = {
6469 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6470 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6471
6472 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6473 NumEvents, EventWaitList, EventRet,
6474 Kernel, Block};
6475
6476 if (NumArgs == 7) {
6477 // Has events but no variadics.
6478 Name = "__enqueue_kernel_basic_events";
6479 llvm::FunctionType *FTy =
6480 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6481 return RValue::get(
6482 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6483 }
6484 // Has event info and variadics
6485 // Pass the number of variadics to the runtime function too.
6486 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6487 ArgTys.push_back(Int32Ty);
6488 Name = "__enqueue_kernel_events_varargs";
6489
6490 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6491 Args.push_back(ElemPtr);
6492 ArgTys.push_back(ElemPtr->getType());
6493
6494 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6495 auto Call = RValue::get(
6496 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6497 EmitLifetimeEnd(TmpPtr);
6498 return Call;
6499 }
6500 llvm_unreachable("Unexpected enqueue_kernel signature");
6501 }
6502 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6503 // parameter.
6504 case Builtin::BIget_kernel_work_group_size: {
6505 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6506 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6507 auto Info =
6508 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6509 Value *Kernel =
6510 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6511 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6513 CGM.CreateRuntimeFunction(
6514 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6515 false),
6516 "__get_kernel_work_group_size_impl"),
6517 {Kernel, Arg}));
6518 }
6519 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6520 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6521 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6522 auto Info =
6523 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6524 Value *Kernel =
6525 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6526 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6528 CGM.CreateRuntimeFunction(
6529 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6530 false),
6531 "__get_kernel_preferred_work_group_size_multiple_impl"),
6532 {Kernel, Arg}));
6533 }
6534 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6535 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6536 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6537 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6538 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6539 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6540 auto Info =
6541 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6542 Value *Kernel =
6543 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6544 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6545 const char *Name =
6546 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6547 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6548 : "__get_kernel_sub_group_count_for_ndrange_impl";
6550 CGM.CreateRuntimeFunction(
6551 llvm::FunctionType::get(
6552 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6553 false),
6554 Name),
6555 {NDRange, Kernel, Block}));
6556 }
6557 case Builtin::BI__builtin_store_half:
6558 case Builtin::BI__builtin_store_halff: {
6559 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
6560 Value *Val = EmitScalarExpr(E->getArg(0));
6562 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6563 Builder.CreateStore(HalfVal, Address);
6564 return RValue::get(nullptr);
6565 }
6566 case Builtin::BI__builtin_load_half: {
6568 Value *HalfVal = Builder.CreateLoad(Address);
6569 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6570 }
6571 case Builtin::BI__builtin_load_halff: {
6573 Value *HalfVal = Builder.CreateLoad(Address);
6574 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6575 }
6576 case Builtin::BI__builtin_printf:
6577 case Builtin::BIprintf:
6578 if (getTarget().getTriple().isNVPTX() ||
6579 getTarget().getTriple().isAMDGCN() ||
6580 (getTarget().getTriple().isSPIRV() &&
6581 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6582 if (getTarget().getTriple().isNVPTX())
6584 if ((getTarget().getTriple().isAMDGCN() ||
6585 getTarget().getTriple().isSPIRV()) &&
6586 getLangOpts().HIP)
6588 }
6589
6590 break;
6591 case Builtin::BI__builtin_canonicalize:
6592 case Builtin::BI__builtin_canonicalizef:
6593 case Builtin::BI__builtin_canonicalizef16:
6594 case Builtin::BI__builtin_canonicalizel:
6595 return RValue::get(
6596 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6597
6598 case Builtin::BI__builtin_thread_pointer: {
6599 if (!getContext().getTargetInfo().isTLSSupported())
6600 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6601
6602 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6603 {GlobalsInt8PtrTy}, {}));
6604 }
6605 case Builtin::BI__builtin_os_log_format:
6606 return emitBuiltinOSLogFormat(*E);
6607
6608 case Builtin::BI__xray_customevent: {
6610 return RValue::getIgnored();
6611
6612 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6614 return RValue::getIgnored();
6615
6616 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6617 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6618 return RValue::getIgnored();
6619
6620 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6621 auto FTy = F->getFunctionType();
6622 auto Arg0 = E->getArg(0);
6623 auto Arg0Val = EmitScalarExpr(Arg0);
6624 auto Arg0Ty = Arg0->getType();
6625 auto PTy0 = FTy->getParamType(0);
6626 if (PTy0 != Arg0Val->getType()) {
6627 if (Arg0Ty->isArrayType())
6628 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6629 else
6630 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6631 }
6632 auto Arg1 = EmitScalarExpr(E->getArg(1));
6633 auto PTy1 = FTy->getParamType(1);
6634 if (PTy1 != Arg1->getType())
6635 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6636 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6637 }
6638
6639 case Builtin::BI__xray_typedevent: {
6640 // TODO: There should be a way to always emit events even if the current
6641 // function is not instrumented. Losing events in a stream can cripple
6642 // a trace.
6644 return RValue::getIgnored();
6645
6646 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6648 return RValue::getIgnored();
6649
6650 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6651 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6652 return RValue::getIgnored();
6653
6654 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6655 auto FTy = F->getFunctionType();
6656 auto Arg0 = EmitScalarExpr(E->getArg(0));
6657 auto PTy0 = FTy->getParamType(0);
6658 if (PTy0 != Arg0->getType())
6659 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6660 auto Arg1 = E->getArg(1);
6661 auto Arg1Val = EmitScalarExpr(Arg1);
6662 auto Arg1Ty = Arg1->getType();
6663 auto PTy1 = FTy->getParamType(1);
6664 if (PTy1 != Arg1Val->getType()) {
6665 if (Arg1Ty->isArrayType())
6666 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6667 else
6668 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6669 }
6670 auto Arg2 = EmitScalarExpr(E->getArg(2));
6671 auto PTy2 = FTy->getParamType(2);
6672 if (PTy2 != Arg2->getType())
6673 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6674 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6675 }
6676
6677 case Builtin::BI__builtin_ms_va_start:
6678 case Builtin::BI__builtin_ms_va_end:
6679 return RValue::get(
6681 BuiltinID == Builtin::BI__builtin_ms_va_start));
6682
6683 case Builtin::BI__builtin_ms_va_copy: {
6684 // Lower this manually. We can't reliably determine whether or not any
6685 // given va_copy() is for a Win64 va_list from the calling convention
6686 // alone, because it's legal to do this from a System V ABI function.
6687 // With opaque pointer types, we won't have enough information in LLVM
6688 // IR to determine this from the argument types, either. Best to do it
6689 // now, while we have enough information.
6690 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6691 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6692
6693 DestAddr = DestAddr.withElementType(Int8PtrTy);
6694 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6695
6696 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6697 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6698 }
6699
6700 case Builtin::BI__builtin_get_device_side_mangled_name: {
6701 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6702 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6703 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6704 return RValue::get(Str.getPointer());
6705 }
6706 }
6707
6708 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6709 // the call using the normal call path, but using the unmangled
6710 // version of the function name.
6711 const auto &BI = getContext().BuiltinInfo;
6712 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6713 BI.isLibFunction(BuiltinID))
6714 return emitLibraryCall(*this, FD, E,
6715 CGM.getBuiltinLibFunction(FD, BuiltinID));
6716
6717 // If this is a predefined lib function (e.g. malloc), emit the call
6718 // using exactly the normal call path.
6719 if (BI.isPredefinedLibFunction(BuiltinID))
6720 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6721
6722 // Check that a call to a target specific builtin has the correct target
6723 // features.
6724 // This is down here to avoid non-target specific builtins, however, if
6725 // generic builtins start to require generic target features then we
6726 // can move this up to the beginning of the function.
6727 checkTargetFeatures(E, FD);
6728
6729 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6730 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6731
6732 // See if we have a target specific intrinsic.
6733 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6734 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6735 StringRef Prefix =
6736 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6737 if (!Prefix.empty()) {
6738 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6739 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6740 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6741 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6742 // NOTE we don't need to perform a compatibility flag check here since the
6743 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
6744 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
6745 if (IntrinsicID == Intrinsic::not_intrinsic)
6746 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6747 }
6748
6749 if (IntrinsicID != Intrinsic::not_intrinsic) {
6751
6752 // Find out if any arguments are required to be integer constant
6753 // expressions.
6754 unsigned ICEArguments = 0;
6756 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6757 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6758
6759 Function *F = CGM.getIntrinsic(IntrinsicID);
6760 llvm::FunctionType *FTy = F->getFunctionType();
6761
6762 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6763 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6764 // If the intrinsic arg type is different from the builtin arg type
6765 // we need to do a bit cast.
6766 llvm::Type *PTy = FTy->getParamType(i);
6767 if (PTy != ArgValue->getType()) {
6768 // XXX - vector of pointers?
6769 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6770 if (PtrTy->getAddressSpace() !=
6771 ArgValue->getType()->getPointerAddressSpace()) {
6772 ArgValue = Builder.CreateAddrSpaceCast(
6773 ArgValue, llvm::PointerType::get(getLLVMContext(),
6774 PtrTy->getAddressSpace()));
6775 }
6776 }
6777
6778 // Cast vector type (e.g., v256i32) to x86_amx, this only happen
6779 // in amx intrinsics.
6780 if (PTy->isX86_AMXTy())
6781 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6782 {ArgValue->getType()}, {ArgValue});
6783 else
6784 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6785 }
6786
6787 Args.push_back(ArgValue);
6788 }
6789
6790 Value *V = Builder.CreateCall(F, Args);
6791 QualType BuiltinRetType = E->getType();
6792
6793 llvm::Type *RetTy = VoidTy;
6794 if (!BuiltinRetType->isVoidType())
6795 RetTy = ConvertType(BuiltinRetType);
6796
6797 if (RetTy != V->getType()) {
6798 // XXX - vector of pointers?
6799 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6800 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6801 V = Builder.CreateAddrSpaceCast(
6802 V, llvm::PointerType::get(getLLVMContext(),
6803 PtrTy->getAddressSpace()));
6804 }
6805 }
6806
6807 // Cast x86_amx to vector type (e.g., v256i32), this only happen
6808 // in amx intrinsics.
6809 if (V->getType()->isX86_AMXTy())
6810 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6811 {V});
6812 else
6813 V = Builder.CreateBitCast(V, RetTy);
6814 }
6815
6816 if (RetTy->isVoidTy())
6817 return RValue::get(nullptr);
6818
6819 return RValue::get(V);
6820 }
6821
6822 // Some target-specific builtins can have aggregate return values, e.g.
6823 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6824 // ReturnValue to be non-null, so that the target-specific emission code can
6825 // always just emit into it.
6827 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6828 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6829 ReturnValue = ReturnValueSlot(DestPtr, false);
6830 }
6831
6832 // Now see if we can emit a target-specific builtin.
6833 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6834 switch (EvalKind) {
6835 case TEK_Scalar:
6836 if (V->getType()->isVoidTy())
6837 return RValue::get(nullptr);
6838 return RValue::get(V);
6839 case TEK_Aggregate:
6840 return RValue::getAggregate(ReturnValue.getAddress(),
6841 ReturnValue.isVolatile());
6842 case TEK_Complex:
6843 llvm_unreachable("No current target builtin returns complex");
6844 }
6845 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6846 }
6847
6848 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6849 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6850 switch (EvalKind) {
6851 case TEK_Scalar:
6852 if (V->getType()->isVoidTy())
6853 return RValue::get(nullptr);
6854 return RValue::get(V);
6855 case TEK_Aggregate:
6856 return RValue::getAggregate(ReturnValue.getAddress(),
6857 ReturnValue.isVolatile());
6858 case TEK_Complex:
6859 llvm_unreachable("No current hlsl builtin returns complex");
6860 }
6861 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6862 }
6863
6864 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6865 return EmitHipStdParUnsupportedBuiltin(this, FD);
6866
6867 ErrorUnsupported(E, "builtin function");
6868
6869 // Unknown builtin, for now just dump it out and return undef.
6870 return GetUndefRValue(E->getType());
6871}
6872
6873namespace {
6874struct BuiltinAlignArgs {
6875 llvm::Value *Src = nullptr;
6876 llvm::Type *SrcType = nullptr;
6877 llvm::Value *Alignment = nullptr;
6878 llvm::Value *Mask = nullptr;
6879 llvm::IntegerType *IntType = nullptr;
6880
6881 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6882 QualType AstType = E->getArg(0)->getType();
6883 if (AstType->isArrayType())
6884 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6885 else
6886 Src = CGF.EmitScalarExpr(E->getArg(0));
6887 SrcType = Src->getType();
6888 if (SrcType->isPointerTy()) {
6889 IntType = IntegerType::get(
6890 CGF.getLLVMContext(),
6891 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6892 } else {
6893 assert(SrcType->isIntegerTy());
6894 IntType = cast<llvm::IntegerType>(SrcType);
6895 }
6896 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6897 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6898 auto *One = llvm::ConstantInt::get(IntType, 1);
6899 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6900 }
6901};
6902} // namespace
6903
6904/// Generate (x & (y-1)) == 0.
6906 BuiltinAlignArgs Args(E, *this);
6907 llvm::Value *SrcAddress = Args.Src;
6908 if (Args.SrcType->isPointerTy())
6909 SrcAddress =
6910 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6911 return RValue::get(Builder.CreateICmpEQ(
6912 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6913 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6914}
6915
6916/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6917/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6918/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6920 BuiltinAlignArgs Args(E, *this);
6921 llvm::Value *SrcForMask = Args.Src;
6922 if (AlignUp) {
6923 // When aligning up we have to first add the mask to ensure we go over the
6924 // next alignment value and then align down to the next valid multiple.
6925 // By adding the mask, we ensure that align_up on an already aligned
6926 // value will not change the value.
6927 if (Args.Src->getType()->isPointerTy()) {
6928 if (getLangOpts().PointerOverflowDefined)
6929 SrcForMask =
6930 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6931 else
6932 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6933 /*SignedIndices=*/true,
6934 /*isSubtraction=*/false,
6935 E->getExprLoc(), "over_boundary");
6936 } else {
6937 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6938 }
6939 }
6940 // Invert the mask to only clear the lower bits.
6941 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6942 llvm::Value *Result = nullptr;
6943 if (Args.Src->getType()->isPointerTy()) {
6944 Result = Builder.CreateIntrinsic(
6945 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6946 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6947 } else {
6948 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6949 }
6950 assert(Result->getType() == Args.SrcType);
6951 return RValue::get(Result);
6952}
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering)
Utility to insert an atomic cmpxchg instruction.
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
static StringRef getTriple(const Command &Job)
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:798
Builtin::Context & BuiltinInfo
Definition ASTContext.h:800
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3784
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4144
Expr * getRHS() const
Definition Expr.h:4093
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
arg_range arguments()
Definition Expr.h:3198
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:146
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:153
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:190
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:408
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:179
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:138
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:356
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
llvm::Value * getPipeElemAlign(const Expr *PipeArg)
llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2812
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1198
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5203
llvm::Value * EmitSEHAbnormalTermination()
RValue emitStdcFirstBit(const CallExpr *E, llvm::Intrinsic::ID IntID, bool InvertArg)
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2360
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
RValue emitStdcBitWidthMinus(const CallExpr *E, llvm::Intrinsic::ID IntID, bool IsPop)
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:521
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4035
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7205
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3925
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4699
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2157
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6566
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7196
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:237
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:205
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4183
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1321
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2242
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5359
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:4488
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4618
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2353
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1613
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
RValue emitStdcCountIntrinsic(const CallExpr *E, llvm::Intrinsic::ID IntID, bool InvertArg, bool IsPop=false)
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:793
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1596
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:194
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4603
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4525
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2276
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1252
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:428
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4513
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1712
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2199
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1805
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query to whether or not target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3325
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4437
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3486
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3516
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:460
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:601
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:585
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:233
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3095
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:838
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4068
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:226
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3175
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4857
Represents a function declaration or definition.
Definition Decl.h:2015
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2812
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3764
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5604
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2202
PipeType - OpenCL20.
Definition TypeBase.h:8249
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isUnion() const
Definition Decl.h:3943
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:748
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1866
bool isBlockPointerType() const
Definition TypeBase.h:8688
bool isVoidType() const
Definition TypeBase.h:9034
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2231
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8767
bool isCountAttributedType() const
Definition Type.cpp:743
bool isPointerType() const
Definition TypeBase.h:8668
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9078
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1923
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4225
QualType getElementType() const
Definition TypeBase.h:4239
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:453
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1761
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742