clang 23.0.0git
CGBuiltin.cpp
Go to the documentation of this file.
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "ABIInfo.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenCLRuntime.h"
20#include "CGRecordLayout.h"
21#include "CGValue.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "PatternInit.h"
26#include "TargetInfo.h"
27#include "clang/AST/OSLog.h"
31#include "llvm/IR/InlineAsm.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/IntrinsicsX86.h"
35#include "llvm/IR/MatrixBuilder.h"
36#include "llvm/Support/ConvertUTF.h"
37#include "llvm/Support/ScopedPrinter.h"
38#include <optional>
39#include <utility>
40
41using namespace clang;
42using namespace CodeGen;
43using namespace llvm;
44
45/// Some builtins do not have library implementation on some targets and
46/// are instead emitted as LLVM IRs by some target builtin emitters.
47/// FIXME: Remove this when library support is added
48static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
49 const Builtin::Context &BI,
50 const CodeGenFunction &CGF) {
// Only applies when errno-setting math is disabled.
// NOTE(review): the remainder of this condition (original lines 52-54) is
// not visible in this excerpt — confirm against upstream before editing.
51 if (!CGF.CGM.getLangOpts().MathErrno &&
// logb/scalbn (float and double variants) are the builtins currently
// lowered directly to IR instead of a libcall.
55 switch (BuiltinID) {
56 default:
57 return false;
58 case Builtin::BIlogbf:
59 case Builtin::BI__builtin_logbf:
60 case Builtin::BIlogb:
61 case Builtin::BI__builtin_logb:
62 case Builtin::BIscalbnf:
63 case Builtin::BI__builtin_scalbnf:
64 case Builtin::BIscalbn:
65 case Builtin::BI__builtin_scalbn:
66 return true;
67 }
68 }
// Otherwise fall back to the library implementation.
69 return false;
70}
71
// Dispatch a target-specific builtin to the per-architecture emitter on
// CGF (EmitARMBuiltinExpr, EmitX86BuiltinExpr, ...); returns nullptr for
// architectures with no emitter. NOTE(review): the opening signature line
// (original line 72) is not visible in this excerpt; the body uses CGF as
// a CodeGenFunction pointer.
73 unsigned BuiltinID, const CallExpr *E,
74 ReturnValueSlot ReturnValue,
75 llvm::Triple::ArchType Arch) {
76 // When compiling in HipStdPar mode we have to be conservative in rejecting
77 // target specific features in the FE, and defer the possible error to the
78 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
79 // referenced by an accelerator executable function, we emit an error.
80 // Returning nullptr here leads to the builtin being handled in
81 // EmitStdParUnsupportedBuiltin.
82 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
83 Arch != CGF->getTarget().getTriple().getArch())
84 return nullptr;
85
86 switch (Arch) {
87 case llvm::Triple::arm:
88 case llvm::Triple::armeb:
89 case llvm::Triple::thumb:
90 case llvm::Triple::thumbeb:
91 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
92 case llvm::Triple::aarch64:
93 case llvm::Triple::aarch64_32:
94 case llvm::Triple::aarch64_be:
95 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
96 case llvm::Triple::bpfeb:
97 case llvm::Triple::bpfel:
98 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
99 case llvm::Triple::dxil:
100 return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
101 case llvm::Triple::x86:
102 case llvm::Triple::x86_64:
103 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
104 case llvm::Triple::ppc:
105 case llvm::Triple::ppcle:
106 case llvm::Triple::ppc64:
107 case llvm::Triple::ppc64le:
108 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
109 case llvm::Triple::r600:
110 case llvm::Triple::amdgcn:
111 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
112 case llvm::Triple::systemz:
113 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
114 case llvm::Triple::nvptx:
115 case llvm::Triple::nvptx64:
116 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
117 case llvm::Triple::wasm32:
118 case llvm::Triple::wasm64:
119 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
120 case llvm::Triple::hexagon:
121 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
122 case llvm::Triple::riscv32:
123 case llvm::Triple::riscv64:
124 case llvm::Triple::riscv32be:
125 case llvm::Triple::riscv64be:
126 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
// SPIR-V compiled for AMDHSA reuses the AMDGPU builtin emitter.
127 case llvm::Triple::spirv32:
128 case llvm::Triple::spirv64:
129 if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
130 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
131 [[fallthrough]];
132 case llvm::Triple::spirv:
133 return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
134 default:
135 return nullptr;
136 }
137}
138
// Entry point for emitting a target builtin. Aux-target builtins (e.g.
// host builtins encountered while compiling device code) are routed to the
// aux triple's architecture emitter; everything else goes to the current
// target's arch. NOTE(review): the signature and parts of the aux-target
// call (original lines 139, 141, 144) are not visible in this excerpt.
140 const CallExpr *E,
142 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
143 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
// Strip the aux-builtin bias and dispatch on the aux target's arch.
145 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
146 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
147 }
148
149 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
150 getTarget().getTriple().getArch());
151}
152
// Emit the -ftrivial-auto-var-init memset for a builtin alloca result:
// AI is the alloca, Size its byte count, filled with zero or the target's
// init pattern depending on the language option.
153static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
154 Align AlignmentInBytes) {
155 ConstantInt *Byte;
// NOTE(review): the case labels of this switch (the TrivialAutoVarInitKind
// enumerators) fall on lines not visible in this excerpt.
156 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
158 // Nothing to initialize.
159 return;
// Zero initialization: fill with 0x00.
161 Byte = CGF.Builder.getInt8(0x00);
162 break;
// Pattern initialization: fill with the target's byte-sized pattern.
164 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
165 Byte = llvm::dyn_cast<llvm::ConstantInt>(
166 initializationPatternFor(CGF.CGM, Int8));
167 break;
168 }
169 }
// -ftrivial-auto-var-init-stop-after may suppress this particular init.
170 if (CGF.CGM.stopAutoInit())
171 return;
172 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
// Tag the store so optimization remarks can identify compiler-inserted
// auto-initialization.
173 I->addAnnotationMetadata("auto-init");
174}
175
176/// getBuiltinLibFunction - Given a builtin id for a function like
177/// "__builtin_fabsf", return a Function* for "fabsf".
// NOTE(review): the opening signature line (original line 178) and the
// declaration of `Name` (original line 185) are not visible in this
// excerpt; the body reads like a CodeGenModule member taking FD/BuiltinID.
179 unsigned BuiltinID) {
180 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
181
182 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
183 // to build this up so provide a small stack buffer to handle the vast
184 // majority of names.
186 GlobalDecl D(FD);
187
188 // TODO: This list should be expanded or refactored after all GCC-compatible
189 // std libcall builtins are implemented.
190 static const SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
191 {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
192 {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
193 {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
194 {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
195 {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
196 {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
197 {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
198 {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
199 {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
200 {Builtin::BI__builtin_printf, "__printfieee128"},
201 {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
202 {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
203 {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
204 {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
205 {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
206 {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
207 {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
208 {Builtin::BI__builtin_scanf, "__scanfieee128"},
209 {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
210 {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
211 {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
212 {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
213 {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
214 };
215
216 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
217 // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
218 // if it is 64-bit 'long double' mode.
219 static const SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
220 {Builtin::BI__builtin_frexpl, "frexp"},
221 {Builtin::BI__builtin_ldexpl, "ldexp"},
222 {Builtin::BI__builtin_modfl, "modf"},
223 };
224
225 // If the builtin has been declared explicitly with an assembler label,
226 // use the mangled name. This differs from the plain label on platforms
227 // that prefix labels.
228 if (FD->hasAttr<AsmLabelAttr>())
229 Name = getMangledName(D);
230 else {
231 // TODO: This mutation should also be applied to other targets other than
232 // PPC, after backend supports IEEE 128-bit style libcalls.
233 if (getTriple().isPPC64() &&
234 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
235 F128Builtins.contains(BuiltinID))
236 Name = F128Builtins.lookup(BuiltinID);
237 else if (getTriple().isOSAIX() &&
238 &getTarget().getLongDoubleFormat() ==
239 &llvm::APFloat::IEEEdouble() &&
240 AIXLongDouble64Builtins.contains(BuiltinID))
241 Name = AIXLongDouble64Builtins.lookup(BuiltinID);
242 else
// substr(10) drops the "__builtin_" prefix (10 characters).
243 Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
244 }
245
246 llvm::FunctionType *Ty =
247 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
248
249 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
250}
251
252/// Emit the conversions required to turn the given value into an
253/// integer of the given size.
254Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
255 QualType T, llvm::IntegerType *IntType) {
256 V = CGF.EmitToMemory(V, T);
257
258 if (V->getType()->isPointerTy())
259 return CGF.Builder.CreatePtrToInt(V, IntType);
260
261 assert(V->getType() == IntType);
262 return V;
263}
264
265Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
266 QualType T, llvm::Type *ResultType) {
267 V = CGF.EmitFromMemory(V, T);
268
269 if (ResultType->isPointerTy())
270 return CGF.Builder.CreateIntToPtr(V, ResultType);
271
272 assert(V->getType() == ResultType);
273 return V;
274}
275
// Validate that the pointer operand (arg 0) of a __sync/__atomic builtin is
// naturally aligned for its element size; warn and clamp to natural
// alignment otherwise. NOTE(review): the signature (original line 276) and
// the pointer branch of the ?: expression (original line 281 — presumably
// where Ctx is used) are not visible in this excerpt.
277 ASTContext &Ctx = CGF.getContext();
278 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
279 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
280 unsigned Bytes = Ptr.getElementType()->isPointerTy()
282 : DL.getTypeStoreSize(Ptr.getElementType());
283 unsigned Align = Ptr.getAlignment().getQuantity();
284 if (Align % Bytes != 0) {
285 DiagnosticsEngine &Diags = CGF.CGM.getDiags();
286 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
287 // Force address to be at least naturally-aligned.
288 return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
289 }
290 return Ptr;
291}
292
293/// Utility to insert an atomic instruction based on Intrinsic::ID
294/// and the expression node.
// NOTE(review): the opening signature line (original line 295) is not
// visible in this excerpt.
296 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
297 AtomicOrdering Ordering) {
298
// The builtin's result type must match the pointee of arg 0 and the type
// of arg 1 (modulo qualifiers).
299 QualType T = E->getType();
300 assert(E->getArg(0)->getType()->isPointerType());
301 assert(CGF.getContext().hasSameUnqualifiedType(T,
302 E->getArg(0)->getType()->getPointeeType()));
303 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
304
305 Address DestAddr = CheckAtomicAlignment(CGF, E);
306
// atomicrmw operates on integers: round-trip the operand and result
// through EmitToInt/EmitFromInt.
307 llvm::IntegerType *IntType = llvm::IntegerType::get(
308 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
309
310 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
311 llvm::Type *ValueType = Val->getType();
312 Val = EmitToInt(CGF, Val, T, IntType);
313
314 llvm::Value *Result =
315 CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
316 return EmitFromInt(CGF, Result, T, ValueType);
317}
318
// Emit a nontemporal store builtin: store arg 0 through the pointer arg 1
// with the nontemporal hint set. NOTE(review): the signature and the line
// computing `Addr` (original lines 319/321) are not visible in this excerpt.
320 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
322
323 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
324 LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
325 LV.setNontemporal(true);
326 CGF.EmitStoreOfScalar(Val, LV, false);
// The store builtin produces no value.
327 return nullptr;
328}
329
// Emit a nontemporal load builtin: load the builtin's result type with the
// nontemporal hint set. NOTE(review): the signature and the line computing
// `Addr` (original lines 330/331) are not visible in this excerpt.
332
333 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
334 LV.setNontemporal(true);
335 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
336}
337
// Thin wrapper over MakeBinaryAtomicValue that packages the result as an
// RValue. NOTE(review): the opening signature line (original line 338) is
// not visible in this excerpt; the ordering argument presumably defaults
// inside MakeBinaryAtomicValue's signature — confirm upstream.
339 llvm::AtomicRMWInst::BinOp Kind,
340 const CallExpr *E) {
341 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
342}
343
344/// Utility to insert an atomic instruction based Intrinsic::ID and
345/// the expression node, where the return value is the result of the
346/// operation.
// NOTE(review): the opening signature line (original line 347) is not
// visible in this excerpt.
348 llvm::AtomicRMWInst::BinOp Kind,
349 const CallExpr *E,
350 Instruction::BinaryOps Op,
351 bool Invert = false) {
352 QualType T = E->getType();
353 assert(E->getArg(0)->getType()->isPointerType());
354 assert(CGF.getContext().hasSameUnqualifiedType(T,
355 E->getArg(0)->getType()->getPointeeType()));
356 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
357
358 Address DestAddr = CheckAtomicAlignment(CGF, E);
359
360 llvm::IntegerType *IntType = llvm::IntegerType::get(
361 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
362
363 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
364 llvm::Type *ValueType = Val->getType();
365 Val = EmitToInt(CGF, Val, T, IntType);
366
// atomicrmw yields the OLD value; re-apply Op to produce the post-op
// value the __sync_*_and_fetch family returns.
367 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
368 Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
369 Result = CGF.Builder.CreateBinOp(Op, Result, Val);
// Invert (xor with all-ones) produces the complemented result, e.g. for
// nand-style operations.
370 if (Invert)
371 Result =
372 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
373 llvm::ConstantInt::getAllOnesValue(IntType));
374 Result = EmitFromInt(CGF, Result, T, ValueType);
375 return RValue::get(Result);
376}
377
378/// Utility to insert an atomic cmpxchg instruction.
379///
380/// @param CGF The current codegen function.
381/// @param E Builtin call expression to convert to cmpxchg.
382/// arg0 - address to operate on
383/// arg1 - value to compare with
384/// arg2 - new value
385/// @param ReturnBool Specifies whether to return success flag of
386/// cmpxchg result or the old value.
387///
388/// @returns result of cmpxchg, according to ReturnBool
389///
390/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
391/// invoke the function EmitAtomicCmpXchgForMSIntrin.
// NOTE(review): the opening signature line (original line 392) is not
// visible in this excerpt.
393 bool ReturnBool,
394 llvm::AtomicOrdering SuccessOrdering,
395 llvm::AtomicOrdering FailureOrdering) {
// When returning the success flag, E's own type is the flag type, so take
// the operand type from arg 1 instead.
396 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
397 Address DestAddr = CheckAtomicAlignment(CGF, E);
398
399 llvm::IntegerType *IntType = llvm::IntegerType::get(
400 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
401
402 Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
403 llvm::Type *ValueType = Cmp->getType();
404 Cmp = EmitToInt(CGF, Cmp, T, IntType);
405 Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
406
// NOTE(review): the line creating the cmpxchg pair (original line 407) is
// not visible in this excerpt; `Pair` is its {old value, success} result.
408 DestAddr, Cmp, New, SuccessOrdering, FailureOrdering);
409 if (ReturnBool)
410 // Extract boolean success flag and zext it to int.
411 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
412 CGF.ConvertType(E->getType()));
413 else
414 // Extract old value and emit it using the same type as compare value.
415 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
416 ValueType);
417}
418
419/// This function should be invoked to emit atomic cmpxchg for Microsoft's
420/// _InterlockedCompareExchange* intrinsics which have the following signature:
421/// T _InterlockedCompareExchange(T volatile *Destination,
422/// T Exchange,
423/// T Comparand);
424///
425/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
426/// cmpxchg *Destination, Comparand, Exchange.
427/// So we need to swap Comparand and Exchange when invoking
428/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
429/// function MakeAtomicCmpXchgValue since it expects the arguments to be
430/// already swapped.
431
432static
// NOTE(review): the rest of the signature (original line 433) and the
// first hasSameUnqualifiedType assert's opening (line 436) are not visible
// in this excerpt.
434 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
435 assert(E->getArg(0)->getType()->isPointerType());
437 E->getType(), E->getArg(0)->getType()->getPointeeType()));
438 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
439 E->getArg(1)->getType()));
440 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
441 E->getArg(2)->getType()));
442
443 Address DestAddr = CheckAtomicAlignment(CGF, E);
444
445 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
446 auto *RTy = Exchange->getType();
447
448 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
449
// Pointer-typed exchanges are performed as pointer-width integers and the
// result converted back below.
450 if (RTy->isPointerTy()) {
451 Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
452 Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
453 }
454
455 // For Release ordering, the failure ordering should be Monotonic.
456 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
457 AtomicOrdering::Monotonic :
458 SuccessOrdering;
459
460 // The atomic instruction is marked volatile for consistency with MSVC. This
461 // blocks the few atomics optimizations that LLVM has. If we want to optimize
462 // _Interlocked* operations in the future, we will have to remove the volatile
463 // marker.
464 auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
465 DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
466 CmpXchg->setVolatile(true);
467
// _InterlockedCompareExchange returns the OLD value, element 0 of the
// cmpxchg result pair.
468 auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
469 if (RTy->isPointerTy()) {
470 Result = CGF.Builder.CreateIntToPtr(Result, RTy);
471 }
472
473 return Result;
474}
475
476// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
477// prototyped like this:
478//
479// unsigned char _InterlockedCompareExchange128...(
480// __int64 volatile * _Destination,
481// __int64 _ExchangeHigh,
482// __int64 _ExchangeLow,
483// __int64 * _ComparandResult);
484//
485// Note that Destination is assumed to be at least 16-byte aligned, despite
486// being typed int64.
487
// NOTE(review): the opening signature line (original line 488) is not
// visible in this excerpt.
489 const CallExpr *E,
490 AtomicOrdering SuccessOrdering) {
491 assert(E->getNumArgs() == 4);
492 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
493 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
494 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
495 Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));
496
497 assert(DestPtr->getType()->isPointerTy());
498 assert(!ExchangeHigh->getType()->isPointerTy());
499 assert(!ExchangeLow->getType()->isPointerTy());
500
501 // For Release ordering, the failure ordering should be Monotonic.
502 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
503 ? AtomicOrdering::Monotonic
504 : SuccessOrdering;
505
506 // Convert to i128 pointers and values. Alignment is also overridden for
507 // destination pointer.
508 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
// NOTE(review): the DestAddr constructor's alignment argument (original
// line 510 — presumably the documented 16-byte alignment) is not visible
// in this excerpt.
509 Address DestAddr(DestPtr, Int128Ty,
511 ComparandAddr = ComparandAddr.withElementType(Int128Ty);
512
513 // (((i128)hi) << 64) | ((i128)lo)
514 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
515 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
516 ExchangeHigh =
517 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
518 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
519
520 // Load the comparand for the instruction.
521 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);
522
523 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
524 SuccessOrdering, FailureOrdering);
525
526 // The atomic instruction is marked volatile for consistency with MSVC. This
527 // blocks the few atomics optimizations that LLVM has. If we want to optimize
528 // _Interlocked* operations in the future, we will have to remove the volatile
529 // marker.
530 CXI->setVolatile(true);
531
532 // Store the result as an outparameter.
533 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
534 ComparandAddr);
535
536 // Get the success boolean and zero extend it to i8.
537 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
538 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
539}
540
// _InterlockedIncrement-style helper: atomically add 1 and return the NEW
// value (atomicrmw yields the old value, hence the trailing add).
// NOTE(review): the opening signature line (original line 541) is not
// visible in this excerpt.
542 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
543 assert(E->getArg(0)->getType()->isPointerType());
544
545 auto *IntTy = CGF.ConvertType(E->getType());
546 Address DestAddr = CheckAtomicAlignment(CGF, E);
547 auto *Result = CGF.Builder.CreateAtomicRMW(
548 AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
549 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
550}
551
// _InterlockedDecrement-style helper: atomically subtract 1 and return the
// NEW value (mirror of EmitAtomicIncrementValue above).
// NOTE(review): the opening signature line (original line 552) is not
// visible in this excerpt.
553 CodeGenFunction &CGF, const CallExpr *E,
554 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
555 assert(E->getArg(0)->getType()->isPointerType());
556
557 auto *IntTy = CGF.ConvertType(E->getType());
558 Address DestAddr = CheckAtomicAlignment(CGF, E);
559 auto *Result = CGF.Builder.CreateAtomicRMW(
560 AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
561 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
562}
563
564// Build a plain volatile load.
// Emits a monotonic, volatile, integer-typed load of the pointee size for
// the __iso_volatile_load* family. NOTE(review): the opening signature
// line (original line 565) is not visible in this excerpt.
566 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
567 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
568 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
569 llvm::Type *ITy =
570 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
571 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
572 Load->setAtomic(llvm::AtomicOrdering::Monotonic);
573 Load->setVolatile(true);
574 return Load;
575}
576
577// Build a plain volatile store.
// Emits a monotonic, volatile store for the __iso_volatile_store* family.
// NOTE(review): the opening signature line (original line 578) is not
// visible in this excerpt.
579 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
580 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
581 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
582 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
583 llvm::StoreInst *Store =
584 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
585 Store->setAtomic(llvm::AtomicOrdering::Monotonic);
586 Store->setVolatile(true);
587 return Store;
588}
589
590// Emit a simple mangled intrinsic that has 1 argument and a return type
591// matching the argument type. Depending on mode, this may be a constrained
592// floating-point intrinsic.
// NOTE(review): the opening signature line (original line 593) is not
// visible in this excerpt.
594 const CallExpr *E, unsigned IntrinsicID,
595 unsigned ConstrainedIntrinsicID) {
596 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
597
// Scope the call's FP options (rounding/exception behavior) via RAII.
598 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
599 if (CGF.Builder.getIsFPConstrained()) {
600 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
601 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
602 } else {
603 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
604 return CGF.Builder.CreateCall(F, Src0);
605 }
606}
607
608// Emit an intrinsic that has 2 operands of the same type as its result.
609// Depending on mode, this may be a constrained floating-point intrinsic.
// NOTE(review): the opening signature line (original line 610) is not
// visible in this excerpt.
611 const CallExpr *E, unsigned IntrinsicID,
612 unsigned ConstrainedIntrinsicID) {
613 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
614 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
615
616 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
617 if (CGF.Builder.getIsFPConstrained()) {
618 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
619 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
620 } else {
621 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
622 return CGF.Builder.CreateCall(F, { Src0, Src1 });
623 }
624}
625
626// Has second type mangled argument.
// Like the binary emitter above, but the intrinsic is overloaded on BOTH
// operand types (e.g. ldexp-style value+exponent). NOTE(review): the
// function name line (original line 628) is not visible in this excerpt.
627static Value *
629 Intrinsic::ID IntrinsicID,
630 Intrinsic::ID ConstrainedIntrinsicID) {
631 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
632 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
633
634 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
635 if (CGF.Builder.getIsFPConstrained()) {
636 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
637 {Src0->getType(), Src1->getType()});
638 return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
639 }
640
641 Function *F =
642 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
643 return CGF.Builder.CreateCall(F, {Src0, Src1});
644}
645
646// Emit an intrinsic that has 3 operands of the same type as its result.
647// Depending on mode, this may be a constrained floating-point intrinsic.
// NOTE(review): the opening signature line (original line 648) is not
// visible in this excerpt.
649 const CallExpr *E, unsigned IntrinsicID,
650 unsigned ConstrainedIntrinsicID) {
651 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
652 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
653 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
654
655 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
656 if (CGF.Builder.getIsFPConstrained()) {
657 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
658 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
659 } else {
660 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
661 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
662 }
663}
664
665// Emit an intrinsic that has overloaded integer result and fp operand.
// The intrinsic is mangled on {result type, operand type}. NOTE(review):
// the function name line (original line 667) is not visible in this
// excerpt.
666static Value *
668 unsigned IntrinsicID,
669 unsigned ConstrainedIntrinsicID) {
670 llvm::Type *ResultType = CGF.ConvertType(E->getType());
671 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
672
673 if (CGF.Builder.getIsFPConstrained()) {
674 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
675 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
676 {ResultType, Src0->getType()});
677 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
678 } else {
679 Function *F =
680 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
681 return CGF.Builder.CreateCall(F, Src0);
682 }
683}
684
// Lower a frexp-style builtin: the intrinsic returns an aggregate whose
// element 0 is the value result and element 1 the integer exponent, which
// is stored through the pointer arg 1. NOTE(review): the opening signature
// line (original line 685) is not visible in this excerpt.
686 Intrinsic::ID IntrinsicID) {
687 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
688 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
689
690 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
691 llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
692 llvm::Function *F =
693 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
694 llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
695
// Store the exponent out-parameter, then return the mantissa result.
696 llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
697 LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
698 CGF.EmitStoreOfScalar(Exp, LV);
699
700 return CGF.Builder.CreateExtractValue(Call, 0);
701}
702
703static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
704 Intrinsic::ID IntrinsicID) {
705 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
706 llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
707 llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));
708
709 llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
710 llvm::Value *Call = CGF.Builder.CreateCall(F, Val);
711
712 llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
713 llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);
714
715 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
716 LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
717 LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);
718
719 llvm::StoreInst *StoreSin =
720 CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
721 llvm::StoreInst *StoreCos =
722 CGF.Builder.CreateStore(CosResult, CosLV.getAddress());
723
724 // Mark the two stores as non-aliasing with each other. The order of stores
725 // emitted by this builtin is arbitrary, enforcing a particular order will
726 // prevent optimizations later on.
727 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
728 MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
729 MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
730 MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
731 StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
732 StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
733}
734
735static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
736 Intrinsic::ID IntrinsicID) {
737 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
738 llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));
739
740 llvm::Value *Call =
741 CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);
742
743 llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
744 llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);
745
746 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
747 LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
748 CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);
749
750 return FractionalResult;
751}
752
753/// EmitFAbs - Emit a call to @llvm.fabs().
// NOTE(review): the opening signature line (original line 754) is not
// visible in this excerpt.
755 llvm::CallInst *Call = CGF.Builder.CreateFAbs(V);
// fabs never reads or writes memory; mark it so for the optimizer.
756 Call->setDoesNotAccessMemory();
757 return Call;
758}
759
760/// Emit the computation of the sign bit for a floating point value. Returns
761/// the i1 sign bit value.
// NOTE(review): the opening signature line (original line 762) is not
// visible in this excerpt.
763 LLVMContext &C = CGF.CGM.getLLVMContext();
764
// Bitcast to an integer of the same width; the sign bit is then just the
// integer's sign (tested with an SLT-against-zero compare below).
765 llvm::Type *Ty = V->getType();
766 int Width = Ty->getPrimitiveSizeInBits();
767 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
768 V = CGF.Builder.CreateBitCast(V, IntTy);
769 if (Ty->isPPC_FP128Ty()) {
770 // We want the sign bit of the higher-order double. The bitcast we just
771 // did works as if the double-double was stored to memory and then
772 // read as an i128. The "store" will put the higher-order double in the
773 // lower address in both little- and big-Endian modes, but the "load"
774 // will treat those bits as a different part of the i128: the low bits in
775 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
776 // we need to shift the high bits down to the low before truncating.
777 Width >>= 1;
778 if (CGF.getTarget().isBigEndian()) {
779 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
780 V = CGF.Builder.CreateLShr(V, ShiftCst);
781 }
782 // We are truncating value in order to extract the higher-order
783 // double, which we will be using to extract the sign from.
784 IntTy = llvm::IntegerType::get(C, Width);
785 V = CGF.Builder.CreateTrunc(V, IntTy);
786 }
787 Value *Zero = llvm::Constant::getNullValue(IntTy);
788 return CGF.Builder.CreateICmpSLT(V, Zero);
789}
790
791/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
792/// hidden pointer). This is used to check annotating FP libcalls (that could
793/// set `errno`) with "int" TBAA metadata is safe. If any floating-point
794/// arguments are passed indirectly, setup for the call could be incorrectly
795/// optimized out.
// NOTE(review): the opening signature line (original line 796) is not
// visible in this excerpt; the body reads FnInfo (a CGFunctionInfo).
797 auto IsIndirect = [&](ABIArgInfo const &info) {
798 return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
799 };
// True only when the return AND every argument are passed directly.
800 return !IsIndirect(FnInfo.getReturnInfo()) &&
801 llvm::none_of(FnInfo.arguments(),
802 [&](CGFunctionInfoArgInfo const &ArgInfo) {
803 return IsIndirect(ArgInfo.info);
804 });
805}
806
// Emit a plain library call for a builtin (the non-intrinsic path),
// optionally decorating errno-setting FP math libcalls with "int" TBAA.
// NOTE(review): the opening signature line (original line 807) and two
// condition/initializer lines (original lines 821 and 826) are not visible
// in this excerpt.
808 const CallExpr *E, llvm::Constant *calleeValue) {
809 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
810 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
811 llvm::CallBase *callOrInvoke = nullptr;
812 CGFunctionInfo const *FnInfo = nullptr;
813 RValue Call =
814 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
815 /*Chain=*/nullptr, &callOrInvoke, &FnInfo);
816
817 if (unsigned BuiltinID = FD->getBuiltinID()) {
818 // Check whether a FP math builtin function, such as BI__builtin_expf
819 ASTContext &Context = CGF.getContext();
820 bool ConstWithoutErrnoAndExceptions =
822 // Restrict to target with errno, for example, MacOS doesn't set errno.
823 // TODO: Support builtin function with complex type returned, eg: cacosh
824 if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
825 !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
827 // Emit "int" TBAA metadata on FP math libcalls.
828 clang::QualType IntTy = Context.IntTy;
829 TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
830 CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
831 }
832 }
833 return Call;
834}
835
836/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
837/// depending on IntrinsicID.
838///
839/// \arg CGF The current codegen function.
840/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
841/// \arg X The first argument to the llvm.*.with.overflow.*.
842/// \arg Y The second argument to the llvm.*.with.overflow.*.
843/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
844/// \returns The result (i.e. sum/product) returned by the intrinsic.
// NOTE(review): the opening signature line (original line 845) is not
// visible in this excerpt.
846 const Intrinsic::ID IntrinsicID,
847 llvm::Value *X, llvm::Value *Y,
848 llvm::Value *&Carry) {
849 // Make sure we have integers of the same width.
850 assert(X->getType() == Y->getType() &&
851 "Arguments must be the same type. (Did you forget to make sure both "
852 "arguments have the same integer width?)");
853
// The intrinsic returns {result, overflow-flag}; the flag goes out via
// the Carry reference parameter.
854 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
855 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
856 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
857 return CGF.Builder.CreateExtractValue(Tmp, 0);
858}
859
namespace {
  // Pairs an integer type's bit-width with its signedness; used below when
  // reasoning about the encompassing type for the overflow builtins.
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}
866
867static WidthAndSignedness
869 const clang::QualType Type) {
870 assert(Type->isIntegerType() && "Given type is not an integer.");
871 unsigned Width = context.getIntWidth(Type);
873 return {Width, Signed};
874}
875
876// Given one or more integer types, this function produces an integer type that
877// encompasses them: any value in one of the given types could be expressed in
878// the encompassing type.
879static struct WidthAndSignedness
880EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
881 assert(Types.size() > 0 && "Empty list of types.");
882
883 // If any of the given types is signed, we must return a signed type.
884 bool Signed = false;
885 for (const auto &Type : Types) {
886 Signed |= Type.Signed;
887 }
888
889 // The encompassing type must have a width greater than or equal to the width
890 // of the specified types. Additionally, if the encompassing type is signed,
891 // its width must be strictly greater than the width of any unsigned types
892 // given.
893 unsigned Width = 0;
894 for (const auto &Type : Types) {
895 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
896 if (Width < MinWidth) {
897 Width = MinWidth;
898 }
899 }
900
901 return {Width, Signed};
902}
903
904Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
905 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
906 return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
907 ArgValue);
908}
909
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  if (From == To)
    return true;
  bool MaxToMin = From == 0 && To == 1;
  bool MinToMax = From == 3 && To == 2;
  return MaxToMin || MinToMax;
}
918
919static llvm::Value *
920getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
921 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
922}
923
/// Return the object size of E: as a compile-time constant when the frontend
/// can evaluate it, otherwise via emitBuiltinObjectSize() (which may lower to
/// @llvm.objectsize).
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  if (std::optional<uint64_t> ObjectSize =
  // NOTE(review): the initializer of ObjectSize (the compile-time evaluation
  // of E's object size) was dropped from this view of the file -- confirm
  // against the original source.
    return ConstantInt::get(ResType, *ObjectSize, /*isSigned=*/true);
  return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
}
934
namespace {

/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
class StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  // True while the traversal is inside a unary '&'; cleared again by an
  // intervening subscript or dereference.
  bool AddrOfSeen = false;

public:
  // The index expression of the single subscript seen, if any.
  const Expr *ArrayIndex = nullptr;
  // The type of that subscript's base expression.
  QualType ArrayElementTy;

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (AddrOfSeen && E->getType()->isArrayType())
      // Avoid forms like '&ptr->array'.
      return nullptr;
    return E;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (ArrayIndex)
      // We don't support multiple subscripts.
      return nullptr;

    AddrOfSeen = false; // '&ptr->array[idx]' is okay.
    ArrayIndex = E->getIdx();
    ArrayElementTy = E->getBase()->getType();
    return Visit(E->getBase());
  }
  // Stop at lvalue-to-rvalue conversions; look through every other cast.
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
    AddrOfSeen = true;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
    AddrOfSeen = false;
    return Visit(E->getSubExpr());
  }
  // Only the RHS of a comma expression is relevant; any other binary
  // operator ends the traversal.
  const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
    return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
  }
};

} // end anonymous namespace
986
987/// Find a struct's flexible array member. It may be embedded inside multiple
988/// sub-structs, but must still be the last field.
990 ASTContext &Ctx,
991 const RecordDecl *RD) {
992 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
993 CGF.getLangOpts().getStrictFlexArraysLevel();
994
995 if (RD->isImplicit())
996 return nullptr;
997
998 for (const FieldDecl *FD : RD->fields()) {
1000 Ctx, FD, FD->getType(), StrictFlexArraysLevel,
1001 /*IgnoreTemplateOrMacroSubstitution=*/true))
1002 return FD;
1003
1004 if (const auto *RD = FD->getType()->getAsRecordDecl())
1005 if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
1006 return FD;
1007 }
1008
1009 return nullptr;
1010}
1011
1012/// Calculate the offset of a struct field. It may be embedded inside multiple
1013/// sub-structs.
1014static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
1015 const FieldDecl *FD, int64_t &Offset) {
1016 if (RD->isImplicit())
1017 return false;
1018
1019 // Keep track of the field number ourselves, because the other methods
1020 // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
1021 // is laid out.
1022 uint32_t FieldNo = 0;
1023 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
1024
1025 for (const FieldDecl *Field : RD->fields()) {
1026 if (Field == FD) {
1027 Offset += Layout.getFieldOffset(FieldNo);
1028 return true;
1029 }
1030
1031 if (const auto *RD = Field->getType()->getAsRecordDecl()) {
1032 if (GetFieldOffset(Ctx, RD, FD, Offset)) {
1033 Offset += Layout.getFieldOffset(FieldNo);
1034 return true;
1035 }
1036 }
1037
1038 if (!RD->isUnion())
1039 ++FieldNo;
1040 }
1041
1042 return false;
1043}
1044
1045static std::optional<int64_t>
1046GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
1047 int64_t Offset = 0;
1048
1049 if (GetFieldOffset(Ctx, RD, FD, Offset))
1050 return std::optional<int64_t>(Offset);
1051
1052 return std::nullopt;
1053}
1054
/// Compute the object size of an expression governed by the 'counted_by'
/// attribute. Dispatches to emitCountedByMemberSize() for flexible array
/// members (and pointers into structs containing one), and to
/// emitCountedByPointerSize() for counted pointers inside a struct. Returns
/// nullptr when E isn't a supported counted_by form.
llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
                                                llvm::Value *EmittedE,
                                                unsigned Type,
                                                llvm::IntegerType *ResType) {
  // Note: If the whole struct is specified in the __bdos (i.e. Visitor
  // returns a DeclRefExpr). The calculation of the whole size of the structure
  // with a flexible array member can be done in two ways:
  //
  // 1) sizeof(struct S) + count * sizeof(typeof(fam))
  // 2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
  //
  // The first will add additional padding after the end of the array
  // allocation while the second method is more precise, but not quite expected
  // from programmers. See
  // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
  // of the topic.
  //
  // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
  // structure. Therefore, because of the above issue, we choose to match what
  // GCC does for consistency's sake.

  // Strip the expression down to the first MemberExpr, recording a single
  // array subscript along the way.
  StructFieldAccess Visitor;
  E = Visitor.Visit(E);
  if (!E)
    return nullptr;

  const Expr *Idx = Visitor.ArrayIndex;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
      int64_t Val = IL->getValue().getSExtValue();
      if (Val < 0)
        return getDefaultBuiltinObjectSizeResult(Type, ResType);

      // The index is 0, so we don't need to take it into account.
      if (Val == 0)
        Idx = nullptr;
    }
  }

  // __counted_by on either a flexible array member or a pointer into a struct
  // with a flexible array member.
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
                                   Type, ResType);

  // __counted_by on a pointer in a struct.
  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
      ICE && ICE->getCastKind() == CK_LValueToRValue)
    return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
                                    Type, ResType);

  return nullptr;
}
1112
1114 llvm::Value *Res,
1115 llvm::Value *Index,
1116 llvm::IntegerType *ResType,
1117 bool IsSigned) {
1118 // cmp = (array_size >= 0)
1119 Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
1120 if (Index)
1121 // cmp = (cmp && index >= 0)
1122 Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);
1123
1124 // return cmp ? result : 0
1125 return CGF.Builder.CreateSelect(Cmp, Res,
1126 ConstantInt::get(ResType, 0, IsSigned));
1127}
1128
1129static std::pair<llvm::Value *, llvm::Value *>
1131 const FieldDecl *ArrayFD, const FieldDecl *CountFD,
1132 const Expr *Idx, llvm::IntegerType *ResType,
1133 bool IsSigned) {
1134 // count = ptr->count;
1135 Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
1136 if (!Count)
1137 return std::make_pair<Value *>(nullptr, nullptr);
1138 Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");
1139
1140 // index = ptr->index;
1141 Value *Index = nullptr;
1142 if (Idx) {
1143 bool IdxSigned = Idx->getType()->isSignedIntegerType();
1144 Index = CGF.EmitScalarExpr(Idx);
1145 Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
1146 }
1147
1148 return std::make_pair(Count, Index);
1149}
1150
/// Compute the dynamic object size for a '__counted_by' (or '__sized_by')
/// pointer field: a pointer member of a struct whose element count lives in a
/// sibling 'count' field. Returns nullptr when the expression doesn't match a
/// supported form.
llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  // The accessed member must be a pointer carrying a count attribute.
  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   struct p **array __attribute__((counted_by(count)));
  //   int count;
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    result = array_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //    index_size = index * casted_array_element_size;
  //    result = array_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index)
  //      cmp = (cmp && index > 0)
  //    return cmp ? result : 0;

  // Size of one pointee; a zero-sized pointee can only legitimately come from
  // a 'void *' __sized_by/__counted_by field, which counts in bytes.
  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have an CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  // The count field's signedness governs the wrap flags on the arithmetic
  // below (nuw when unsigned, nsw when signed).
  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1281
1282llvm::Value *CodeGenFunction::emitCountedByMemberSize(
1283 const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
1284 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1285 const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1286 if (!FD)
1287 return nullptr;
1288
1289 // Find the flexible array member and check that it has the __counted_by
1290 // attribute.
1291 ASTContext &Ctx = getContext();
1292 const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
1293 const FieldDecl *FlexibleArrayMemberFD = nullptr;
1294
1296 Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
1297 /*IgnoreTemplateOrMacroSubstitution=*/true))
1298 FlexibleArrayMemberFD = FD;
1299 else
1300 FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);
1301
1302 if (!FlexibleArrayMemberFD ||
1303 !FlexibleArrayMemberFD->getType()->isCountAttributedType())
1304 return nullptr;
1305
1306 // Get the 'count' FieldDecl.
1307 const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
1308 if (!CountFD)
1309 // Can't find the field referenced by the "counted_by" attribute.
1310 return nullptr;
1311
1312 // Calculate the flexible array member's object size using these formulae.
1313 // (Note: if the calculation is negative, we return 0.):
1314 //
1315 // struct p;
1316 // struct s {
1317 // /* ... */
1318 // int count;
1319 // struct p *array[] __attribute__((counted_by(count)));
1320 // };
1321 //
1322 // 1) 'ptr->array':
1323 //
1324 // count = ptr->count;
1325 //
1326 // flexible_array_member_element_size = sizeof (*ptr->array);
1327 // flexible_array_member_size =
1328 // count * flexible_array_member_element_size;
1329 //
1330 // result = flexible_array_member_size;
1331 //
1332 // cmp = (result >= 0)
1333 // return cmp ? result : 0;
1334 //
1335 // 2) '&((cast) ptr->array)[idx]':
1336 //
1337 // count = ptr->count;
1338 // index = idx;
1339 //
1340 // flexible_array_member_element_size = sizeof (*ptr->array);
1341 // flexible_array_member_size =
1342 // count * flexible_array_member_element_size;
1343 //
1344 // casted_flexible_array_member_element_size =
1345 // sizeof (*((cast) ptr->array));
1346 // index_size = index * casted_flexible_array_member_element_size;
1347 //
1348 // result = flexible_array_member_size - index_size;
1349 //
1350 // cmp = (result >= 0)
1351 // if (index != 0)
1352 // cmp = (cmp && index >= 0)
1353 // return cmp ? result : 0;
1354 //
1355 // 3) '&ptr->field':
1356 //
1357 // count = ptr->count;
1358 // sizeof_struct = sizeof (struct s);
1359 //
1360 // flexible_array_member_element_size = sizeof (*ptr->array);
1361 // flexible_array_member_size =
1362 // count * flexible_array_member_element_size;
1363 //
1364 // field_offset = offsetof (struct s, field);
1365 // offset_diff = sizeof_struct - field_offset;
1366 //
1367 // result = offset_diff + flexible_array_member_size;
1368 //
1369 // cmp = (result >= 0)
1370 // return cmp ? result : 0;
1371 //
1372 // 4) '&((cast) ptr->field_array)[idx]':
1373 //
1374 // count = ptr->count;
1375 // index = idx;
1376 // sizeof_struct = sizeof (struct s);
1377 //
1378 // flexible_array_member_element_size = sizeof (*ptr->array);
1379 // flexible_array_member_size =
1380 // count * flexible_array_member_element_size;
1381 //
1382 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1383 // field_offset = offsetof (struct s, field)
1384 // field_offset += index * casted_field_element_size;
1385 //
1386 // offset_diff = sizeof_struct - field_offset;
1387 //
1388 // result = offset_diff + flexible_array_member_size;
1389 //
1390 // cmp = (result >= 0)
1391 // if (index != 0)
1392 // cmp = (cmp && index >= 0)
1393 // return cmp ? result : 0;
1394
1395 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1396
1397 QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();
1398
1399 // Explicit cast because otherwise the CharWidth will promote an i32's into
1400 // u64's leading to overflows.
1401 int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());
1402
1403 // field_offset = offsetof (struct s, field);
1404 Value *FieldOffset = nullptr;
1405 if (FlexibleArrayMemberFD != FD) {
1406 std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
1407 if (!Offset)
1408 return nullptr;
1409 FieldOffset =
1410 llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
1411 }
1412
1413 // count = ptr->count;
1414 // index = ptr->index;
1415 Value *Count, *Index;
1416 std::tie(Count, Index) = GetCountFieldAndIndex(
1417 *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
1418 if (!Count)
1419 return nullptr;
1420
1421 // flexible_array_member_element_size = sizeof (*ptr->array);
1422 const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
1423 CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1424 auto *FlexibleArrayMemberElementSize =
1425 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1426
1427 // flexible_array_member_size = count * flexible_array_member_element_size;
1428 Value *FlexibleArrayMemberSize =
1429 Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
1430 "flexible_array_member_size", !IsSigned, IsSigned);
1431
1432 Value *Result = nullptr;
1433 if (FlexibleArrayMemberFD == FD) {
1434 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1435 // casted_flexible_array_member_element_size =
1436 // sizeof (*((cast) ptr->array));
1437 llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
1438 FlexibleArrayMemberElementSize;
1439 if (!CastedArrayElementTy.isNull() &&
1440 CastedArrayElementTy->isPointerType()) {
1441 CharUnits BaseSize =
1442 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1443 CastedFlexibleArrayMemberElementSize =
1444 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1445 }
1446
1447 // index_size = index * casted_flexible_array_member_element_size;
1448 Value *IndexSize =
1449 Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
1450 "index_size", !IsSigned, IsSigned);
1451
1452 // result = flexible_array_member_size - index_size;
1453 Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
1454 !IsSigned, IsSigned);
1455 } else { // Option (1) 'ptr->array'
1456 // result = flexible_array_member_size;
1457 Result = FlexibleArrayMemberSize;
1458 }
1459 } else {
1460 // sizeof_struct = sizeof (struct s);
1461 llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
1462 const llvm::DataLayout &Layout = CGM.getDataLayout();
1463 TypeSize Size = Layout.getTypeSizeInBits(StructTy);
1464 Value *SizeofStruct =
1465 llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);
1466
1467 if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
1468 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1469 CharUnits BaseSize;
1470 if (!CastedArrayElementTy.isNull() &&
1471 CastedArrayElementTy->isPointerType()) {
1472 BaseSize =
1473 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1474 } else {
1475 const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
1476 BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1477 }
1478
1479 llvm::ConstantInt *CastedFieldElementSize =
1480 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1481
1482 // field_offset += index * casted_field_element_size;
1483 Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
1484 "field_offset", !IsSigned, IsSigned);
1485 FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
1486 }
1487 // Option (3) '&ptr->field', and Option (4) continuation.
1488 // offset_diff = flexible_array_member_offset - field_offset;
1489 Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
1490 "offset_diff", !IsSigned, IsSigned);
1491
1492 // result = offset_diff + flexible_array_member_size;
1493 Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
1494 }
1495
1496 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1497}
1498
/// Returns a Value corresponding to the size of the given expression.
/// This Value may be either of the following:
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
/// it)
/// - A call to the @llvm.objectsize intrinsic
///
/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
/// and we wouldn't otherwise try to reference a pass_object_size parameter,
/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
                                       llvm::IntegerType *ResType,
                                       llvm::Value *EmittedE, bool IsDynamic) {
  // We need to reference an argument if the pointer is a parameter with the
  // pass_object_size attribute.
  if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
    auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
    auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
    if (Param != nullptr && PS != nullptr &&
        areBOSTypesCompatible(PS->getType(), Type)) {
      // Load the implicit size argument registered for this parameter.
      auto Iter = SizeArguments.find(Param);
      assert(Iter != SizeArguments.end());

      const ImplicitParamDecl *D = Iter->second;
      auto DIter = LocalDeclMap.find(D);
      assert(DIter != LocalDeclMap.end());

      return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
                              getContext().getSizeType(), E->getBeginLoc());
    }
  }

  // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
  // evaluate E for side-effects. In either case, we shouldn't lower to
  // @llvm.objectsize.
  if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
    return getDefaultBuiltinObjectSizeResult(Type, ResType);

  Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
  assert(Ptr->getType()->isPointerTy() &&
         "Non-pointer passed to __builtin_object_size?");

  if (IsDynamic)
    // Emit special code for a flexible array member with the "counted_by"
    // attribute.
    if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
      return V;

  Function *F =
      CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});

  // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
  Value *Min = Builder.getInt1((Type & 2) != 0);
  // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
  Value *NullIsUnknown = Builder.getTrue();
  Value *Dynamic = Builder.getInt1(IsDynamic);
  return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
}
1557
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  // What the intrinsic does to the tested bit.
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  // Atomic-ordering flavor of the "interlocked" variants.
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;
  InterlockingKind Interlocking;
  bool Is64Bit;

  // Decode an MSVC _bittest*/_interlockedbittest* builtin ID into the
  // fields above.
  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};

} // namespace
1578
1579BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1580 switch (BuiltinID) {
1581 // Main portable variants.
1582 case Builtin::BI_bittest:
1583 return {TestOnly, Unlocked, false};
1584 case Builtin::BI_bittestandcomplement:
1585 return {Complement, Unlocked, false};
1586 case Builtin::BI_bittestandreset:
1587 return {Reset, Unlocked, false};
1588 case Builtin::BI_bittestandset:
1589 return {Set, Unlocked, false};
1590 case Builtin::BI_interlockedbittestandreset:
1591 return {Reset, Sequential, false};
1592 case Builtin::BI_interlockedbittestandset:
1593 return {Set, Sequential, false};
1594
1595 // 64-bit variants.
1596 case Builtin::BI_bittest64:
1597 return {TestOnly, Unlocked, true};
1598 case Builtin::BI_bittestandcomplement64:
1599 return {Complement, Unlocked, true};
1600 case Builtin::BI_bittestandreset64:
1601 return {Reset, Unlocked, true};
1602 case Builtin::BI_bittestandset64:
1603 return {Set, Unlocked, true};
1604 case Builtin::BI_interlockedbittestandreset64:
1605 return {Reset, Sequential, true};
1606 case Builtin::BI_interlockedbittestandset64:
1607 return {Set, Sequential, true};
1608
1609 // ARM/AArch64-specific ordering variants.
1610 case Builtin::BI_interlockedbittestandset_acq:
1611 return {Set, Acquire, false};
1612 case Builtin::BI_interlockedbittestandset_rel:
1613 return {Set, Release, false};
1614 case Builtin::BI_interlockedbittestandset_nf:
1615 return {Set, NoFence, false};
1616 case Builtin::BI_interlockedbittestandreset_acq:
1617 return {Reset, Acquire, false};
1618 case Builtin::BI_interlockedbittestandreset_rel:
1619 return {Reset, Release, false};
1620 case Builtin::BI_interlockedbittestandreset_nf:
1621 return {Reset, NoFence, false};
1622 case Builtin::BI_interlockedbittestandreset64_acq:
1623 return {Reset, Acquire, false};
1624 case Builtin::BI_interlockedbittestandreset64_rel:
1625 return {Reset, Release, false};
1626 case Builtin::BI_interlockedbittestandreset64_nf:
1627 return {Reset, NoFence, false};
1628 case Builtin::BI_interlockedbittestandset64_acq:
1629 return {Set, Acquire, false};
1630 case Builtin::BI_interlockedbittestandset64_rel:
1631 return {Set, Release, false};
1632 case Builtin::BI_interlockedbittestandset64_nf:
1633 return {Set, NoFence, false};
1634 }
1635 llvm_unreachable("expected only bittest intrinsics");
1636}
1637
1638static char bitActionToX86BTCode(BitTest::ActionKind A) {
1639 switch (A) {
1640 case BitTest::TestOnly: return '\0';
1641 case BitTest::Complement: return 'c';
1642 case BitTest::Reset: return 'r';
1643 case BitTest::Set: return 's';
1644 }
1645 llvm_unreachable("invalid action");
1646}
1647
1649 BitTest BT,
1650 const CallExpr *E, Value *BitBase,
1651 Value *BitPos) {
1652 char Action = bitActionToX86BTCode(BT.Action);
1653 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1654
1655 // Build the assembly.
1657 raw_svector_ostream AsmOS(Asm);
1658 if (BT.Interlocking != BitTest::Unlocked)
1659 AsmOS << "lock ";
1660 AsmOS << "bt";
1661 if (Action)
1662 AsmOS << Action;
1663 AsmOS << SizeSuffix << " $2, ($1)";
1664
1665 // Build the constraints. FIXME: We should support immediates when possible.
1666 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1667 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1668 if (!MachineClobbers.empty()) {
1669 Constraints += ',';
1670 Constraints += MachineClobbers;
1671 }
1672 llvm::IntegerType *IntType = llvm::IntegerType::get(
1673 CGF.getLLVMContext(),
1674 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1675 llvm::FunctionType *FTy =
1676 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1677
1678 llvm::InlineAsm *IA =
1679 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1680 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1681}
1682
1683static llvm::AtomicOrdering
1684getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1685 switch (I) {
1686 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1687 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1688 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1689 case BitTest::Release: return llvm::AtomicOrdering::Release;
1690 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1691 }
1692 llvm_unreachable("invalid interlocking");
1693}
1694
1695static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1696 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1697 llvm::Type *ArgType = ArgValue->getType();
1698
1699 // Boolean vectors can be casted directly to its bitfield representation. We
1700 // intentionally do not round up to the next power of two size and let LLVM
1701 // handle the trailing bits.
1702 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1703 VT && VT->getElementType()->isIntegerTy(1)) {
1704 llvm::Type *StorageType =
1705 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1706 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1707 }
1708
1709 return ArgValue;
1710}
1711
1712/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1713/// bits and a bit position and read and optionally modify the bit at that
1714/// position. The position index can be arbitrarily large, i.e. it can be larger
1715/// than 31 or 63, so we need an indexed load in the general case.
1716static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1717 unsigned BuiltinID,
1718 const CallExpr *E) {
1719 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1720 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1721
1722 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1723
1724 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1725 // indexing operation internally. Use them if possible.
1726 if (CGF.getTarget().getTriple().isX86())
1727 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1728
1729 // Otherwise, use generic code to load one byte and test the bit. Use all but
1730 // the bottom three bits as the array index, and the bottom three bits to form
1731 // a mask.
1732 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1733 Value *ByteIndex = CGF.Builder.CreateAShr(
1734 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1735 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1736 "bittest.byteaddr"),
1737 CGF.Int8Ty, CharUnits::One());
1738 Value *PosLow =
1739 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1740 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1741
1742 // The updating instructions will need a mask.
1743 Value *Mask = nullptr;
1744 if (BT.Action != BitTest::TestOnly) {
1745 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1746 "bittest.mask");
1747 }
1748
1749 // Check the action and ordering of the interlocked intrinsics.
1750 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1751
1752 Value *OldByte = nullptr;
1753 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1754 // Emit a combined atomicrmw load/store operation for the interlocked
1755 // intrinsics.
1756 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1757 if (BT.Action == BitTest::Reset) {
1758 Mask = CGF.Builder.CreateNot(Mask);
1759 RMWOp = llvm::AtomicRMWInst::And;
1760 }
1761 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1762 } else {
1763 // Emit a plain load for the non-interlocked intrinsics.
1764 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1765 Value *NewByte = nullptr;
1766 switch (BT.Action) {
1767 case BitTest::TestOnly:
1768 // Don't store anything.
1769 break;
1770 case BitTest::Complement:
1771 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1772 break;
1773 case BitTest::Reset:
1774 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1775 break;
1776 case BitTest::Set:
1777 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1778 break;
1779 }
1780 if (NewByte)
1781 CGF.Builder.CreateStore(NewByte, ByteAddr);
1782 }
1783
1784 // However we loaded the old byte, either by plain load or atomicrmw, shift
1785 // the bit into the low position and mask it to 0 or 1.
1786 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1787 return CGF.Builder.CreateAnd(
1788 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1789}
1790
namespace {
// Flavors of the MSVC CRT setjmp family that EmitMSVCRTSetJmp can lower;
// the enumerator names match the runtime entry points they select.
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
}
1798
1799/// MSVC handles setjmp a bit differently on different platforms. On every
1800/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1801/// parameters can be passed as variadic arguments, but we always pass none.
1802static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1803 const CallExpr *E) {
1804 llvm::Value *Arg1 = nullptr;
1805 llvm::Type *Arg1Ty = nullptr;
1806 StringRef Name;
1807 bool IsVarArg = false;
1808 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1809 Name = "_setjmp3";
1810 Arg1Ty = CGF.Int32Ty;
1811 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1812 IsVarArg = true;
1813 } else {
1814 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1815 Arg1Ty = CGF.Int8PtrTy;
1816 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1817 Arg1 = CGF.Builder.CreateCall(
1818 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1819 } else
1820 Arg1 = CGF.Builder.CreateCall(
1821 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1822 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1823 }
1824
1825 // Mark the call site and declaration with ReturnsTwice.
1826 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1827 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1828 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1829 llvm::Attribute::ReturnsTwice);
1830 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1831 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1832 ReturnsTwiceAttr, /*Local=*/true);
1833
1834 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1835 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1836 llvm::Value *Args[] = {Buf, Arg1};
1837 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1838 CB->setAttributes(ReturnsTwiceAttr);
1839 return RValue::get(CB);
1840}
1841
1842// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1844 const CallExpr *E) {
1845 switch (BuiltinID) {
1848 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1849 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1850
1851 llvm::Type *ArgType = ArgValue->getType();
1852 llvm::Type *IndexType = IndexAddress.getElementType();
1853 llvm::Type *ResultType = ConvertType(E->getType());
1854
1855 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1856 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1857 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1858
1859 BasicBlock *Begin = Builder.GetInsertBlock();
1860 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1861 Builder.SetInsertPoint(End);
1862 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1863
1864 Builder.SetInsertPoint(Begin);
1865 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1866 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1867 Builder.CreateCondBr(IsZero, End, NotZero);
1868 Result->addIncoming(ResZero, Begin);
1869
1870 Builder.SetInsertPoint(NotZero);
1871
1872 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1873 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1874 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1875 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1876 Builder.CreateStore(ZeroCount, IndexAddress, false);
1877 } else {
1878 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1879 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1880
1881 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1882 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1883 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1884 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1885 Builder.CreateStore(Index, IndexAddress, false);
1886 }
1887 Builder.CreateBr(End);
1888 Result->addIncoming(ResOne, NotZero);
1889
1890 Builder.SetInsertPoint(End);
1891 return Result;
1892 }
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1904 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1906 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1907 AtomicOrdering::Acquire);
1909 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1910 AtomicOrdering::Release);
1912 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1913 AtomicOrdering::Monotonic);
1915 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1916 AtomicOrdering::Acquire);
1918 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1919 AtomicOrdering::Release);
1921 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1922 AtomicOrdering::Monotonic);
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1928 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1930 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1933 *this, E, AtomicOrdering::SequentiallyConsistent);
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1937 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1939 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1941 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1942 AtomicOrdering::Acquire);
1944 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1945 AtomicOrdering::Release);
1947 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1948 AtomicOrdering::Monotonic);
1950 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1951 AtomicOrdering::Acquire);
1953 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1954 AtomicOrdering::Release);
1956 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1957 AtomicOrdering::Monotonic);
1959 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1960 AtomicOrdering::Acquire);
1962 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1963 AtomicOrdering::Release);
1965 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1966 AtomicOrdering::Monotonic);
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1970 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1972 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1976 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1978 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1979
1981 return EmitAtomicDecrementValue(*this, E);
1983 return EmitAtomicIncrementValue(*this, E);
1984
1986 // Request immediate process termination from the kernel. The instruction
1987 // sequences to do this are documented on MSDN:
1988 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1989 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1990 StringRef Asm, Constraints;
1991 switch (ISA) {
1992 default:
1993 ErrorUnsupported(E, "__fastfail call for this architecture");
1994 break;
1995 case llvm::Triple::x86:
1996 case llvm::Triple::x86_64:
1997 Asm = "int $$0x29";
1998 Constraints = "{cx}";
1999 break;
2000 case llvm::Triple::thumb:
2001 Asm = "udf #251";
2002 Constraints = "{r0}";
2003 break;
2004 case llvm::Triple::aarch64:
2005 Asm = "brk #0xF003";
2006 Constraints = "{w0}";
2007 }
2008 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2009 llvm::InlineAsm *IA =
2010 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2011 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2012 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2013 llvm::Attribute::NoReturn);
2014 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2015 CI->setAttributes(NoReturnAttr);
2016 return CI;
2017 }
2018 }
2019 llvm_unreachable("Incorrect MSVC intrinsic!");
2020}
2021
2022namespace {
2023// ARC cleanup for __builtin_os_log_format
2024struct CallObjCArcUse final : EHScopeStack::Cleanup {
2025 CallObjCArcUse(llvm::Value *object) : object(object) {}
2026 llvm::Value *object;
2027
2028 void Emit(CodeGenFunction &CGF, Flags flags) override {
2029 CGF.EmitARCIntrinsicUse(object);
2030 }
2031};
2032}
2033
2035 BuiltinCheckKind Kind) {
2036 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2037 "Unsupported builtin check kind");
2038
2039 Value *ArgValue = EmitBitCountExpr(*this, E);
2040 if (!SanOpts.has(SanitizerKind::Builtin))
2041 return ArgValue;
2042
2043 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2044 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2045 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2046 Value *Cond = Builder.CreateICmpNE(
2047 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2048 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2050 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2051 {});
2052 return ArgValue;
2053}
2054
2056 Value *ArgValue = EvaluateExprAsBool(E);
2057 if (!SanOpts.has(SanitizerKind::Builtin))
2058 return ArgValue;
2059
2060 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2061 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2062 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2063 EmitCheck(
2064 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2066 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2067 {});
2068 return ArgValue;
2069}
2070
2071static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2072 return CGF.Builder.CreateBinaryIntrinsic(
2073 Intrinsic::abs, ArgValue,
2074 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2075}
2076
2078 bool SanitizeOverflow) {
2079 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2080
2081 // Try to eliminate overflow check.
2082 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2083 if (!VCI->isMinSignedValue())
2084 return EmitAbs(CGF, ArgValue, true);
2085 }
2086
2088 SanitizerHandler CheckHandler;
2089 if (SanitizeOverflow) {
2090 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2091 CheckHandler = SanitizerHandler::NegateOverflow;
2092 } else
2093 CheckHandler = SanitizerHandler::SubOverflow;
2094
2095 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2096
2097 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2098 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2099 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2100 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2101 Value *NotOverflow = CGF.Builder.CreateNot(
2102 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2103
2104 // TODO: support -ftrapv-handler.
2105 if (SanitizeOverflow) {
2106 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2107 CheckHandler,
2110 {ArgValue});
2111 } else
2112 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2113
2114 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2115 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2116}
2117
2118/// Get the argument type for arguments to os_log_helper.
2120 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2121 return C.getCanonicalType(UnsignedTy);
2122}
2123
2126 CharUnits BufferAlignment) {
2127 ASTContext &Ctx = getContext();
2128
2130 {
2131 raw_svector_ostream OS(Name);
2132 OS << "__os_log_helper";
2133 OS << "_" << BufferAlignment.getQuantity();
2134 OS << "_" << int(Layout.getSummaryByte());
2135 OS << "_" << int(Layout.getNumArgsByte());
2136 for (const auto &Item : Layout.Items)
2137 OS << "_" << int(Item.getSizeByte()) << "_"
2138 << int(Item.getDescriptorByte());
2139 }
2140
2141 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2142 return F;
2143
2145 FunctionArgList Args;
2146 Args.push_back(ImplicitParamDecl::Create(
2147 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2149 ArgTys.emplace_back(Ctx.VoidPtrTy);
2150
2151 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2152 char Size = Layout.Items[I].getSizeByte();
2153 if (!Size)
2154 continue;
2155
2156 QualType ArgTy = getOSLogArgType(Ctx, Size);
2157 Args.push_back(ImplicitParamDecl::Create(
2158 Ctx, nullptr, SourceLocation(),
2159 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2161 ArgTys.emplace_back(ArgTy);
2162 }
2163
2164 QualType ReturnTy = Ctx.VoidTy;
2165
2166 // The helper function has linkonce_odr linkage to enable the linker to merge
2167 // identical functions. To ensure the merging always happens, 'noinline' is
2168 // attached to the function when compiling with -Oz.
2169 const CGFunctionInfo &FI =
2170 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2171 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2172 llvm::Function *Fn = llvm::Function::Create(
2173 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2174 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2175 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2176 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2177 Fn->setDoesNotThrow();
2178
2179 // Attach 'noinline' at -Oz.
2180 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2181 Fn->addFnAttr(llvm::Attribute::NoInline);
2182
2183 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2184 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2185
2186 // Create a scope with an artificial location for the body of this function.
2187 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2188
2189 CharUnits Offset;
2191 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2192 BufferAlignment);
2193 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2194 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2195 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2196 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2197
2198 unsigned I = 1;
2199 for (const auto &Item : Layout.Items) {
2200 Builder.CreateStore(
2201 Builder.getInt8(Item.getDescriptorByte()),
2202 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2203 Builder.CreateStore(
2204 Builder.getInt8(Item.getSizeByte()),
2205 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2206
2207 CharUnits Size = Item.size();
2208 if (!Size.getQuantity())
2209 continue;
2210
2211 Address Arg = GetAddrOfLocalVar(Args[I]);
2212 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2213 Addr = Addr.withElementType(Arg.getElementType());
2214 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2215 Offset += Size;
2216 ++I;
2217 }
2218
2220
2221 return Fn;
2222}
2223
2225 assert(E.getNumArgs() >= 2 &&
2226 "__builtin_os_log_format takes at least 2 arguments");
2227 ASTContext &Ctx = getContext();
2230 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2231
2232 // Ignore argument 1, the format string. It is not currently used.
2233 CallArgList Args;
2234 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2235
2236 for (const auto &Item : Layout.Items) {
2237 int Size = Item.getSizeByte();
2238 if (!Size)
2239 continue;
2240
2241 llvm::Value *ArgVal;
2242
2243 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2244 uint64_t Val = 0;
2245 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2246 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2247 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2248 } else if (const Expr *TheExpr = Item.getExpr()) {
2249 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2250
2251 // If a temporary object that requires destruction after the full
2252 // expression is passed, push a lifetime-extended cleanup to extend its
2253 // lifetime to the end of the enclosing block scope.
2254 auto LifetimeExtendObject = [&](const Expr *E) {
2255 E = E->IgnoreParenCasts();
2256 // Extend lifetimes of objects returned by function calls and message
2257 // sends.
2258
2259 // FIXME: We should do this in other cases in which temporaries are
2260 // created including arguments of non-ARC types (e.g., C++
2261 // temporaries).
2263 return true;
2264 return false;
2265 };
2266
2267 if (TheExpr->getType()->isObjCRetainableType() &&
2268 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2269 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2270 "Only scalar can be a ObjC retainable type");
2271 if (!isa<Constant>(ArgVal)) {
2272 CleanupKind Cleanup = getARCCleanupKind();
2273 QualType Ty = TheExpr->getType();
2275 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2276 ArgVal = EmitARCRetain(Ty, ArgVal);
2277 Builder.CreateStore(ArgVal, Addr);
2278 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2280 Cleanup & EHCleanup);
2281
2282 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2283 // argument has to be alive.
2284 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2286 }
2287 }
2288 } else {
2289 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2290 }
2291
2292 unsigned ArgValSize =
2293 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2294 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2295 ArgValSize);
2296 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2297 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2298 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2299 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2300 Args.add(RValue::get(ArgVal), ArgTy);
2301 }
2302
2303 const CGFunctionInfo &FI =
2304 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2305 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2306 Layout, BufAddr.getAlignment());
2308 return RValue::get(BufAddr, *this);
2309}
2310
2312 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2313 WidthAndSignedness ResultInfo) {
2314 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2315 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2316 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2317}
2318
2320 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2321 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2322 const clang::Expr *ResultArg, QualType ResultQTy,
2323 WidthAndSignedness ResultInfo) {
2325 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2326 "Cannot specialize this multiply");
2327
2328 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2329 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2330
2331 llvm::Value *HasOverflow;
2332 llvm::Value *Result = EmitOverflowIntrinsic(
2333 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2334
2335 // The intrinsic call will detect overflow when the value is > UINT_MAX,
2336 // however, since the original builtin had a signed result, we need to report
2337 // an overflow when the result is greater than INT_MAX.
2338 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2339 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2340
2341 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2342 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2343
2344 bool isVolatile =
2345 ResultArg->getType()->getPointeeType().isVolatileQualified();
2346 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2347 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2348 isVolatile);
2349 return RValue::get(HasOverflow);
2350}
2351
2352/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2353static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2354 WidthAndSignedness Op1Info,
2355 WidthAndSignedness Op2Info,
2356 WidthAndSignedness ResultInfo) {
2357 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2358 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2359 Op1Info.Signed != Op2Info.Signed;
2360}
2361
2362/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2363/// the generic checked-binop irgen.
2364static RValue
2366 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2367 WidthAndSignedness Op2Info,
2368 const clang::Expr *ResultArg, QualType ResultQTy,
2369 WidthAndSignedness ResultInfo) {
2370 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2371 Op2Info, ResultInfo) &&
2372 "Not a mixed-sign multipliction we can specialize");
2373
2374 // Emit the signed and unsigned operands.
2375 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2376 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2377 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2378 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2379 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2380 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2381
2382 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2383 if (SignedOpWidth < UnsignedOpWidth)
2384 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2385 if (UnsignedOpWidth < SignedOpWidth)
2386 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2387
2388 llvm::Type *OpTy = Signed->getType();
2389 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2390 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2391 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2392 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2393
2394 // Take the absolute value of the signed operand.
2395 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2396 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2397 llvm::Value *AbsSigned =
2398 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2399
2400 // Perform a checked unsigned multiplication.
2401 llvm::Value *UnsignedOverflow;
2402 llvm::Value *UnsignedResult =
2403 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2404 Unsigned, UnsignedOverflow);
2405
2406 llvm::Value *Overflow, *Result;
2407 if (ResultInfo.Signed) {
2408 // Signed overflow occurs if the result is greater than INT_MAX or lesser
2409 // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
2410 auto IntMax =
2411 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2412 llvm::Value *MaxResult =
2413 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2414 CGF.Builder.CreateZExt(IsNegative, OpTy));
2415 llvm::Value *SignedOverflow =
2416 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2417 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2418
2419 // Prepare the signed result (possibly by negating it).
2420 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2421 llvm::Value *SignedResult =
2422 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2423 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2424 } else {
2425 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2426 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2427 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2428 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2429 if (ResultInfo.Width < OpWidth) {
2430 auto IntMax =
2431 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2432 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2433 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2434 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2435 }
2436
2437 // Negate the product if it would be negative in infinite precision.
2438 Result = CGF.Builder.CreateSelect(
2439 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2440
2441 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2442 }
2443 assert(Overflow && Result && "Missing overflow or result");
2444
2445 bool isVolatile =
2446 ResultArg->getType()->getPointeeType().isVolatileQualified();
2447 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2448 isVolatile);
2449 return RValue::get(Overflow);
2450}
2451
2452static bool
2454 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2455 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2456 Ty = Ctx.getBaseElementType(Arr);
2457
2458 const auto *Record = Ty->getAsCXXRecordDecl();
2459 if (!Record)
2460 return false;
2461
2462 // We've already checked this type, or are in the process of checking it.
2463 if (!Seen.insert(Record).second)
2464 return false;
2465
2466 assert(Record->hasDefinition() &&
2467 "Incomplete types should already be diagnosed");
2468
2469 if (Record->isDynamicClass())
2470 return true;
2471
2472 for (FieldDecl *F : Record->fields()) {
2473 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2474 return true;
2475 }
2476 return false;
2477}
2478
2479/// Determine if the specified type requires laundering by checking if it is a
2480/// dynamic class type or contains a subobject which is a dynamic class type.
2482 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2483 return false;
2485 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2486}
2487
/// Emit a rotate builtin as an LLVM funnel-shift intrinsic, first normalizing
/// the shift amount to [0, BitWidth) so out-of-range and negative counts give
/// the same result as constant evaluation. \p IsRotateRight selects fshr over
/// fshl.
RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
  llvm::Value *Src = EmitScalarExpr(E->getArg(0));
  llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));

  // The builtin's shift arg may have a different type than the source arg and
  // result, but the LLVM intrinsic uses the same type for all values.
  llvm::Type *Ty = Src->getType();
  llvm::Type *ShiftTy = ShiftAmt->getType();

  unsigned BitWidth = Ty->getIntegerBitWidth();

  // Normalize shift amount to [0, BitWidth) range to match runtime behavior.
  // This matches the algorithm in ExprConstant.cpp for constant evaluation.
  if (BitWidth == 1) {
    // Rotating a 1-bit value is always a no-op.
    ShiftAmt = ConstantInt::get(ShiftTy, 0);
  } else if (BitWidth == 2) {
    // For 2-bit values: rotation amount is 0 or 1 based on
    // whether the amount is even or odd. We can't use srem here because
    // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
    llvm::Value *One = ConstantInt::get(ShiftTy, 1);
    ShiftAmt = Builder.CreateAnd(ShiftAmt, One);
  } else {
    unsigned ShiftAmtBitWidth = ShiftTy->getIntegerBitWidth();
    bool ShiftAmtIsSigned = E->getArg(1)->getType()->isSignedIntegerType();

    // Choose the wider type for the divisor to avoid truncation.
    llvm::Type *DivisorTy = ShiftAmtBitWidth > BitWidth ? ShiftTy : Ty;
    llvm::Value *Divisor = ConstantInt::get(DivisorTy, BitWidth);

    // Extend ShiftAmt to match Divisor width if needed (sign- or zero-extend
    // according to the source-level signedness of the shift operand).
    if (ShiftAmtBitWidth < DivisorTy->getIntegerBitWidth()) {
      ShiftAmt = Builder.CreateIntCast(ShiftAmt, DivisorTy, ShiftAmtIsSigned);
    }

    // Normalize to [0, BitWidth).
    llvm::Value *RemResult;
    if (ShiftAmtIsSigned) {
      RemResult = Builder.CreateSRem(ShiftAmt, Divisor);
      // Signed remainder can be negative; convert to the positive equivalent
      // by adding BitWidth back when the remainder is below zero.
      llvm::Value *Zero = ConstantInt::get(DivisorTy, 0);
      llvm::Value *IsNegative = Builder.CreateICmpSLT(RemResult, Zero);
      llvm::Value *PositiveShift = Builder.CreateAdd(RemResult, Divisor);
      ShiftAmt = Builder.CreateSelect(IsNegative, PositiveShift, RemResult);
    } else {
      ShiftAmt = Builder.CreateURem(ShiftAmt, Divisor);
    }
  }

  // Convert to the source type if needed; the normalized amount is
  // non-negative, so zero-extension is correct.
  if (ShiftAmt->getType() != Ty) {
    ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
  }

  // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
  unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
  Function *F = CGM.getIntrinsic(IID, Ty);
  return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
}
2547
2548// Map math builtins for long-double to f128 version.
2549static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2550 switch (BuiltinID) {
2551#define MUTATE_LDBL(func) \
2552 case Builtin::BI__builtin_##func##l: \
2553 return Builtin::BI__builtin_##func##f128;
2584 MUTATE_LDBL(nans)
2585 MUTATE_LDBL(inf)
2604 MUTATE_LDBL(huge_val)
2614#undef MUTATE_LDBL
2615 default:
2616 return BuiltinID;
2617 }
2618}
2619
2620static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2621 Value *V) {
2622 if (CGF.Builder.getIsFPConstrained() &&
2623 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2624 if (Value *Result =
2625 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2626 return Result;
2627 }
2628 return nullptr;
2629}
2630
2632 const FunctionDecl *FD) {
2633 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2634 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2635 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2636
2638 for (auto &&FormalTy : FnTy->params())
2639 Args.push_back(llvm::PoisonValue::get(FormalTy));
2640
2641 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2642}
2643
2644// stdc_{leading,trailing}_{zeros,ones} and stdc_count_ones: counts bits using
2645// ctlz, cttz, or ctpop (IsPop). InvertArg flips the input to count the
2646// opposite bit value.
2648 Intrinsic::ID IntID,
2649 bool InvertArg, bool IsPop) {
2650 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2651 llvm::Type *ArgType = ArgValue->getType();
2652 llvm::Type *ResultType = ConvertType(E->getType());
2653 Value *ActualArg = InvertArg ? Builder.CreateNot(ArgValue) : ArgValue;
2654 Function *F = CGM.getIntrinsic(IntID, ArgType);
2655 Value *Result = IsPop
2656 ? Builder.CreateCall(F, ActualArg)
2657 : Builder.CreateCall(F, {ActualArg, Builder.getFalse()});
2658 if (Result->getType() != ResultType)
2659 Result = Builder.CreateIntCast(Result, ResultType, false);
2660 return RValue::get(Result);
2661}
2662
2663// stdc_count_zeros (BitWidth - ctpop) and stdc_bit_width (BitWidth - ctlz).
2664// IsPop selects ctpop; otherwise ctlz is used.
2666 Intrinsic::ID IntID, bool IsPop) {
2667 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2668 llvm::Type *ArgType = ArgValue->getType();
2669 llvm::Type *ResultType = ConvertType(E->getType());
2670 unsigned BitWidth = ArgType->getIntegerBitWidth();
2671 Function *F = CGM.getIntrinsic(IntID, ArgType);
2672 Value *Cnt = IsPop ? Builder.CreateCall(F, ArgValue)
2673 : Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
2674 Value *Result = Builder.CreateSub(ConstantInt::get(ArgType, BitWidth), Cnt);
2675 if (Result->getType() != ResultType)
2676 Result = Builder.CreateIntCast(Result, ResultType, false);
2677 return RValue::get(Result);
2678}
2679
2680// stdc_first_{leading,trailing}_{zero,one}: returns the 1-based position of
2681// the first matching bit, or 0 if no such bit exists. InvertArg flips the
2682// input to search for zeros instead of ones.
2684 bool InvertArg) {
2685 Value *ArgValue = EmitScalarExpr(E->getArg(0));
2686 llvm::Type *ArgType = ArgValue->getType();
2687 llvm::Type *ResultType = ConvertType(E->getType());
2688 Value *Zero = ConstantInt::get(ArgType, 0);
2689 Value *One = ConstantInt::get(ArgType, 1);
2690 Value *ActualArg = InvertArg ? Builder.CreateNot(ArgValue) : ArgValue;
2691 Function *F = CGM.getIntrinsic(IntID, ArgType);
2692 Value *Cnt = Builder.CreateCall(F, {ActualArg, Builder.getFalse()});
2693 Value *Tmp = Builder.CreateAdd(Cnt, One);
2694 Value *IsZero = Builder.CreateICmpEQ(ActualArg, Zero);
2695 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp);
2696 if (Result->getType() != ResultType)
2697 Result = Builder.CreateIntCast(Result, ResultType, false);
2698 return RValue::get(Result);
2699}
2700
2702 const CallExpr *E,
2704 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2705 "Should not codegen for consteval builtins");
2706
2707 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2708 // See if we can constant fold this builtin. If so, don't emit it at all.
2709 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2711 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2712 !Result.hasSideEffects()) {
2713 if (Result.Val.isInt())
2714 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2715 Result.Val.getInt()));
2716 if (Result.Val.isFloat())
2717 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2718 Result.Val.getFloat()));
2719 }
2720
2721 // If current long-double semantics is IEEE 128-bit, replace math builtins
2722 // of long-double with f128 equivalent.
2723 // TODO: This mutation should also be applied to other targets other than PPC,
2724 // after backend supports IEEE 128-bit style libcalls.
2725 if (getTarget().getTriple().isPPC64() &&
2726 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2727 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2728
2729 // If the builtin has been declared explicitly with an assembler label,
2730 // disable the specialized emitting below. Ideally we should communicate the
2731 // rename in IR, or at least avoid generating the intrinsic calls that are
2732 // likely to get lowered to the renamed library functions.
2733 const unsigned BuiltinIDIfNoAsmLabel =
2734 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2735
2736 std::optional<bool> ErrnoOverriden;
  // ErrnoOverriden is true if math-errno is overridden via the
2738 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2739 // which implies math-errno.
2740 if (E->hasStoredFPFeatures()) {
2742 if (OP.hasMathErrnoOverride())
2743 ErrnoOverriden = OP.getMathErrnoOverride();
2744 }
  // True if '__attribute__((optnone))' is used. This attribute overrides
2746 // fast-math which implies math-errno.
2747 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2748
2749 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2750
2751 bool GenerateFPMathIntrinsics =
2753 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2754 OptNone, IsOptimizationEnabled);
2755
2756 if (GenerateFPMathIntrinsics) {
2757 switch (BuiltinIDIfNoAsmLabel) {
2758 case Builtin::BIacos:
2759 case Builtin::BIacosf:
2760 case Builtin::BIacosl:
2761 case Builtin::BI__builtin_acos:
2762 case Builtin::BI__builtin_acosf:
2763 case Builtin::BI__builtin_acosf16:
2764 case Builtin::BI__builtin_acosl:
2765 case Builtin::BI__builtin_acosf128:
2766 case Builtin::BI__builtin_elementwise_acos:
2768 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2769
2770 case Builtin::BIasin:
2771 case Builtin::BIasinf:
2772 case Builtin::BIasinl:
2773 case Builtin::BI__builtin_asin:
2774 case Builtin::BI__builtin_asinf:
2775 case Builtin::BI__builtin_asinf16:
2776 case Builtin::BI__builtin_asinl:
2777 case Builtin::BI__builtin_asinf128:
2778 case Builtin::BI__builtin_elementwise_asin:
2780 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2781
2782 case Builtin::BIatan:
2783 case Builtin::BIatanf:
2784 case Builtin::BIatanl:
2785 case Builtin::BI__builtin_atan:
2786 case Builtin::BI__builtin_atanf:
2787 case Builtin::BI__builtin_atanf16:
2788 case Builtin::BI__builtin_atanl:
2789 case Builtin::BI__builtin_atanf128:
2790 case Builtin::BI__builtin_elementwise_atan:
2792 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2793
2794 case Builtin::BIatan2:
2795 case Builtin::BIatan2f:
2796 case Builtin::BIatan2l:
2797 case Builtin::BI__builtin_atan2:
2798 case Builtin::BI__builtin_atan2f:
2799 case Builtin::BI__builtin_atan2f16:
2800 case Builtin::BI__builtin_atan2l:
2801 case Builtin::BI__builtin_atan2f128:
2802 case Builtin::BI__builtin_elementwise_atan2:
2804 *this, E, Intrinsic::atan2,
2805 Intrinsic::experimental_constrained_atan2));
2806
2807 case Builtin::BIceil:
2808 case Builtin::BIceilf:
2809 case Builtin::BIceill:
2810 case Builtin::BI__builtin_ceil:
2811 case Builtin::BI__builtin_ceilf:
2812 case Builtin::BI__builtin_ceilf16:
2813 case Builtin::BI__builtin_ceill:
2814 case Builtin::BI__builtin_ceilf128:
2815 case Builtin::BI__builtin_elementwise_ceil:
2817 Intrinsic::ceil,
2818 Intrinsic::experimental_constrained_ceil));
2819
2820 case Builtin::BIcopysign:
2821 case Builtin::BIcopysignf:
2822 case Builtin::BIcopysignl:
2823 case Builtin::BI__builtin_copysign:
2824 case Builtin::BI__builtin_copysignf:
2825 case Builtin::BI__builtin_copysignf16:
2826 case Builtin::BI__builtin_copysignl:
2827 case Builtin::BI__builtin_copysignf128:
2828 return RValue::get(
2829 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2830
2831 case Builtin::BIcos:
2832 case Builtin::BIcosf:
2833 case Builtin::BIcosl:
2834 case Builtin::BI__builtin_cos:
2835 case Builtin::BI__builtin_cosf:
2836 case Builtin::BI__builtin_cosf16:
2837 case Builtin::BI__builtin_cosl:
2838 case Builtin::BI__builtin_cosf128:
2839 case Builtin::BI__builtin_elementwise_cos:
2841 Intrinsic::cos,
2842 Intrinsic::experimental_constrained_cos));
2843
2844 case Builtin::BIcosh:
2845 case Builtin::BIcoshf:
2846 case Builtin::BIcoshl:
2847 case Builtin::BI__builtin_cosh:
2848 case Builtin::BI__builtin_coshf:
2849 case Builtin::BI__builtin_coshf16:
2850 case Builtin::BI__builtin_coshl:
2851 case Builtin::BI__builtin_coshf128:
2852 case Builtin::BI__builtin_elementwise_cosh:
2854 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2855
2856 case Builtin::BIexp:
2857 case Builtin::BIexpf:
2858 case Builtin::BIexpl:
2859 case Builtin::BI__builtin_exp:
2860 case Builtin::BI__builtin_expf:
2861 case Builtin::BI__builtin_expf16:
2862 case Builtin::BI__builtin_expl:
2863 case Builtin::BI__builtin_expf128:
2864 case Builtin::BI__builtin_elementwise_exp:
2866 Intrinsic::exp,
2867 Intrinsic::experimental_constrained_exp));
2868
2869 case Builtin::BIexp2:
2870 case Builtin::BIexp2f:
2871 case Builtin::BIexp2l:
2872 case Builtin::BI__builtin_exp2:
2873 case Builtin::BI__builtin_exp2f:
2874 case Builtin::BI__builtin_exp2f16:
2875 case Builtin::BI__builtin_exp2l:
2876 case Builtin::BI__builtin_exp2f128:
2877 case Builtin::BI__builtin_elementwise_exp2:
2879 Intrinsic::exp2,
2880 Intrinsic::experimental_constrained_exp2));
2881 case Builtin::BI__builtin_exp10:
2882 case Builtin::BI__builtin_exp10f:
2883 case Builtin::BI__builtin_exp10f16:
2884 case Builtin::BI__builtin_exp10l:
2885 case Builtin::BI__builtin_exp10f128:
2886 case Builtin::BI__builtin_elementwise_exp10: {
2887 // TODO: strictfp support
2888 if (Builder.getIsFPConstrained())
2889 break;
2890 return RValue::get(
2891 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2892 }
2893 case Builtin::BIfabs:
2894 case Builtin::BIfabsf:
2895 case Builtin::BIfabsl:
2896 case Builtin::BI__builtin_fabs:
2897 case Builtin::BI__builtin_fabsf:
2898 case Builtin::BI__builtin_fabsf16:
2899 case Builtin::BI__builtin_fabsl:
2900 case Builtin::BI__builtin_fabsf128:
2901 return RValue::get(
2902 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2903
2904 case Builtin::BIfloor:
2905 case Builtin::BIfloorf:
2906 case Builtin::BIfloorl:
2907 case Builtin::BI__builtin_floor:
2908 case Builtin::BI__builtin_floorf:
2909 case Builtin::BI__builtin_floorf16:
2910 case Builtin::BI__builtin_floorl:
2911 case Builtin::BI__builtin_floorf128:
2912 case Builtin::BI__builtin_elementwise_floor:
2914 Intrinsic::floor,
2915 Intrinsic::experimental_constrained_floor));
2916
2917 case Builtin::BIfma:
2918 case Builtin::BIfmaf:
2919 case Builtin::BIfmal:
2920 case Builtin::BI__builtin_fma:
2921 case Builtin::BI__builtin_fmaf:
2922 case Builtin::BI__builtin_fmaf16:
2923 case Builtin::BI__builtin_fmal:
2924 case Builtin::BI__builtin_fmaf128:
2925 case Builtin::BI__builtin_elementwise_fma:
2927 Intrinsic::fma,
2928 Intrinsic::experimental_constrained_fma));
2929
2930 case Builtin::BIfmax:
2931 case Builtin::BIfmaxf:
2932 case Builtin::BIfmaxl:
2933 case Builtin::BI__builtin_fmax:
2934 case Builtin::BI__builtin_fmaxf:
2935 case Builtin::BI__builtin_fmaxf16:
2936 case Builtin::BI__builtin_fmaxl:
2937 case Builtin::BI__builtin_fmaxf128: {
2938 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2939 Builder.getFastMathFlags().setNoSignedZeros();
2941 *this, E, Intrinsic::maxnum,
2942 Intrinsic::experimental_constrained_maxnum));
2943 }
2944
2945 case Builtin::BIfmin:
2946 case Builtin::BIfminf:
2947 case Builtin::BIfminl:
2948 case Builtin::BI__builtin_fmin:
2949 case Builtin::BI__builtin_fminf:
2950 case Builtin::BI__builtin_fminf16:
2951 case Builtin::BI__builtin_fminl:
2952 case Builtin::BI__builtin_fminf128: {
2953 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2954 Builder.getFastMathFlags().setNoSignedZeros();
2956 *this, E, Intrinsic::minnum,
2957 Intrinsic::experimental_constrained_minnum));
2958 }
2959
2960 case Builtin::BIfmaximum_num:
2961 case Builtin::BIfmaximum_numf:
2962 case Builtin::BIfmaximum_numl:
2963 case Builtin::BI__builtin_fmaximum_num:
2964 case Builtin::BI__builtin_fmaximum_numf:
2965 case Builtin::BI__builtin_fmaximum_numf16:
2966 case Builtin::BI__builtin_fmaximum_numl:
2967 case Builtin::BI__builtin_fmaximum_numf128:
2968 return RValue::get(
2969 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2970
2971 case Builtin::BIfminimum_num:
2972 case Builtin::BIfminimum_numf:
2973 case Builtin::BIfminimum_numl:
2974 case Builtin::BI__builtin_fminimum_num:
2975 case Builtin::BI__builtin_fminimum_numf:
2976 case Builtin::BI__builtin_fminimum_numf16:
2977 case Builtin::BI__builtin_fminimum_numl:
2978 case Builtin::BI__builtin_fminimum_numf128:
2979 return RValue::get(
2980 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2981
2982 // fmod() is a special-case. It maps to the frem instruction rather than an
2983 // LLVM intrinsic.
2984 case Builtin::BIfmod:
2985 case Builtin::BIfmodf:
2986 case Builtin::BIfmodl:
2987 case Builtin::BI__builtin_fmod:
2988 case Builtin::BI__builtin_fmodf:
2989 case Builtin::BI__builtin_fmodf16:
2990 case Builtin::BI__builtin_fmodl:
2991 case Builtin::BI__builtin_fmodf128:
2992 case Builtin::BI__builtin_elementwise_fmod: {
2993 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2994 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2995 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2996 if (Builder.getIsFPConstrained()) {
2997 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2998 Arg1->getType());
2999 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
3000 } else {
3001 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
3002 }
3003 }
3004
3005 case Builtin::BIlog:
3006 case Builtin::BIlogf:
3007 case Builtin::BIlogl:
3008 case Builtin::BI__builtin_log:
3009 case Builtin::BI__builtin_logf:
3010 case Builtin::BI__builtin_logf16:
3011 case Builtin::BI__builtin_logl:
3012 case Builtin::BI__builtin_logf128:
3013 case Builtin::BI__builtin_elementwise_log:
3015 Intrinsic::log,
3016 Intrinsic::experimental_constrained_log));
3017
3018 case Builtin::BIlog10:
3019 case Builtin::BIlog10f:
3020 case Builtin::BIlog10l:
3021 case Builtin::BI__builtin_log10:
3022 case Builtin::BI__builtin_log10f:
3023 case Builtin::BI__builtin_log10f16:
3024 case Builtin::BI__builtin_log10l:
3025 case Builtin::BI__builtin_log10f128:
3026 case Builtin::BI__builtin_elementwise_log10:
3028 Intrinsic::log10,
3029 Intrinsic::experimental_constrained_log10));
3030
3031 case Builtin::BIlog2:
3032 case Builtin::BIlog2f:
3033 case Builtin::BIlog2l:
3034 case Builtin::BI__builtin_log2:
3035 case Builtin::BI__builtin_log2f:
3036 case Builtin::BI__builtin_log2f16:
3037 case Builtin::BI__builtin_log2l:
3038 case Builtin::BI__builtin_log2f128:
3039 case Builtin::BI__builtin_elementwise_log2:
3041 Intrinsic::log2,
3042 Intrinsic::experimental_constrained_log2));
3043
3044 case Builtin::BInearbyint:
3045 case Builtin::BInearbyintf:
3046 case Builtin::BInearbyintl:
3047 case Builtin::BI__builtin_nearbyint:
3048 case Builtin::BI__builtin_nearbyintf:
3049 case Builtin::BI__builtin_nearbyintl:
3050 case Builtin::BI__builtin_nearbyintf128:
3051 case Builtin::BI__builtin_elementwise_nearbyint:
3053 Intrinsic::nearbyint,
3054 Intrinsic::experimental_constrained_nearbyint));
3055
3056 case Builtin::BIpow:
3057 case Builtin::BIpowf:
3058 case Builtin::BIpowl:
3059 case Builtin::BI__builtin_pow:
3060 case Builtin::BI__builtin_powf:
3061 case Builtin::BI__builtin_powf16:
3062 case Builtin::BI__builtin_powl:
3063 case Builtin::BI__builtin_powf128:
3064 case Builtin::BI__builtin_elementwise_pow:
3066 Intrinsic::pow,
3067 Intrinsic::experimental_constrained_pow));
3068
3069 case Builtin::BIrint:
3070 case Builtin::BIrintf:
3071 case Builtin::BIrintl:
3072 case Builtin::BI__builtin_rint:
3073 case Builtin::BI__builtin_rintf:
3074 case Builtin::BI__builtin_rintf16:
3075 case Builtin::BI__builtin_rintl:
3076 case Builtin::BI__builtin_rintf128:
3077 case Builtin::BI__builtin_elementwise_rint:
3079 Intrinsic::rint,
3080 Intrinsic::experimental_constrained_rint));
3081
3082 case Builtin::BIround:
3083 case Builtin::BIroundf:
3084 case Builtin::BIroundl:
3085 case Builtin::BI__builtin_round:
3086 case Builtin::BI__builtin_roundf:
3087 case Builtin::BI__builtin_roundf16:
3088 case Builtin::BI__builtin_roundl:
3089 case Builtin::BI__builtin_roundf128:
3090 case Builtin::BI__builtin_elementwise_round:
3092 Intrinsic::round,
3093 Intrinsic::experimental_constrained_round));
3094
3095 case Builtin::BIroundeven:
3096 case Builtin::BIroundevenf:
3097 case Builtin::BIroundevenl:
3098 case Builtin::BI__builtin_roundeven:
3099 case Builtin::BI__builtin_roundevenf:
3100 case Builtin::BI__builtin_roundevenf16:
3101 case Builtin::BI__builtin_roundevenl:
3102 case Builtin::BI__builtin_roundevenf128:
3103 case Builtin::BI__builtin_elementwise_roundeven:
3105 Intrinsic::roundeven,
3106 Intrinsic::experimental_constrained_roundeven));
3107
3108 case Builtin::BIsin:
3109 case Builtin::BIsinf:
3110 case Builtin::BIsinl:
3111 case Builtin::BI__builtin_sin:
3112 case Builtin::BI__builtin_sinf:
3113 case Builtin::BI__builtin_sinf16:
3114 case Builtin::BI__builtin_sinl:
3115 case Builtin::BI__builtin_sinf128:
3116 case Builtin::BI__builtin_elementwise_sin:
3118 Intrinsic::sin,
3119 Intrinsic::experimental_constrained_sin));
3120
3121 case Builtin::BIsinh:
3122 case Builtin::BIsinhf:
3123 case Builtin::BIsinhl:
3124 case Builtin::BI__builtin_sinh:
3125 case Builtin::BI__builtin_sinhf:
3126 case Builtin::BI__builtin_sinhf16:
3127 case Builtin::BI__builtin_sinhl:
3128 case Builtin::BI__builtin_sinhf128:
3129 case Builtin::BI__builtin_elementwise_sinh:
3131 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3132
3133 case Builtin::BI__builtin_sincospi:
3134 case Builtin::BI__builtin_sincospif:
3135 case Builtin::BI__builtin_sincospil:
3136 if (Builder.getIsFPConstrained())
3137 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3138 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3139 return RValue::get(nullptr);
3140
3141 case Builtin::BIsincos:
3142 case Builtin::BIsincosf:
3143 case Builtin::BIsincosl:
3144 case Builtin::BI__builtin_sincos:
3145 case Builtin::BI__builtin_sincosf:
3146 case Builtin::BI__builtin_sincosf16:
3147 case Builtin::BI__builtin_sincosl:
3148 case Builtin::BI__builtin_sincosf128:
3149 if (Builder.getIsFPConstrained())
3150 break; // TODO: Emit constrained sincos intrinsic once one exists.
3151 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3152 return RValue::get(nullptr);
3153
3154 case Builtin::BIsqrt:
3155 case Builtin::BIsqrtf:
3156 case Builtin::BIsqrtl:
3157 case Builtin::BI__builtin_sqrt:
3158 case Builtin::BI__builtin_sqrtf:
3159 case Builtin::BI__builtin_sqrtf16:
3160 case Builtin::BI__builtin_sqrtl:
3161 case Builtin::BI__builtin_sqrtf128:
3162 case Builtin::BI__builtin_elementwise_sqrt: {
3164 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3166 return RValue::get(Call);
3167 }
3168
3169 case Builtin::BItan:
3170 case Builtin::BItanf:
3171 case Builtin::BItanl:
3172 case Builtin::BI__builtin_tan:
3173 case Builtin::BI__builtin_tanf:
3174 case Builtin::BI__builtin_tanf16:
3175 case Builtin::BI__builtin_tanl:
3176 case Builtin::BI__builtin_tanf128:
3177 case Builtin::BI__builtin_elementwise_tan:
3179 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3180
3181 case Builtin::BItanh:
3182 case Builtin::BItanhf:
3183 case Builtin::BItanhl:
3184 case Builtin::BI__builtin_tanh:
3185 case Builtin::BI__builtin_tanhf:
3186 case Builtin::BI__builtin_tanhf16:
3187 case Builtin::BI__builtin_tanhl:
3188 case Builtin::BI__builtin_tanhf128:
3189 case Builtin::BI__builtin_elementwise_tanh:
3191 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3192
3193 case Builtin::BItrunc:
3194 case Builtin::BItruncf:
3195 case Builtin::BItruncl:
3196 case Builtin::BI__builtin_trunc:
3197 case Builtin::BI__builtin_truncf:
3198 case Builtin::BI__builtin_truncf16:
3199 case Builtin::BI__builtin_truncl:
3200 case Builtin::BI__builtin_truncf128:
3201 case Builtin::BI__builtin_elementwise_trunc:
3203 Intrinsic::trunc,
3204 Intrinsic::experimental_constrained_trunc));
3205
3206 case Builtin::BIlround:
3207 case Builtin::BIlroundf:
3208 case Builtin::BIlroundl:
3209 case Builtin::BI__builtin_lround:
3210 case Builtin::BI__builtin_lroundf:
3211 case Builtin::BI__builtin_lroundl:
3212 case Builtin::BI__builtin_lroundf128:
3214 *this, E, Intrinsic::lround,
3215 Intrinsic::experimental_constrained_lround));
3216
3217 case Builtin::BIllround:
3218 case Builtin::BIllroundf:
3219 case Builtin::BIllroundl:
3220 case Builtin::BI__builtin_llround:
3221 case Builtin::BI__builtin_llroundf:
3222 case Builtin::BI__builtin_llroundl:
3223 case Builtin::BI__builtin_llroundf128:
3225 *this, E, Intrinsic::llround,
3226 Intrinsic::experimental_constrained_llround));
3227
3228 case Builtin::BIlrint:
3229 case Builtin::BIlrintf:
3230 case Builtin::BIlrintl:
3231 case Builtin::BI__builtin_lrint:
3232 case Builtin::BI__builtin_lrintf:
3233 case Builtin::BI__builtin_lrintl:
3234 case Builtin::BI__builtin_lrintf128:
3236 *this, E, Intrinsic::lrint,
3237 Intrinsic::experimental_constrained_lrint));
3238
3239 case Builtin::BIllrint:
3240 case Builtin::BIllrintf:
3241 case Builtin::BIllrintl:
3242 case Builtin::BI__builtin_llrint:
3243 case Builtin::BI__builtin_llrintf:
3244 case Builtin::BI__builtin_llrintl:
3245 case Builtin::BI__builtin_llrintf128:
3247 *this, E, Intrinsic::llrint,
3248 Intrinsic::experimental_constrained_llrint));
3249 case Builtin::BI__builtin_ldexp:
3250 case Builtin::BI__builtin_ldexpf:
3251 case Builtin::BI__builtin_ldexpl:
3252 case Builtin::BI__builtin_ldexpf16:
3253 case Builtin::BI__builtin_ldexpf128:
3254 case Builtin::BI__builtin_elementwise_ldexp:
3256 *this, E, Intrinsic::ldexp,
3257 Intrinsic::experimental_constrained_ldexp));
3258 default:
3259 break;
3260 }
3261 }
3262
3263 // Check NonnullAttribute/NullabilityArg and Alignment.
3264 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3265 unsigned ParmNum) {
3266 Value *Val = A.emitRawPointer(*this);
3267 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3268 ParmNum);
3269
3270 if (SanOpts.has(SanitizerKind::Alignment)) {
3271 SanitizerSet SkippedChecks;
3272 SkippedChecks.set(SanitizerKind::All);
3273 SkippedChecks.clear(SanitizerKind::Alignment);
3274 SourceLocation Loc = Arg->getExprLoc();
3275 // Strip an implicit cast.
3276 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3277 if (CE->getCastKind() == CK_BitCast)
3278 Arg = CE->getSubExpr();
3279 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3280 SkippedChecks);
3281 }
3282 };
3283
3284 switch (BuiltinIDIfNoAsmLabel) {
3285 default: break;
3286 case Builtin::BI__builtin___CFStringMakeConstantString:
3287 case Builtin::BI__builtin___NSStringMakeConstantString:
3288 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3289 case Builtin::BI__builtin_stdarg_start:
3290 case Builtin::BI__builtin_va_start:
3291 case Builtin::BI__va_start:
3292 case Builtin::BI__builtin_c23_va_start:
3293 case Builtin::BI__builtin_va_end:
3294 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3295 ? EmitScalarExpr(E->getArg(0))
3296 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3297 BuiltinID != Builtin::BI__builtin_va_end);
3298 return RValue::get(nullptr);
3299 case Builtin::BI__builtin_va_copy: {
3300 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3301 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3302 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3303 {DstPtr, SrcPtr});
3304 return RValue::get(nullptr);
3305 }
3306 case Builtin::BIabs:
3307 case Builtin::BIlabs:
3308 case Builtin::BIllabs:
3309 case Builtin::BI__builtin_abs:
3310 case Builtin::BI__builtin_labs:
3311 case Builtin::BI__builtin_llabs: {
3312 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3313
3314 Value *Result;
3315 switch (getLangOpts().getSignedOverflowBehavior()) {
3317 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3318 break;
3320 if (!SanitizeOverflow) {
3321 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3322 break;
3323 }
3324 [[fallthrough]];
3326 // TODO: Somehow handle the corner case when the address of abs is taken.
3327 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3328 break;
3329 }
3330 return RValue::get(Result);
3331 }
3332 case Builtin::BI__builtin_complex: {
3333 Value *Real = EmitScalarExpr(E->getArg(0));
3334 Value *Imag = EmitScalarExpr(E->getArg(1));
3335 return RValue::getComplex({Real, Imag});
3336 }
3337 case Builtin::BI__builtin_conj:
3338 case Builtin::BI__builtin_conjf:
3339 case Builtin::BI__builtin_conjl:
3340 case Builtin::BIconj:
3341 case Builtin::BIconjf:
3342 case Builtin::BIconjl: {
3343 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3344 Value *Real = ComplexVal.first;
3345 Value *Imag = ComplexVal.second;
3346 Imag = Builder.CreateFNeg(Imag, "neg");
3347 return RValue::getComplex(std::make_pair(Real, Imag));
3348 }
3349 case Builtin::BI__builtin_creal:
3350 case Builtin::BI__builtin_crealf:
3351 case Builtin::BI__builtin_creall:
3352 case Builtin::BIcreal:
3353 case Builtin::BIcrealf:
3354 case Builtin::BIcreall: {
3355 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3356 return RValue::get(ComplexVal.first);
3357 }
3358
3359 case Builtin::BI__builtin_preserve_access_index: {
3360 // Only enabled preserved access index region when debuginfo
3361 // is available as debuginfo is needed to preserve user-level
3362 // access pattern.
3363 if (!getDebugInfo()) {
3364 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3365 return RValue::get(EmitScalarExpr(E->getArg(0)));
3366 }
3367
3368 // Nested builtin_preserve_access_index() not supported
3370 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3371 return RValue::get(EmitScalarExpr(E->getArg(0)));
3372 }
3373
3374 IsInPreservedAIRegion = true;
3375 Value *Res = EmitScalarExpr(E->getArg(0));
3376 IsInPreservedAIRegion = false;
3377 return RValue::get(Res);
3378 }
3379
3380 case Builtin::BI__builtin_cimag:
3381 case Builtin::BI__builtin_cimagf:
3382 case Builtin::BI__builtin_cimagl:
3383 case Builtin::BIcimag:
3384 case Builtin::BIcimagf:
3385 case Builtin::BIcimagl: {
3386 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3387 return RValue::get(ComplexVal.second);
3388 }
3389
3390 case Builtin::BI__builtin_clrsb:
3391 case Builtin::BI__builtin_clrsbl:
3392 case Builtin::BI__builtin_clrsbll: {
3393 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or
3394 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3395
3396 llvm::Type *ArgType = ArgValue->getType();
3397 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3398
3399 llvm::Type *ResultType = ConvertType(E->getType());
3400 Value *Zero = llvm::Constant::getNullValue(ArgType);
3401 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3402 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3403 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3404 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3405 Value *Result =
3406 Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3407 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3408 "cast");
3409 return RValue::get(Result);
3410 }
3411 case Builtin::BI__builtin_ctzs:
3412 case Builtin::BI__builtin_ctz:
3413 case Builtin::BI__builtin_ctzl:
3414 case Builtin::BI__builtin_ctzll:
3415 case Builtin::BI__builtin_ctzg:
3416 case Builtin::BI__builtin_elementwise_ctzg: {
3417 bool HasFallback =
3418 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3419 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3420 E->getNumArgs() > 1;
3421
3422 Value *ArgValue =
3423 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3425
3426 llvm::Type *ArgType = ArgValue->getType();
3427 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3428
3429 llvm::Type *ResultType = ConvertType(E->getType());
3430 // The elementwise builtins always exhibit zero-is-undef behaviour
3431 Value *ZeroUndef = Builder.getInt1(
3432 HasFallback || getTarget().isCLZForZeroUndef() ||
3433 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3434 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3435 if (Result->getType() != ResultType)
3436 Result =
3437 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3438 if (!HasFallback)
3439 return RValue::get(Result);
3440
3441 Value *Zero = Constant::getNullValue(ArgType);
3442 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3443 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3444 Value *ResultOrFallback =
3445 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3446 return RValue::get(ResultOrFallback);
3447 }
3448 case Builtin::BI__builtin_clzs:
3449 case Builtin::BI__builtin_clz:
3450 case Builtin::BI__builtin_clzl:
3451 case Builtin::BI__builtin_clzll:
3452 case Builtin::BI__builtin_clzg:
3453 case Builtin::BI__builtin_elementwise_clzg: {
3454 bool HasFallback =
3455 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3456 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3457 E->getNumArgs() > 1;
3458
3459 Value *ArgValue =
3460 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3462
3463 llvm::Type *ArgType = ArgValue->getType();
3464 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3465
3466 llvm::Type *ResultType = ConvertType(E->getType());
3467 // The elementwise builtins always exhibit zero-is-undef behaviour
3468 Value *ZeroUndef = Builder.getInt1(
3469 HasFallback || getTarget().isCLZForZeroUndef() ||
3470 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3471 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3472 if (Result->getType() != ResultType)
3473 Result =
3474 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3475 if (!HasFallback)
3476 return RValue::get(Result);
3477
3478 Value *Zero = Constant::getNullValue(ArgType);
3479 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3480 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3481 Value *ResultOrFallback =
3482 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3483 return RValue::get(ResultOrFallback);
3484 }
3485 case Builtin::BI__builtin_ffs:
3486 case Builtin::BI__builtin_ffsl:
3487 case Builtin::BI__builtin_ffsll: {
3488 // ffs(x) -> x ? cttz(x) + 1 : 0
3489 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3490
3491 llvm::Type *ArgType = ArgValue->getType();
3492 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3493
3494 llvm::Type *ResultType = ConvertType(E->getType());
3495 Value *Tmp =
3496 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3497 llvm::ConstantInt::get(ArgType, 1));
3498 Value *Zero = llvm::Constant::getNullValue(ArgType);
3499 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3500 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3501 if (Result->getType() != ResultType)
3502 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3503 "cast");
3504 return RValue::get(Result);
3505 }
3506 case Builtin::BI__builtin_parity:
3507 case Builtin::BI__builtin_parityl:
3508 case Builtin::BI__builtin_parityll: {
3509 // parity(x) -> ctpop(x) & 1
3510 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3511
3512 llvm::Type *ArgType = ArgValue->getType();
3513 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3514
3515 llvm::Type *ResultType = ConvertType(E->getType());
3516 Value *Tmp = Builder.CreateCall(F, ArgValue);
3517 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3518 if (Result->getType() != ResultType)
3519 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3520 "cast");
3521 return RValue::get(Result);
3522 }
3523 case Builtin::BI__lzcnt16:
3524 case Builtin::BI__lzcnt:
3525 case Builtin::BI__lzcnt64: {
3526 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3527
3528 llvm::Type *ArgType = ArgValue->getType();
3529 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3530
3531 llvm::Type *ResultType = ConvertType(E->getType());
3532 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3533 if (Result->getType() != ResultType)
3534 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3535 "cast");
3536 return RValue::get(Result);
3537 }
3538 case Builtin::BI__popcnt16:
3539 case Builtin::BI__popcnt:
3540 case Builtin::BI__popcnt64:
3541 case Builtin::BI__builtin_popcount:
3542 case Builtin::BI__builtin_popcountl:
3543 case Builtin::BI__builtin_popcountll:
3544 case Builtin::BI__builtin_popcountg: {
3545 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3546
3547 llvm::Type *ArgType = ArgValue->getType();
3548 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3549
3550 llvm::Type *ResultType = ConvertType(E->getType());
3551 Value *Result = Builder.CreateCall(F, ArgValue);
3552 if (Result->getType() != ResultType)
3553 Result =
3554 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3555 return RValue::get(Result);
3556 }
3557 case Builtin::BI__builtin_unpredictable: {
3558 // Always return the argument of __builtin_unpredictable. LLVM does not
3559 // handle this builtin. Metadata for this builtin should be added directly
3560 // to instructions such as branches or switches that use it.
3561 return RValue::get(EmitScalarExpr(E->getArg(0)));
3562 }
3563 case Builtin::BI__builtin_expect: {
3564 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3565 llvm::Type *ArgType = ArgValue->getType();
3566
3567 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3568 // Don't generate llvm.expect on -O0 as the backend won't use it for
3569 // anything.
3570 // Note, we still IRGen ExpectedValue because it could have side-effects.
3571 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3572 return RValue::get(ArgValue);
3573
3574 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3575 Value *Result =
3576 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3577 return RValue::get(Result);
3578 }
3579 case Builtin::BI__builtin_expect_with_probability: {
3580 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3581 llvm::Type *ArgType = ArgValue->getType();
3582
3583 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3584 llvm::APFloat Probability(0.0);
3585 const Expr *ProbArg = E->getArg(2);
3586 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3587 assert(EvalSucceed && "probability should be able to evaluate as float");
3588 (void)EvalSucceed;
3589 bool LoseInfo = false;
3590 Probability.convert(llvm::APFloat::IEEEdouble(),
3591 llvm::RoundingMode::Dynamic, &LoseInfo);
3592 llvm::Type *Ty = ConvertType(ProbArg->getType());
3593 Constant *Confidence = ConstantFP::get(Ty, Probability);
3594 // Don't generate llvm.expect.with.probability on -O0 as the backend
3595 // won't use it for anything.
3596 // Note, we still IRGen ExpectedValue because it could have side-effects.
3597 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3598 return RValue::get(ArgValue);
3599
3600 Function *FnExpect =
3601 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3602 Value *Result = Builder.CreateCall(
3603 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3604 return RValue::get(Result);
3605 }
3606 case Builtin::BI__builtin_assume_aligned: {
3607 const Expr *Ptr = E->getArg(0);
3608 Value *PtrValue = EmitScalarExpr(Ptr);
3609 Value *OffsetValue =
3610 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3611
3612 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3613 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3614 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3615 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3616 llvm::Value::MaximumAlignment);
3617
3618 emitAlignmentAssumption(PtrValue, Ptr,
3619 /*The expr loc is sufficient.*/ SourceLocation(),
3620 AlignmentCI, OffsetValue);
3621 return RValue::get(PtrValue);
3622 }
3623 case Builtin::BI__builtin_assume_dereferenceable: {
3624 const Expr *Ptr = E->getArg(0);
3625 const Expr *Size = E->getArg(1);
3626 Value *PtrValue = EmitScalarExpr(Ptr);
3627 Value *SizeValue = EmitScalarExpr(Size);
3628 if (SizeValue->getType() != IntPtrTy)
3629 SizeValue =
3630 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3631 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3632 return RValue::get(nullptr);
3633 }
3634 case Builtin::BI__assume:
3635 case Builtin::BI__builtin_assume: {
3636 if (E->getArg(0)->HasSideEffects(getContext()))
3637 return RValue::get(nullptr);
3638
3639 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3640 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3641 Builder.CreateCall(FnAssume, ArgValue);
3642 return RValue::get(nullptr);
3643 }
3644 case Builtin::BI__builtin_assume_separate_storage: {
3645 const Expr *Arg0 = E->getArg(0);
3646 const Expr *Arg1 = E->getArg(1);
3647
3648 Value *Value0 = EmitScalarExpr(Arg0);
3649 Value *Value1 = EmitScalarExpr(Arg1);
3650
3651 Value *Values[] = {Value0, Value1};
3652 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3653 Builder.CreateAssumption({OBD});
3654 return RValue::get(nullptr);
3655 }
3656 case Builtin::BI__builtin_allow_runtime_check: {
3657 StringRef Kind =
3658 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3659 LLVMContext &Ctx = CGM.getLLVMContext();
3660 llvm::Value *Allow = Builder.CreateCall(
3661 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3662 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3663 return RValue::get(Allow);
3664 }
3665 case Builtin::BI__builtin_allow_sanitize_check: {
3666 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
3667 StringRef Name =
3668 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3669
3670 // We deliberately allow the use of kernel- and non-kernel names
3671 // interchangably, even when one or the other is enabled. This is consistent
3672 // with the no_sanitize-attribute, which allows either kernel- or non-kernel
3673 // name to disable instrumentation (see CodeGenFunction::StartFunction).
3674 if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
3675 SanitizerKind::KernelAddress) &&
3676 (Name == "address" || Name == "kernel-address")) {
3677 IntrID = Intrinsic::allow_sanitize_address;
3678 } else if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
3679 Name == "thread") {
3680 IntrID = Intrinsic::allow_sanitize_thread;
3681 } else if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Memory |
3682 SanitizerKind::KernelMemory) &&
3683 (Name == "memory" || Name == "kernel-memory")) {
3684 IntrID = Intrinsic::allow_sanitize_memory;
3685 } else if (getLangOpts().Sanitize.hasOneOf(
3686 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress) &&
3687 (Name == "hwaddress" || Name == "kernel-hwaddress")) {
3688 IntrID = Intrinsic::allow_sanitize_hwaddress;
3689 }
3690
3691 if (IntrID != Intrinsic::not_intrinsic) {
3692 llvm::Value *Allow = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3693 return RValue::get(Allow);
3694 }
3695 // If the checked sanitizer is not enabled, we can safely lower to false
3696 // right away. This is also more efficient, since the LowerAllowCheckPass
3697 // must not always be enabled if none of the above sanitizers are enabled.
3698 return RValue::get(Builder.getFalse());
3699 }
3700 case Builtin::BI__arithmetic_fence: {
3701 // Create the builtin call if FastMath is selected, and the target
3702 // supports the builtin, otherwise just return the argument.
3703 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3704 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3705 bool isArithmeticFenceEnabled =
3706 FMF.allowReassoc() &&
3708 QualType ArgType = E->getArg(0)->getType();
3709 if (ArgType->isComplexType()) {
3710 if (isArithmeticFenceEnabled) {
3711 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3712 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3713 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3714 ConvertType(ElementType));
3715 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3716 ConvertType(ElementType));
3717 return RValue::getComplex(std::make_pair(Real, Imag));
3718 }
3719 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3720 Value *Real = ComplexVal.first;
3721 Value *Imag = ComplexVal.second;
3722 return RValue::getComplex(std::make_pair(Real, Imag));
3723 }
3724 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3725 if (isArithmeticFenceEnabled)
3726 return RValue::get(
3727 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3728 return RValue::get(ArgValue);
3729 }
3730 case Builtin::BI__builtin_bswapg: {
3731 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3732 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3733 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3734 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3735 return RValue::get(ArgValue);
3736 assert(((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0)) &&
3737 "LLVM's __builtin_bswapg only supports integer variants that has a "
3738 "multiple of 16 bits as well as a single byte");
3739 return RValue::get(
3740 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3741 }
3742 case Builtin::BI__builtin_bswap16:
3743 case Builtin::BI__builtin_bswap32:
3744 case Builtin::BI__builtin_bswap64:
3745 case Builtin::BI_byteswap_ushort:
3746 case Builtin::BI_byteswap_ulong:
3747 case Builtin::BI_byteswap_uint64: {
3748 return RValue::get(
3749 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3750 }
3751 case Builtin::BI__builtin_bitreverseg: {
3752 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3753 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3754 assert(IntTy &&
3755 "LLVM's __builtin_bitreverseg only support integer variants");
3756 if (IntTy->getBitWidth() == 1)
3757 return RValue::get(ArgValue);
3758 return RValue::get(
3759 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3760 }
3761 case Builtin::BI__builtin_bitreverse8:
3762 case Builtin::BI__builtin_bitreverse16:
3763 case Builtin::BI__builtin_bitreverse32:
3764 case Builtin::BI__builtin_bitreverse64: {
3765 return RValue::get(
3766 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3767 }
3768 case Builtin::BI__builtin_rotateleft8:
3769 case Builtin::BI__builtin_rotateleft16:
3770 case Builtin::BI__builtin_rotateleft32:
3771 case Builtin::BI__builtin_rotateleft64:
3772 case Builtin::BI__builtin_stdc_rotate_left:
3773 case Builtin::BIstdc_rotate_left_uc:
3774 case Builtin::BIstdc_rotate_left_us:
3775 case Builtin::BIstdc_rotate_left_ui:
3776 case Builtin::BIstdc_rotate_left_ul:
3777 case Builtin::BIstdc_rotate_left_ull:
3778 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3779 case Builtin::BI_rotl16:
3780 case Builtin::BI_rotl:
3781 case Builtin::BI_lrotl:
3782 case Builtin::BI_rotl64:
3783 return emitRotate(E, false);
3784
3785 case Builtin::BI__builtin_rotateright8:
3786 case Builtin::BI__builtin_rotateright16:
3787 case Builtin::BI__builtin_rotateright32:
3788 case Builtin::BI__builtin_rotateright64:
3789 case Builtin::BI__builtin_stdc_rotate_right:
3790 case Builtin::BIstdc_rotate_right_uc:
3791 case Builtin::BIstdc_rotate_right_us:
3792 case Builtin::BIstdc_rotate_right_ui:
3793 case Builtin::BIstdc_rotate_right_ul:
3794 case Builtin::BIstdc_rotate_right_ull:
3795 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3796 case Builtin::BI_rotr16:
3797 case Builtin::BI_rotr:
3798 case Builtin::BI_lrotr:
3799 case Builtin::BI_rotr64:
3800 return emitRotate(E, true);
3801
3802 case Builtin::BIstdc_leading_zeros_uc:
3803 case Builtin::BIstdc_leading_zeros_us:
3804 case Builtin::BIstdc_leading_zeros_ui:
3805 case Builtin::BIstdc_leading_zeros_ul:
3806 case Builtin::BIstdc_leading_zeros_ull:
3807 case Builtin::BI__builtin_stdc_leading_zeros:
3808 return emitStdcCountIntrinsic(E, Intrinsic::ctlz, /*InvertArg=*/false);
3809 case Builtin::BIstdc_leading_ones_uc:
3810 case Builtin::BIstdc_leading_ones_us:
3811 case Builtin::BIstdc_leading_ones_ui:
3812 case Builtin::BIstdc_leading_ones_ul:
3813 case Builtin::BIstdc_leading_ones_ull:
3814 case Builtin::BI__builtin_stdc_leading_ones:
3815 return emitStdcCountIntrinsic(E, Intrinsic::ctlz, /*InvertArg=*/true);
3816 case Builtin::BIstdc_trailing_zeros_uc:
3817 case Builtin::BIstdc_trailing_zeros_us:
3818 case Builtin::BIstdc_trailing_zeros_ui:
3819 case Builtin::BIstdc_trailing_zeros_ul:
3820 case Builtin::BIstdc_trailing_zeros_ull:
3821 case Builtin::BI__builtin_stdc_trailing_zeros:
3822 return emitStdcCountIntrinsic(E, Intrinsic::cttz, /*InvertArg=*/false);
3823 case Builtin::BIstdc_trailing_ones_uc:
3824 case Builtin::BIstdc_trailing_ones_us:
3825 case Builtin::BIstdc_trailing_ones_ui:
3826 case Builtin::BIstdc_trailing_ones_ul:
3827 case Builtin::BIstdc_trailing_ones_ull:
3828 case Builtin::BI__builtin_stdc_trailing_ones:
3829 return emitStdcCountIntrinsic(E, Intrinsic::cttz, /*InvertArg=*/true);
3830 case Builtin::BIstdc_first_leading_zero_uc:
3831 case Builtin::BIstdc_first_leading_zero_us:
3832 case Builtin::BIstdc_first_leading_zero_ui:
3833 case Builtin::BIstdc_first_leading_zero_ul:
3834 case Builtin::BIstdc_first_leading_zero_ull:
3835 case Builtin::BI__builtin_stdc_first_leading_zero:
3836 return emitStdcFirstBit(E, Intrinsic::ctlz, /*InvertArg=*/true);
3837 case Builtin::BIstdc_first_leading_one_uc:
3838 case Builtin::BIstdc_first_leading_one_us:
3839 case Builtin::BIstdc_first_leading_one_ui:
3840 case Builtin::BIstdc_first_leading_one_ul:
3841 case Builtin::BIstdc_first_leading_one_ull:
3842 case Builtin::BI__builtin_stdc_first_leading_one:
3843 return emitStdcFirstBit(E, Intrinsic::ctlz, /*InvertArg=*/false);
3844 case Builtin::BIstdc_first_trailing_zero_uc:
3845 case Builtin::BIstdc_first_trailing_zero_us:
3846 case Builtin::BIstdc_first_trailing_zero_ui:
3847 case Builtin::BIstdc_first_trailing_zero_ul:
3848 case Builtin::BIstdc_first_trailing_zero_ull:
3849 case Builtin::BI__builtin_stdc_first_trailing_zero:
3850 return emitStdcFirstBit(E, Intrinsic::cttz, /*InvertArg=*/true);
3851 case Builtin::BIstdc_first_trailing_one_uc:
3852 case Builtin::BIstdc_first_trailing_one_us:
3853 case Builtin::BIstdc_first_trailing_one_ui:
3854 case Builtin::BIstdc_first_trailing_one_ul:
3855 case Builtin::BIstdc_first_trailing_one_ull:
3856 case Builtin::BI__builtin_stdc_first_trailing_one:
3857 return emitStdcFirstBit(E, Intrinsic::cttz, /*InvertArg=*/false);
3858 case Builtin::BIstdc_count_zeros_uc:
3859 case Builtin::BIstdc_count_zeros_us:
3860 case Builtin::BIstdc_count_zeros_ui:
3861 case Builtin::BIstdc_count_zeros_ul:
3862 case Builtin::BIstdc_count_zeros_ull:
3863 case Builtin::BI__builtin_stdc_count_zeros:
3864 return emitStdcBitWidthMinus(E, Intrinsic::ctpop, /*IsPop=*/true);
3865 case Builtin::BIstdc_count_ones_uc:
3866 case Builtin::BIstdc_count_ones_us:
3867 case Builtin::BIstdc_count_ones_ui:
3868 case Builtin::BIstdc_count_ones_ul:
3869 case Builtin::BIstdc_count_ones_ull:
3870 case Builtin::BI__builtin_stdc_count_ones:
3871 return emitStdcCountIntrinsic(E, Intrinsic::ctpop, /*InvertArg=*/false,
3872 /*IsPop=*/true);
3873 case Builtin::BIstdc_has_single_bit_uc:
3874 case Builtin::BIstdc_has_single_bit_us:
3875 case Builtin::BIstdc_has_single_bit_ui:
3876 case Builtin::BIstdc_has_single_bit_ul:
3877 case Builtin::BIstdc_has_single_bit_ull:
3878 case Builtin::BI__builtin_stdc_has_single_bit: {
3879 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3880 llvm::Type *ArgType = ArgValue->getType();
3881 Value *One = ConstantInt::get(ArgType, 1);
3882 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3883 Value *PopCnt = Builder.CreateCall(F, ArgValue);
3884 return RValue::get(Builder.CreateICmpEQ(PopCnt, One));
3885 }
3886 case Builtin::BIstdc_bit_width_uc:
3887 case Builtin::BIstdc_bit_width_us:
3888 case Builtin::BIstdc_bit_width_ui:
3889 case Builtin::BIstdc_bit_width_ul:
3890 case Builtin::BIstdc_bit_width_ull:
3891 case Builtin::BI__builtin_stdc_bit_width:
3892 return emitStdcBitWidthMinus(E, Intrinsic::ctlz, /*IsPop=*/false);
3893 case Builtin::BIstdc_bit_floor_uc:
3894 case Builtin::BIstdc_bit_floor_us:
3895 case Builtin::BIstdc_bit_floor_ui:
3896 case Builtin::BIstdc_bit_floor_ul:
3897 case Builtin::BIstdc_bit_floor_ull:
3898 case Builtin::BI__builtin_stdc_bit_floor: {
3899 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3900 llvm::Type *ArgType = ArgValue->getType();
3901 unsigned BitWidth = ArgType->getIntegerBitWidth();
3902 Value *Zero = ConstantInt::get(ArgType, 0);
3903 Value *One = ConstantInt::get(ArgType, 1);
3904 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3905 Value *LZ = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
3906 Value *ShiftAmt =
3907 Builder.CreateSub(ConstantInt::get(ArgType, BitWidth - 1), LZ);
3908 Value *Shifted = Builder.CreateShl(One, ShiftAmt);
3909 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero);
3910 Value *Result = Builder.CreateSelect(IsZero, Zero, Shifted);
3911 return RValue::get(Result);
3912 }
3913 case Builtin::BIstdc_bit_ceil_uc:
3914 case Builtin::BIstdc_bit_ceil_us:
3915 case Builtin::BIstdc_bit_ceil_ui:
3916 case Builtin::BIstdc_bit_ceil_ul:
3917 case Builtin::BIstdc_bit_ceil_ull:
3918 case Builtin::BI__builtin_stdc_bit_ceil: {
3919 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3920 llvm::Type *ArgType = ArgValue->getType();
3921 unsigned BitWidth = ArgType->getIntegerBitWidth();
3922 Value *One = ConstantInt::get(ArgType, 1);
3923 Value *Two = ConstantInt::get(ArgType, 2);
3924
3925 Value *IsLEOne = Builder.CreateICmpULE(ArgValue, One, "isleone");
3926
3927 BasicBlock *EntryBB = Builder.GetInsertBlock();
3928 BasicBlock *CalcBB = createBasicBlock("bitceil.calc", CurFn);
3929 BasicBlock *MergeBB = createBasicBlock("bitceil.merge", CurFn);
3930
3931 Builder.CreateCondBr(IsLEOne, MergeBB, CalcBB);
3932
3933 Builder.SetInsertPoint(CalcBB);
3934 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3935 Value *ArgMinusOne = Builder.CreateSub(ArgValue, One);
3936 Value *LZ = Builder.CreateCall(F, {ArgMinusOne, Builder.getFalse()});
3937 // 2<<(BitWidth-1-LZ) to get the next power of two. The shift
3938 // amount is always in [0, BitWidth-1], so when LZ==0 (argument has its MSB
3939 // set), the result wraps to 0
3940 Value *ShiftAmt =
3941 Builder.CreateSub(ConstantInt::get(ArgType, BitWidth - 1), LZ);
3942 Value *Tmp = Builder.CreateShl(Two, ShiftAmt);
3943 Builder.CreateBr(MergeBB);
3944
3945 Builder.SetInsertPoint(MergeBB);
3946 PHINode *Phi = Builder.CreatePHI(ArgType, 2);
3947 Phi->addIncoming(One, EntryBB);
3948 Phi->addIncoming(Tmp, CalcBB);
3949 return RValue::get(Phi);
3950 }
3951
3952 case Builtin::BI__builtin_constant_p: {
3953 llvm::Type *ResultType = ConvertType(E->getType());
3954
3955 const Expr *Arg = E->getArg(0);
3956 QualType ArgType = Arg->getType();
3957 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3958 // and likely a mistake.
3959 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3960 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3961 // Per the GCC documentation, only numeric constants are recognized after
3962 // inlining.
3963 return RValue::get(ConstantInt::get(ResultType, 0));
3964
3965 if (Arg->HasSideEffects(getContext()))
3966 // The argument is unevaluated, so be conservative if it might have
3967 // side-effects.
3968 return RValue::get(ConstantInt::get(ResultType, 0));
3969
3970 Value *ArgValue = EmitScalarExpr(Arg);
3971 if (ArgType->isObjCObjectPointerType()) {
3972 // Convert Objective-C objects to id because we cannot distinguish between
3973 // LLVM types for Obj-C classes as they are opaque.
3974 ArgType = CGM.getContext().getObjCIdType();
3975 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3976 }
3977 Function *F =
3978 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3979 Value *Result = Builder.CreateCall(F, ArgValue);
3980 if (Result->getType() != ResultType)
3981 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3982 return RValue::get(Result);
3983 }
3984 case Builtin::BI__builtin_dynamic_object_size:
3985 case Builtin::BI__builtin_object_size: {
3986 unsigned Type =
3987 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3988 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3989
3990 // We pass this builtin onto the optimizer so that it can figure out the
3991 // object size in more complex cases.
3992 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3993 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3994 /*EmittedE=*/nullptr, IsDynamic));
3995 }
3996 case Builtin::BI__builtin_counted_by_ref: {
3997 // Default to returning '(void *) 0'.
3998 llvm::Value *Result = llvm::ConstantPointerNull::get(
3999 llvm::PointerType::getUnqual(getLLVMContext()));
4000
4001 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
4002
4003 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
4004 UO && UO->getOpcode() == UO_AddrOf) {
4005 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
4006
4007 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
4008 Arg = ASE->getBase()->IgnoreParenImpCasts();
4009 }
4010
4011 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
4012 if (auto *CATy =
4014 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
4015 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
4016 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
4017 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
4018 else
4019 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
4020 }
4021 }
4022
4023 return RValue::get(Result);
4024 }
4025 case Builtin::BI__builtin_prefetch: {
4026 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
4027 // FIXME: Technically these constants should of type 'int', yes?
4028 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
4029 llvm::ConstantInt::get(Int32Ty, 0);
4030 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
4031 llvm::ConstantInt::get(Int32Ty, 3);
4032 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
4033 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
4034 Builder.CreateCall(F, {Address, RW, Locality, Data});
4035 return RValue::get(nullptr);
4036 }
4037 case Builtin::BI__builtin_readcyclecounter: {
4038 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
4039 return RValue::get(Builder.CreateCall(F));
4040 }
4041 case Builtin::BI__builtin_readsteadycounter: {
4042 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
4043 return RValue::get(Builder.CreateCall(F));
4044 }
4045 case Builtin::BI__builtin___clear_cache: {
4046 Value *Begin = EmitScalarExpr(E->getArg(0));
4047 Value *End = EmitScalarExpr(E->getArg(1));
4048 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache, {CGM.DefaultPtrTy});
4049 return RValue::get(Builder.CreateCall(F, {Begin, End}));
4050 }
4051 case Builtin::BI__builtin_trap:
4052 EmitTrapCall(Intrinsic::trap);
4053 return RValue::get(nullptr);
4054 case Builtin::BI__builtin_verbose_trap: {
4055 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
4056 if (getDebugInfo()) {
4057 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
4058 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
4060 }
4061 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
4062 // Currently no attempt is made to prevent traps from being merged.
4063 EmitTrapCall(Intrinsic::trap);
4064 return RValue::get(nullptr);
4065 }
4066 case Builtin::BI__debugbreak:
4067 EmitTrapCall(Intrinsic::debugtrap);
4068 return RValue::get(nullptr);
4069 case Builtin::BI__builtin_unreachable: {
4071
4072 // We do need to preserve an insertion point.
4073 EmitBlock(createBasicBlock("unreachable.cont"));
4074
4075 return RValue::get(nullptr);
4076 }
4077
4078 case Builtin::BI__builtin_powi:
4079 case Builtin::BI__builtin_powif:
4080 case Builtin::BI__builtin_powil: {
4081 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
4082 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
4083
4084 if (Builder.getIsFPConstrained()) {
4085 // FIXME: llvm.powi has 2 mangling types,
4086 // llvm.experimental.constrained.powi has one.
4087 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4088 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
4089 Src0->getType());
4090 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
4091 }
4092
4093 Function *F = CGM.getIntrinsic(Intrinsic::powi,
4094 { Src0->getType(), Src1->getType() });
4095 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
4096 }
4097 case Builtin::BI__builtin_frexpl: {
4098 // Linux PPC will not be adding additional PPCDoubleDouble support.
4099 // WIP to switch default to IEEE long double. Will emit libcall for
4100 // frexpl instead of legalizing this type in the BE.
4101 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
4102 break;
4103 [[fallthrough]];
4104 }
4105 case Builtin::BI__builtin_frexp:
4106 case Builtin::BI__builtin_frexpf:
4107 case Builtin::BI__builtin_frexpf128:
4108 case Builtin::BI__builtin_frexpf16:
4109 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
4110 case Builtin::BImodf:
4111 case Builtin::BImodff:
4112 case Builtin::BImodfl:
4113 case Builtin::BI__builtin_modf:
4114 case Builtin::BI__builtin_modff:
4115 case Builtin::BI__builtin_modfl:
4116 if (Builder.getIsFPConstrained())
4117 break; // TODO: Emit constrained modf intrinsic once one exists.
4118 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
4119 case Builtin::BI__builtin_isgreater:
4120 case Builtin::BI__builtin_isgreaterequal:
4121 case Builtin::BI__builtin_isless:
4122 case Builtin::BI__builtin_islessequal:
4123 case Builtin::BI__builtin_islessgreater:
4124 case Builtin::BI__builtin_isunordered: {
4125 // Ordered comparisons: we know the arguments to these are matching scalar
4126 // floating point values.
4127 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4128 Value *LHS = EmitScalarExpr(E->getArg(0));
4129 Value *RHS = EmitScalarExpr(E->getArg(1));
4130
4131 switch (BuiltinID) {
4132 default: llvm_unreachable("Unknown ordered comparison");
4133 case Builtin::BI__builtin_isgreater:
4134 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
4135 break;
4136 case Builtin::BI__builtin_isgreaterequal:
4137 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
4138 break;
4139 case Builtin::BI__builtin_isless:
4140 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
4141 break;
4142 case Builtin::BI__builtin_islessequal:
4143 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
4144 break;
4145 case Builtin::BI__builtin_islessgreater:
4146 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
4147 break;
4148 case Builtin::BI__builtin_isunordered:
4149 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
4150 break;
4151 }
4152 // ZExt bool to int type.
4153 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
4154 }
4155
4156 case Builtin::BI__builtin_isnan: {
4157 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4158 Value *V = EmitScalarExpr(E->getArg(0));
4159 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
4160 return RValue::get(Result);
4161 return RValue::get(
4162 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
4163 ConvertType(E->getType())));
4164 }
4165
4166 case Builtin::BI__builtin_issignaling: {
4167 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4168 Value *V = EmitScalarExpr(E->getArg(0));
4169 return RValue::get(
4170 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
4171 ConvertType(E->getType())));
4172 }
4173
4174 case Builtin::BI__builtin_isinf: {
4175 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4176 Value *V = EmitScalarExpr(E->getArg(0));
4177 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
4178 return RValue::get(Result);
4179 return RValue::get(
4180 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
4181 ConvertType(E->getType())));
4182 }
4183
4184 case Builtin::BIfinite:
4185 case Builtin::BI__finite:
4186 case Builtin::BIfinitef:
4187 case Builtin::BI__finitef:
4188 case Builtin::BIfinitel:
4189 case Builtin::BI__finitel:
4190 case Builtin::BI__builtin_isfinite: {
4191 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4192 Value *V = EmitScalarExpr(E->getArg(0));
4193 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
4194 return RValue::get(Result);
4195 return RValue::get(
4196 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
4197 ConvertType(E->getType())));
4198 }
4199
4200 case Builtin::BI__builtin_isnormal: {
4201 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4202 Value *V = EmitScalarExpr(E->getArg(0));
4203 return RValue::get(
4204 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
4205 ConvertType(E->getType())));
4206 }
4207
4208 case Builtin::BI__builtin_issubnormal: {
4209 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4210 Value *V = EmitScalarExpr(E->getArg(0));
4211 return RValue::get(
4212 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
4213 ConvertType(E->getType())));
4214 }
4215
4216 case Builtin::BI__builtin_iszero: {
4217 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4218 Value *V = EmitScalarExpr(E->getArg(0));
4219 return RValue::get(
4220 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
4221 ConvertType(E->getType())));
4222 }
4223
4224 case Builtin::BI__builtin_isfpclass: {
4226 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
4227 break;
4228 uint64_t Test = Result.Val.getInt().getLimitedValue();
4229 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4230 Value *V = EmitScalarExpr(E->getArg(0));
4231 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
4232 ConvertType(E->getType())));
4233 }
4234
4235 case Builtin::BI__builtin_nondeterministic_value: {
4236 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
4237
4238 Value *Result = PoisonValue::get(Ty);
4239 Result = Builder.CreateFreeze(Result);
4240
4241 return RValue::get(Result);
4242 }
4243
4244 case Builtin::BI__builtin_elementwise_abs: {
4245 Value *Result;
4246 QualType QT = E->getArg(0)->getType();
4247
4248 if (auto *VecTy = QT->getAs<VectorType>())
4249 QT = VecTy->getElementType();
4250 if (QT->isIntegerType())
4251 Result = Builder.CreateBinaryIntrinsic(
4252 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
4253 nullptr, "elt.abs");
4254 else
4255 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
4256 "elt.abs");
4257
4258 return RValue::get(Result);
4259 }
4260 case Builtin::BI__builtin_elementwise_bitreverse:
4262 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
4263 case Builtin::BI__builtin_elementwise_popcount:
4265 *this, E, Intrinsic::ctpop, "elt.ctpop"));
4266 case Builtin::BI__builtin_elementwise_canonicalize:
4268 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
4269 case Builtin::BI__builtin_elementwise_copysign:
4270 return RValue::get(
4271 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
4272 case Builtin::BI__builtin_elementwise_fshl:
4273 return RValue::get(
4274 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
4275 case Builtin::BI__builtin_elementwise_fshr:
4276 return RValue::get(
4277 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
4278
4279 case Builtin::BI__builtin_elementwise_add_sat:
4280 case Builtin::BI__builtin_elementwise_sub_sat: {
4281 Value *Op0 = EmitScalarExpr(E->getArg(0));
4282 Value *Op1 = EmitScalarExpr(E->getArg(1));
4283 Value *Result;
4284 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4285 QualType Ty = E->getArg(0)->getType();
4286 if (auto *VecTy = Ty->getAs<VectorType>())
4287 Ty = VecTy->getElementType();
4288 bool IsSigned = Ty->isSignedIntegerType();
4289 unsigned Opc;
4290 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4291 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4292 else
4293 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4294 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4295 return RValue::get(Result);
4296 }
4297
4298 case Builtin::BI__builtin_elementwise_max: {
4299 Value *Op0 = EmitScalarExpr(E->getArg(0));
4300 Value *Op1 = EmitScalarExpr(E->getArg(1));
4301 Value *Result;
4302 if (Op0->getType()->isIntOrIntVectorTy()) {
4303 QualType Ty = E->getArg(0)->getType();
4304 if (auto *VecTy = Ty->getAs<VectorType>())
4305 Ty = VecTy->getElementType();
4306 Result = Builder.CreateBinaryIntrinsic(
4307 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4308 Op1, nullptr, "elt.max");
4309 } else
4310 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4311 return RValue::get(Result);
4312 }
4313 case Builtin::BI__builtin_elementwise_min: {
4314 Value *Op0 = EmitScalarExpr(E->getArg(0));
4315 Value *Op1 = EmitScalarExpr(E->getArg(1));
4316 Value *Result;
4317 if (Op0->getType()->isIntOrIntVectorTy()) {
4318 QualType Ty = E->getArg(0)->getType();
4319 if (auto *VecTy = Ty->getAs<VectorType>())
4320 Ty = VecTy->getElementType();
4321 Result = Builder.CreateBinaryIntrinsic(
4322 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4323 Op1, nullptr, "elt.min");
4324 } else
4325 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4326 return RValue::get(Result);
4327 }
4328
4329 case Builtin::BI__builtin_elementwise_maxnum: {
4330 Value *Op0 = EmitScalarExpr(E->getArg(0));
4331 Value *Op1 = EmitScalarExpr(E->getArg(1));
4332 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4333 Op1, nullptr, "elt.maxnum");
4334 return RValue::get(Result);
4335 }
4336
4337 case Builtin::BI__builtin_elementwise_minnum: {
4338 Value *Op0 = EmitScalarExpr(E->getArg(0));
4339 Value *Op1 = EmitScalarExpr(E->getArg(1));
4340 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4341 Op1, nullptr, "elt.minnum");
4342 return RValue::get(Result);
4343 }
4344
4345 case Builtin::BI__builtin_elementwise_maximum: {
4346 Value *Op0 = EmitScalarExpr(E->getArg(0));
4347 Value *Op1 = EmitScalarExpr(E->getArg(1));
4348 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4349 nullptr, "elt.maximum");
4350 return RValue::get(Result);
4351 }
4352
4353 case Builtin::BI__builtin_elementwise_minimum: {
4354 Value *Op0 = EmitScalarExpr(E->getArg(0));
4355 Value *Op1 = EmitScalarExpr(E->getArg(1));
4356 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4357 nullptr, "elt.minimum");
4358 return RValue::get(Result);
4359 }
4360
4361 case Builtin::BI__builtin_elementwise_maximumnum: {
4362 Value *Op0 = EmitScalarExpr(E->getArg(0));
4363 Value *Op1 = EmitScalarExpr(E->getArg(1));
4364 Value *Result = Builder.CreateBinaryIntrinsic(
4365 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4366 return RValue::get(Result);
4367 }
4368
4369 case Builtin::BI__builtin_elementwise_minimumnum: {
4370 Value *Op0 = EmitScalarExpr(E->getArg(0));
4371 Value *Op1 = EmitScalarExpr(E->getArg(1));
4372 Value *Result = Builder.CreateBinaryIntrinsic(
4373 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4374 return RValue::get(Result);
4375 }
4376
4377 case Builtin::BI__builtin_reduce_max: {
4378 auto GetIntrinsicID = [this](QualType QT) {
4379 if (auto *VecTy = QT->getAs<VectorType>())
4380 QT = VecTy->getElementType();
4381 else if (QT->isSizelessVectorType())
4382 QT = QT->getSizelessVectorEltType(CGM.getContext());
4383
4384 if (QT->isSignedIntegerType())
4385 return Intrinsic::vector_reduce_smax;
4386 if (QT->isUnsignedIntegerType())
4387 return Intrinsic::vector_reduce_umax;
4388 assert(QT->isFloatingType() && "must have a float here");
4389 return Intrinsic::vector_reduce_fmax;
4390 };
4392 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4393 }
4394
4395 case Builtin::BI__builtin_reduce_min: {
4396 auto GetIntrinsicID = [this](QualType QT) {
4397 if (auto *VecTy = QT->getAs<VectorType>())
4398 QT = VecTy->getElementType();
4399 else if (QT->isSizelessVectorType())
4400 QT = QT->getSizelessVectorEltType(CGM.getContext());
4401
4402 if (QT->isSignedIntegerType())
4403 return Intrinsic::vector_reduce_smin;
4404 if (QT->isUnsignedIntegerType())
4405 return Intrinsic::vector_reduce_umin;
4406 assert(QT->isFloatingType() && "must have a float here");
4407 return Intrinsic::vector_reduce_fmin;
4408 };
4409
4411 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4412 }
4413
4414 case Builtin::BI__builtin_reduce_add:
4416 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4417 case Builtin::BI__builtin_reduce_mul:
4419 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4420 case Builtin::BI__builtin_reduce_xor:
4422 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4423 case Builtin::BI__builtin_reduce_or:
4425 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4426 case Builtin::BI__builtin_reduce_and:
4428 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4429 case Builtin::BI__builtin_reduce_maximum:
4431 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4432 case Builtin::BI__builtin_reduce_minimum:
4434 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4435 case Builtin::BI__builtin_reduce_assoc_fadd:
4436 case Builtin::BI__builtin_reduce_in_order_fadd: {
4437 llvm::Value *Vector = EmitScalarExpr(E->getArg(0));
4438 llvm::Type *ScalarTy = Vector->getType()->getScalarType();
4439 llvm::Value *StartValue = nullptr;
4440 if (E->getNumArgs() == 2)
4441 StartValue = Builder.CreateFPCast(EmitScalarExpr(E->getArg(1)), ScalarTy);
4442 llvm::Value *Args[] = {/*start_value=*/StartValue
4443 ? StartValue
4444 : llvm::ConstantFP::get(ScalarTy, -0.0F),
4445 /*vector=*/Vector};
4446 llvm::Function *F =
4447 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Vector->getType());
4448 llvm::CallBase *Reduce = Builder.CreateCall(F, Args, "rdx.addf");
4449 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_reduce_assoc_fadd) {
4450 // `__builtin_reduce_assoc_fadd` is an associative reduction which
4451 // requires the reassoc FMF flag.
4452 llvm::FastMathFlags FMF;
4453 FMF.setAllowReassoc();
4454 cast<llvm::CallBase>(Reduce)->setFastMathFlags(FMF);
4455 }
4456 return RValue::get(Reduce);
4457 }
4458
4459 case Builtin::BI__builtin_matrix_transpose: {
4460 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4461 Value *MatValue = EmitScalarExpr(E->getArg(0));
4462 MatrixBuilder MB(Builder);
4463 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4464 MatrixTy->getNumColumns());
4465 return RValue::get(Result);
4466 }
4467
4468 case Builtin::BI__builtin_matrix_column_major_load: {
4469 MatrixBuilder MB(Builder);
4470 // Emit everything that isn't dependent on the first parameter type
4471 Value *Stride = EmitScalarExpr(E->getArg(3));
4472 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4473 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4474 assert(PtrTy && "arg0 must be of pointer type");
4475 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4476
4479 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4480 0);
4481 Value *Result = MB.CreateColumnMajorLoad(
4482 Src.getElementType(), Src.emitRawPointer(*this),
4483 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4484 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4485 return RValue::get(Result);
4486 }
4487
4488 case Builtin::BI__builtin_matrix_column_major_store: {
4489 MatrixBuilder MB(Builder);
4490 Value *Matrix = EmitScalarExpr(E->getArg(0));
4492 Value *Stride = EmitScalarExpr(E->getArg(2));
4493
4494 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4495 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4496 assert(PtrTy && "arg1 must be of pointer type");
4497 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4498
4500 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4501 0);
4502 Value *Result = MB.CreateColumnMajorStore(
4503 Matrix, Dst.emitRawPointer(*this),
4504 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4505 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4507 return RValue::get(Result);
4508 }
4509
4510 case Builtin::BI__builtin_masked_load:
4511 case Builtin::BI__builtin_masked_expand_load: {
4512 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4513 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4514
4515 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4516 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4517 if (E->getNumArgs() > 2)
4518 PassThru = EmitScalarExpr(E->getArg(2));
4519
4520 CharUnits Align = CGM.getNaturalTypeAlignment(
4521 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4522
4523 llvm::Value *Result;
4524 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4525 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4526 PassThru, "masked_load");
4527 } else {
4528 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4529 Result =
4530 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4531 }
4532 return RValue::get(Result);
4533 };
4534 case Builtin::BI__builtin_masked_gather: {
4535 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4536 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4537 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4538
4539 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4540 CharUnits Align = CGM.getNaturalTypeAlignment(
4541 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4542
4543 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4544 if (E->getNumArgs() > 3)
4545 PassThru = EmitScalarExpr(E->getArg(3));
4546
4547 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4549 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4550
4551 llvm::Value *Result = Builder.CreateMaskedGather(
4552 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4553 return RValue::get(Result);
4554 }
4555 case Builtin::BI__builtin_masked_store:
4556 case Builtin::BI__builtin_masked_compress_store: {
4557 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4558 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4559 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4560
4561 QualType ValTy = E->getArg(1)->getType();
4562 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4563
4564 CharUnits Align = CGM.getNaturalTypeAlignment(
4566 nullptr);
4567
4568 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4569 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4570 } else {
4571 llvm::Function *F =
4572 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4573 Builder.CreateCall(F, {Val, Ptr, Mask});
4574 }
4575 return RValue::get(nullptr);
4576 }
4577 case Builtin::BI__builtin_masked_scatter: {
4578 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4579 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4580 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4581 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4582
4583 CharUnits Align = CGM.getNaturalTypeAlignment(
4585 nullptr);
4586
4587 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4588 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4589 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4590
4591 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4592 return RValue();
4593 }
4594 case Builtin::BI__builtin_isinf_sign: {
4595 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4596 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4597 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4598 Value *Arg = EmitScalarExpr(E->getArg(0));
4599 Value *AbsArg = EmitFAbs(*this, Arg);
4600 Value *IsInf = Builder.CreateFCmpOEQ(
4601 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4602 Value *IsNeg = EmitSignBit(*this, Arg);
4603
4604 llvm::Type *IntTy = ConvertType(E->getType());
4605 Value *Zero = Constant::getNullValue(IntTy);
4606 Value *One = ConstantInt::get(IntTy, 1);
4607 Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
4608 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4609 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4610 return RValue::get(Result);
4611 }
4612
4613 case Builtin::BI__builtin_flt_rounds: {
4614 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4615
4616 llvm::Type *ResultType = ConvertType(E->getType());
4617 Value *Result = Builder.CreateCall(F);
4618 if (Result->getType() != ResultType)
4619 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4620 "cast");
4621 return RValue::get(Result);
4622 }
4623
4624 case Builtin::BI__builtin_set_flt_rounds: {
4625 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4626
4627 Value *V = EmitScalarExpr(E->getArg(0));
4628 Builder.CreateCall(F, V);
4629 return RValue::get(nullptr);
4630 }
4631
4632 case Builtin::BI__builtin_fpclassify: {
4633 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4634 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4635 Value *V = EmitScalarExpr(E->getArg(5));
4636 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4637
4638 // Create Result
4639 BasicBlock *Begin = Builder.GetInsertBlock();
4640 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4641 Builder.SetInsertPoint(End);
4642 PHINode *Result =
4643 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4644 "fpclassify_result");
4645
4646 // if (V==0) return FP_ZERO
4647 Builder.SetInsertPoint(Begin);
4648 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4649 "iszero");
4650 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4651 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4652 Builder.CreateCondBr(IsZero, End, NotZero);
4653 Result->addIncoming(ZeroLiteral, Begin);
4654
4655 // if (V != V) return FP_NAN
4656 Builder.SetInsertPoint(NotZero);
4657 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4658 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4659 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4660 Builder.CreateCondBr(IsNan, End, NotNan);
4661 Result->addIncoming(NanLiteral, NotZero);
4662
4663 // if (fabs(V) == infinity) return FP_INFINITY
4664 Builder.SetInsertPoint(NotNan);
4665 Value *VAbs = EmitFAbs(*this, V);
4666 Value *IsInf =
4667 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4668 "isinf");
4669 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4670 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4671 Builder.CreateCondBr(IsInf, End, NotInf);
4672 Result->addIncoming(InfLiteral, NotNan);
4673
4674 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4675 Builder.SetInsertPoint(NotInf);
4676 APFloat Smallest = APFloat::getSmallestNormalized(
4677 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4678 Value *IsNormal =
4679 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4680 "isnormal");
4681 Value *NormalResult =
4682 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4683 EmitScalarExpr(E->getArg(3)));
4684 Builder.CreateBr(End);
4685 Result->addIncoming(NormalResult, NotInf);
4686
4687 // return Result
4688 Builder.SetInsertPoint(End);
4689 return RValue::get(Result);
4690 }
4691
4692 // An alloca will always return a pointer to the alloca (stack) address
4693 // space. This address space need not be the same as the AST / Language
4694 // default (e.g. in C / C++ auto vars are in the generic address space). At
4695 // the AST level this is handled within CreateTempAlloca et al., but for the
4696 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4697 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4698 case Builtin::BIalloca:
4699 case Builtin::BI_alloca:
4700 case Builtin::BI__builtin_alloca_uninitialized:
4701 case Builtin::BI__builtin_alloca: {
4702 Value *Size = EmitScalarExpr(E->getArg(0));
4703 const TargetInfo &TI = getContext().getTargetInfo();
4704 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4705 const Align SuitableAlignmentInBytes =
4706 CGM.getContext()
4707 .toCharUnitsFromBits(TI.getSuitableAlign())
4708 .getAsAlign();
4709 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4710 AI->setAlignment(SuitableAlignmentInBytes);
4711 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4712 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4713 if (AI->getAddressSpace() !=
4714 CGM.getContext().getTargetAddressSpace(
4716 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4717 return RValue::get(performAddrSpaceCast(AI, Ty));
4718 }
4719 return RValue::get(AI);
4720 }
4721
4722 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4723 case Builtin::BI__builtin_alloca_with_align: {
4724 Value *Size = EmitScalarExpr(E->getArg(0));
4725 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4726 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4727 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4728 const Align AlignmentInBytes =
4729 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4730 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4731 AI->setAlignment(AlignmentInBytes);
4732 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4733 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4734 if (AI->getAddressSpace() !=
4735 CGM.getContext().getTargetAddressSpace(
4737 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4738 return RValue::get(performAddrSpaceCast(AI, Ty));
4739 }
4740 return RValue::get(AI);
4741 }
4742
4743 case Builtin::BI__builtin_infer_alloc_token: {
4744 llvm::MDNode *MDN = buildAllocToken(E);
4745 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4746 llvm::Function *F =
4747 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4748 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4749 return RValue::get(TokenID);
4750 }
4751
4752 case Builtin::BIbzero:
4753 case Builtin::BI__builtin_bzero: {
4755 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4756 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4757 E->getArg(0)->getExprLoc(), FD, 0);
4758 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4759 addInstToNewSourceAtom(I, nullptr);
4760 return RValue::get(nullptr);
4761 }
4762
4763 case Builtin::BIbcopy:
4764 case Builtin::BI__builtin_bcopy: {
4767 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4769 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4770 0);
4772 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4773 0);
4774 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4775 addInstToNewSourceAtom(I, nullptr);
4776 return RValue::get(nullptr);
4777 }
4778
4779 case Builtin::BImemcpy:
4780 case Builtin::BI__builtin_memcpy:
4781 case Builtin::BImempcpy:
4782 case Builtin::BI__builtin_mempcpy: {
4785 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4786 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4787 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4788 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4789 addInstToNewSourceAtom(I, nullptr);
4790 if (BuiltinID == Builtin::BImempcpy ||
4791 BuiltinID == Builtin::BI__builtin_mempcpy)
4792 return RValue::get(Builder.CreateInBoundsGEP(
4793 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4794 else
4795 return RValue::get(Dest, *this);
4796 }
4797
4798 case Builtin::BI__builtin_memcpy_inline: {
4801 uint64_t Size =
4802 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4803 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4804 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4805 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4806 addInstToNewSourceAtom(I, nullptr);
4807 return RValue::get(nullptr);
4808 }
4809
4810 case Builtin::BI__builtin_char_memchr:
4811 BuiltinID = Builtin::BI__builtin_memchr;
4812 break;
4813
4814 case Builtin::BI__builtin___memcpy_chk: {
4815 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4816 Expr::EvalResult SizeResult, DstSizeResult;
4817 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4818 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4819 break;
4820 llvm::APSInt Size = SizeResult.Val.getInt();
4821 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4822 if (Size.ugt(DstSize))
4823 break;
4826 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4827 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4828 addInstToNewSourceAtom(I, nullptr);
4829 return RValue::get(Dest, *this);
4830 }
4831
4832 case Builtin::BI__builtin_objc_memmove_collectable: {
4833 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4834 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4835 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4836 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4837 DestAddr, SrcAddr, SizeVal);
4838 return RValue::get(DestAddr, *this);
4839 }
4840
4841 case Builtin::BI__builtin___memmove_chk: {
4842 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4843 Expr::EvalResult SizeResult, DstSizeResult;
4844 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4845 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4846 break;
4847 llvm::APSInt Size = SizeResult.Val.getInt();
4848 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4849 if (Size.ugt(DstSize))
4850 break;
4853 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4854 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4855 addInstToNewSourceAtom(I, nullptr);
4856 return RValue::get(Dest, *this);
4857 }
4858
4859 case Builtin::BI__builtin_trivially_relocate:
4860 case Builtin::BImemmove:
4861 case Builtin::BI__builtin_memmove: {
4864 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4865 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4866 SizeVal = Builder.CreateMul(
4867 SizeVal,
4868 ConstantInt::get(
4869 SizeVal->getType(),
4870 getContext()
4871 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4872 .getQuantity()));
4873 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4874 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4875 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4876 addInstToNewSourceAtom(I, nullptr);
4877 return RValue::get(Dest, *this);
4878 }
4879 case Builtin::BImemset:
4880 case Builtin::BI__builtin_memset: {
4882 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4883 Builder.getInt8Ty());
4884 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4885 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4886 E->getArg(0)->getExprLoc(), FD, 0);
4887 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4888 addInstToNewSourceAtom(I, ByteVal);
4889 return RValue::get(Dest, *this);
4890 }
4891 case Builtin::BI__builtin_memset_inline: {
4893 Value *ByteVal =
4894 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4895 uint64_t Size =
4896 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4898 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4899 0);
4900 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4901 addInstToNewSourceAtom(I, nullptr);
4902 return RValue::get(nullptr);
4903 }
4904 case Builtin::BI__builtin___memset_chk: {
4905 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4906 Expr::EvalResult SizeResult, DstSizeResult;
4907 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4908 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4909 break;
4910 llvm::APSInt Size = SizeResult.Val.getInt();
4911 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4912 if (Size.ugt(DstSize))
4913 break;
4915 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4916 Builder.getInt8Ty());
4917 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4918 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4919 addInstToNewSourceAtom(I, nullptr);
4920 return RValue::get(Dest, *this);
4921 }
4922 case Builtin::BI__builtin_wmemchr: {
4923 // The MSVC runtime library does not provide a definition of wmemchr, so we
4924 // need an inline implementation.
4925 if (!getTarget().getTriple().isOSMSVCRT())
4926 break;
4927
4928 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4929 Value *Str = EmitScalarExpr(E->getArg(0));
4930 Value *Chr = EmitScalarExpr(E->getArg(1));
4931 Value *Size = EmitScalarExpr(E->getArg(2));
4932
4933 BasicBlock *Entry = Builder.GetInsertBlock();
4934 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4935 BasicBlock *Next = createBasicBlock("wmemchr.next");
4936 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4937 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4938 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4939
4940 EmitBlock(CmpEq);
4941 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4942 StrPhi->addIncoming(Str, Entry);
4943 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4944 SizePhi->addIncoming(Size, Entry);
4945 CharUnits WCharAlign =
4947 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4948 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4949 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4950 Builder.CreateCondBr(StrEqChr, Exit, Next);
4951
4952 EmitBlock(Next);
4953 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4954 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4955 Value *NextSizeEq0 =
4956 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4957 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4958 StrPhi->addIncoming(NextStr, Next);
4959 SizePhi->addIncoming(NextSize, Next);
4960
4961 EmitBlock(Exit);
4962 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4963 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4964 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4965 Ret->addIncoming(FoundChr, CmpEq);
4966 return RValue::get(Ret);
4967 }
4968 case Builtin::BI__builtin_wmemcmp: {
4969 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4970 // need an inline implementation.
4971 if (!getTarget().getTriple().isOSMSVCRT())
4972 break;
4973
4974 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4975
4976 Value *Dst = EmitScalarExpr(E->getArg(0));
4977 Value *Src = EmitScalarExpr(E->getArg(1));
4978 Value *Size = EmitScalarExpr(E->getArg(2));
4979
4980 BasicBlock *Entry = Builder.GetInsertBlock();
4981 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4982 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4983 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4984 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4985 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4986 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4987
4988 EmitBlock(CmpGT);
4989 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4990 DstPhi->addIncoming(Dst, Entry);
4991 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4992 SrcPhi->addIncoming(Src, Entry);
4993 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4994 SizePhi->addIncoming(Size, Entry);
4995 CharUnits WCharAlign =
4997 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4998 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4999 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
5000 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
5001
5002 EmitBlock(CmpLT);
5003 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
5004 Builder.CreateCondBr(DstLtSrc, Exit, Next);
5005
5006 EmitBlock(Next);
5007 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
5008 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
5009 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
5010 Value *NextSizeEq0 =
5011 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
5012 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
5013 DstPhi->addIncoming(NextDst, Next);
5014 SrcPhi->addIncoming(NextSrc, Next);
5015 SizePhi->addIncoming(NextSize, Next);
5016
5017 EmitBlock(Exit);
5018 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
5019 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
5020 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
5021 Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
5022 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
5023 return RValue::get(Ret);
5024 }
5025 case Builtin::BI__builtin_dwarf_cfa: {
5026 // The offset in bytes from the first argument to the CFA.
5027 //
5028 // Why on earth is this in the frontend? Is there any reason at
5029 // all that the backend can't reasonably determine this while
5030 // lowering llvm.eh.dwarf.cfa()?
5031 //
5032 // TODO: If there's a satisfactory reason, add a target hook for
5033 // this instead of hard-coding 0, which is correct for most targets.
5034 int32_t Offset = 0;
5035
5036 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
5037 return RValue::get(Builder.CreateCall(F,
5038 llvm::ConstantInt::get(Int32Ty, Offset)));
5039 }
5040 case Builtin::BI__builtin_return_address: {
5041 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
5042 getContext().UnsignedIntTy);
5043 Function *F =
5044 CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramPtrTy});
5045 return RValue::get(Builder.CreateCall(F, Depth));
5046 }
5047 case Builtin::BI_ReturnAddress: {
5048 Function *F =
5049 CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramPtrTy});
5050 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
5051 }
5052 case Builtin::BI__builtin_frame_address: {
5053 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
5054 getContext().UnsignedIntTy);
5055 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
5056 return RValue::get(Builder.CreateCall(F, Depth));
5057 }
5058 case Builtin::BI__builtin_stack_address: {
5059 return RValue::get(Builder.CreateCall(
5060 CGM.getIntrinsic(Intrinsic::stackaddress, AllocaInt8PtrTy)));
5061 }
5062 case Builtin::BI__builtin_extract_return_addr: {
5065 return RValue::get(Result);
5066 }
5067 case Builtin::BI__builtin_frob_return_addr: {
5070 return RValue::get(Result);
5071 }
5072 case Builtin::BI__builtin_dwarf_sp_column: {
5073 llvm::IntegerType *Ty
5076 if (Column == -1) {
5077 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
5078 return RValue::get(llvm::UndefValue::get(Ty));
5079 }
5080 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
5081 }
5082 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
5084 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
5085 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
5086 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
5087 }
5088 case Builtin::BI__builtin_eh_return: {
5089 Value *Int = EmitScalarExpr(E->getArg(0));
5090 Value *Ptr = EmitScalarExpr(E->getArg(1));
5091
5092 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
5093 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
5094 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
5095 Function *F =
5096 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
5097 : Intrinsic::eh_return_i64);
5098 Builder.CreateCall(F, {Int, Ptr});
5099 Builder.CreateUnreachable();
5100
5101 // We do need to preserve an insertion point.
5102 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
5103
5104 return RValue::get(nullptr);
5105 }
5106 case Builtin::BI__builtin_unwind_init: {
5107 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
5108 Builder.CreateCall(F);
5109 return RValue::get(nullptr);
5110 }
5111 case Builtin::BI__builtin_extend_pointer: {
5112 // Extends a pointer to the size of an _Unwind_Word, which is
5113 // uint64_t on all platforms. Generally this gets poked into a
5114 // register and eventually used as an address, so if the
5115 // addressing registers are wider than pointers and the platform
5116 // doesn't implicitly ignore high-order bits when doing
5117 // addressing, we need to make sure we zext / sext based on
5118 // the platform's expectations.
5119 //
5120 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
5121
5122 // Cast the pointer to intptr_t.
5123 Value *Ptr = EmitScalarExpr(E->getArg(0));
5124 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
5125
5126 // If that's 64 bits, we're done.
5127 if (IntPtrTy->getBitWidth() == 64)
5128 return RValue::get(Result);
5129
5130 // Otherwise, ask the codegen data what to do.
5131 if (getTargetHooks().extendPointerWithSExt())
5132 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
5133 else
5134 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
5135 }
5136 case Builtin::BI__builtin_setjmp: {
5137 // Buffer is a void**.
5139
5140 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
5141 // On this target, the back end fills in the context buffer completely.
5142 // It doesn't really matter if the frontend stores to the buffer before
5143 // calling setjmp, the back-end is going to overwrite them anyway.
5144 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
5145 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
5146 }
5147
5148 // Store the frame pointer to the setjmp buffer.
5149 Value *FrameAddr = Builder.CreateCall(
5150 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
5151 ConstantInt::get(Int32Ty, 0));
5152 Builder.CreateStore(FrameAddr, Buf);
5153
5154 // Store the stack pointer to the setjmp buffer.
5155 Value *StackAddr = Builder.CreateStackSave();
5156 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
5157
5158 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
5159 Builder.CreateStore(StackAddr, StackSaveSlot);
5160
5161 // Call LLVM's EH setjmp, which is lightweight.
5162 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
5163 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
5164 }
5165 case Builtin::BI__builtin_longjmp: {
5166 Value *Buf = EmitScalarExpr(E->getArg(0));
5167
5168 // Call LLVM's EH longjmp, which is lightweight.
5169 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
5170
5171 // longjmp doesn't return; mark this as unreachable.
5172 Builder.CreateUnreachable();
5173
5174 // We do need to preserve an insertion point.
5175 EmitBlock(createBasicBlock("longjmp.cont"));
5176
5177 return RValue::get(nullptr);
5178 }
5179 case Builtin::BI__builtin_launder: {
5180 const Expr *Arg = E->getArg(0);
5181 QualType ArgTy = Arg->getType()->getPointeeType();
5182 Value *Ptr = EmitScalarExpr(Arg);
5183 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
5184 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
5185
5186 return RValue::get(Ptr);
5187 }
5188 case Builtin::BI__sync_fetch_and_add:
5189 case Builtin::BI__sync_fetch_and_sub:
5190 case Builtin::BI__sync_fetch_and_or:
5191 case Builtin::BI__sync_fetch_and_and:
5192 case Builtin::BI__sync_fetch_and_xor:
5193 case Builtin::BI__sync_fetch_and_nand:
5194 case Builtin::BI__sync_add_and_fetch:
5195 case Builtin::BI__sync_sub_and_fetch:
5196 case Builtin::BI__sync_and_and_fetch:
5197 case Builtin::BI__sync_or_and_fetch:
5198 case Builtin::BI__sync_xor_and_fetch:
5199 case Builtin::BI__sync_nand_and_fetch:
5200 case Builtin::BI__sync_val_compare_and_swap:
5201 case Builtin::BI__sync_bool_compare_and_swap:
5202 case Builtin::BI__sync_lock_test_and_set:
5203 case Builtin::BI__sync_lock_release:
5204 case Builtin::BI__sync_swap:
5205 llvm_unreachable("Shouldn't make it through sema");
5206 case Builtin::BI__sync_fetch_and_add_1:
5207 case Builtin::BI__sync_fetch_and_add_2:
5208 case Builtin::BI__sync_fetch_and_add_4:
5209 case Builtin::BI__sync_fetch_and_add_8:
5210 case Builtin::BI__sync_fetch_and_add_16:
5211 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
5212 case Builtin::BI__sync_fetch_and_sub_1:
5213 case Builtin::BI__sync_fetch_and_sub_2:
5214 case Builtin::BI__sync_fetch_and_sub_4:
5215 case Builtin::BI__sync_fetch_and_sub_8:
5216 case Builtin::BI__sync_fetch_and_sub_16:
5217 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
5218 case Builtin::BI__sync_fetch_and_or_1:
5219 case Builtin::BI__sync_fetch_and_or_2:
5220 case Builtin::BI__sync_fetch_and_or_4:
5221 case Builtin::BI__sync_fetch_and_or_8:
5222 case Builtin::BI__sync_fetch_and_or_16:
5223 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
5224 case Builtin::BI__sync_fetch_and_and_1:
5225 case Builtin::BI__sync_fetch_and_and_2:
5226 case Builtin::BI__sync_fetch_and_and_4:
5227 case Builtin::BI__sync_fetch_and_and_8:
5228 case Builtin::BI__sync_fetch_and_and_16:
5229 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
5230 case Builtin::BI__sync_fetch_and_xor_1:
5231 case Builtin::BI__sync_fetch_and_xor_2:
5232 case Builtin::BI__sync_fetch_and_xor_4:
5233 case Builtin::BI__sync_fetch_and_xor_8:
5234 case Builtin::BI__sync_fetch_and_xor_16:
5235 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
5236 case Builtin::BI__sync_fetch_and_nand_1:
5237 case Builtin::BI__sync_fetch_and_nand_2:
5238 case Builtin::BI__sync_fetch_and_nand_4:
5239 case Builtin::BI__sync_fetch_and_nand_8:
5240 case Builtin::BI__sync_fetch_and_nand_16:
5241 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
5242
5243 // Clang extensions: not overloaded yet.
5244 case Builtin::BI__sync_fetch_and_min:
5245 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
5246 case Builtin::BI__sync_fetch_and_max:
5247 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
5248 case Builtin::BI__sync_fetch_and_umin:
5249 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
5250 case Builtin::BI__sync_fetch_and_umax:
5251 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
5252
5253 case Builtin::BI__sync_add_and_fetch_1:
5254 case Builtin::BI__sync_add_and_fetch_2:
5255 case Builtin::BI__sync_add_and_fetch_4:
5256 case Builtin::BI__sync_add_and_fetch_8:
5257 case Builtin::BI__sync_add_and_fetch_16:
5258 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
5259 llvm::Instruction::Add);
5260 case Builtin::BI__sync_sub_and_fetch_1:
5261 case Builtin::BI__sync_sub_and_fetch_2:
5262 case Builtin::BI__sync_sub_and_fetch_4:
5263 case Builtin::BI__sync_sub_and_fetch_8:
5264 case Builtin::BI__sync_sub_and_fetch_16:
5265 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
5266 llvm::Instruction::Sub);
5267 case Builtin::BI__sync_and_and_fetch_1:
5268 case Builtin::BI__sync_and_and_fetch_2:
5269 case Builtin::BI__sync_and_and_fetch_4:
5270 case Builtin::BI__sync_and_and_fetch_8:
5271 case Builtin::BI__sync_and_and_fetch_16:
5272 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
5273 llvm::Instruction::And);
5274 case Builtin::BI__sync_or_and_fetch_1:
5275 case Builtin::BI__sync_or_and_fetch_2:
5276 case Builtin::BI__sync_or_and_fetch_4:
5277 case Builtin::BI__sync_or_and_fetch_8:
5278 case Builtin::BI__sync_or_and_fetch_16:
5279 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
5280 llvm::Instruction::Or);
5281 case Builtin::BI__sync_xor_and_fetch_1:
5282 case Builtin::BI__sync_xor_and_fetch_2:
5283 case Builtin::BI__sync_xor_and_fetch_4:
5284 case Builtin::BI__sync_xor_and_fetch_8:
5285 case Builtin::BI__sync_xor_and_fetch_16:
5286 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
5287 llvm::Instruction::Xor);
5288 case Builtin::BI__sync_nand_and_fetch_1:
5289 case Builtin::BI__sync_nand_and_fetch_2:
5290 case Builtin::BI__sync_nand_and_fetch_4:
5291 case Builtin::BI__sync_nand_and_fetch_8:
5292 case Builtin::BI__sync_nand_and_fetch_16:
5293 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
5294 llvm::Instruction::And, true);
5295
5296 case Builtin::BI__sync_val_compare_and_swap_1:
5297 case Builtin::BI__sync_val_compare_and_swap_2:
5298 case Builtin::BI__sync_val_compare_and_swap_4:
5299 case Builtin::BI__sync_val_compare_and_swap_8:
5300 case Builtin::BI__sync_val_compare_and_swap_16:
5302 *this, E, false, AtomicOrdering::SequentiallyConsistent,
5303 AtomicOrdering::SequentiallyConsistent));
5304
5305 case Builtin::BI__sync_bool_compare_and_swap_1:
5306 case Builtin::BI__sync_bool_compare_and_swap_2:
5307 case Builtin::BI__sync_bool_compare_and_swap_4:
5308 case Builtin::BI__sync_bool_compare_and_swap_8:
5309 case Builtin::BI__sync_bool_compare_and_swap_16:
5311 *this, E, true, AtomicOrdering::SequentiallyConsistent,
5312 AtomicOrdering::SequentiallyConsistent));
5313
5314 case Builtin::BI__sync_swap_1:
5315 case Builtin::BI__sync_swap_2:
5316 case Builtin::BI__sync_swap_4:
5317 case Builtin::BI__sync_swap_8:
5318 case Builtin::BI__sync_swap_16:
5319 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5320
5321 case Builtin::BI__sync_lock_test_and_set_1:
5322 case Builtin::BI__sync_lock_test_and_set_2:
5323 case Builtin::BI__sync_lock_test_and_set_4:
5324 case Builtin::BI__sync_lock_test_and_set_8:
5325 case Builtin::BI__sync_lock_test_and_set_16:
5326 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5327
5328 case Builtin::BI__sync_lock_release_1:
5329 case Builtin::BI__sync_lock_release_2:
5330 case Builtin::BI__sync_lock_release_4:
5331 case Builtin::BI__sync_lock_release_8:
5332 case Builtin::BI__sync_lock_release_16: {
5333 Address Ptr = CheckAtomicAlignment(*this, E);
5334 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
5335
5336 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
5337 getContext().getTypeSize(ElTy));
5338 llvm::StoreInst *Store =
5339 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
5340 Store->setAtomic(llvm::AtomicOrdering::Release);
5341 return RValue::get(nullptr);
5342 }
5343
5344 case Builtin::BI__sync_synchronize: {
5345 // We assume this is supposed to correspond to a C++0x-style
5346 // sequentially-consistent fence (i.e. this is only usable for
5347 // synchronization, not device I/O or anything like that). This intrinsic
5348 // is really badly designed in the sense that in theory, there isn't
5349 // any way to safely use it... but in practice, it mostly works
5350 // to use it with non-atomic loads and stores to get acquire/release
5351 // semantics.
5352 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5353 return RValue::get(nullptr);
5354 }
5355
5356 case Builtin::BI__builtin_nontemporal_load:
5357 return RValue::get(EmitNontemporalLoad(*this, E));
5358 case Builtin::BI__builtin_nontemporal_store:
5359 return RValue::get(EmitNontemporalStore(*this, E));
5360 case Builtin::BI__c11_atomic_is_lock_free:
5361 case Builtin::BI__atomic_is_lock_free: {
5362 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5363 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5364 // _Atomic(T) is always properly-aligned.
5365 const char *LibCallName = "__atomic_is_lock_free";
5366 CallArgList Args;
5367 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5368 getContext().getSizeType());
5369 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5370 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5372 else
5373 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5375 const CGFunctionInfo &FuncInfo =
5376 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5377 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5378 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5379 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5380 ReturnValueSlot(), Args);
5381 }
5382
5383 case Builtin::BI__atomic_thread_fence:
5384 case Builtin::BI__atomic_signal_fence:
5385 case Builtin::BI__c11_atomic_thread_fence:
5386 case Builtin::BI__c11_atomic_signal_fence: {
5387 llvm::SyncScope::ID SSID;
5388 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5389 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5390 SSID = llvm::SyncScope::SingleThread;
5391 else
5392 SSID = llvm::SyncScope::System;
5393 Value *Order = EmitScalarExpr(E->getArg(0));
5394 if (isa<llvm::ConstantInt>(Order)) {
5395 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5396 switch (ord) {
5397 case 0: // memory_order_relaxed
5398 default: // invalid order
5399 break;
5400 case 1: // memory_order_consume
5401 case 2: // memory_order_acquire
5402 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5403 break;
5404 case 3: // memory_order_release
5405 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5406 break;
5407 case 4: // memory_order_acq_rel
5408 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5409 break;
5410 case 5: // memory_order_seq_cst
5411 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5412 break;
5413 }
5414 return RValue::get(nullptr);
5415 }
5416
5417 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5418 AcquireBB = createBasicBlock("acquire", CurFn);
5419 ReleaseBB = createBasicBlock("release", CurFn);
5420 AcqRelBB = createBasicBlock("acqrel", CurFn);
5421 SeqCstBB = createBasicBlock("seqcst", CurFn);
5422 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5423
5424 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5425 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5426
5427 Builder.SetInsertPoint(AcquireBB);
5428 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5429 Builder.CreateBr(ContBB);
5430 SI->addCase(Builder.getInt32(1), AcquireBB);
5431 SI->addCase(Builder.getInt32(2), AcquireBB);
5432
5433 Builder.SetInsertPoint(ReleaseBB);
5434 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5435 Builder.CreateBr(ContBB);
5436 SI->addCase(Builder.getInt32(3), ReleaseBB);
5437
5438 Builder.SetInsertPoint(AcqRelBB);
5439 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5440 Builder.CreateBr(ContBB);
5441 SI->addCase(Builder.getInt32(4), AcqRelBB);
5442
5443 Builder.SetInsertPoint(SeqCstBB);
5444 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5445 Builder.CreateBr(ContBB);
5446 SI->addCase(Builder.getInt32(5), SeqCstBB);
5447
5448 Builder.SetInsertPoint(ContBB);
5449 return RValue::get(nullptr);
5450 }
5451 case Builtin::BI__scoped_atomic_thread_fence: {
5453
5454 Value *Order = EmitScalarExpr(E->getArg(0));
5455 Value *Scope = EmitScalarExpr(E->getArg(1));
5456 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5457 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5458 if (Ord && Scp) {
5459 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5460 ? ScopeModel->map(Scp->getZExtValue())
5461 : ScopeModel->map(ScopeModel->getFallBackValue());
5462 switch (Ord->getZExtValue()) {
5463 case 0: // memory_order_relaxed
5464 default: // invalid order
5465 break;
5466 case 1: // memory_order_consume
5467 case 2: // memory_order_acquire
5468 Builder.CreateFence(
5469 llvm::AtomicOrdering::Acquire,
5470 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5471 llvm::AtomicOrdering::Acquire,
5472 getLLVMContext()));
5473 break;
5474 case 3: // memory_order_release
5475 Builder.CreateFence(
5476 llvm::AtomicOrdering::Release,
5477 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5478 llvm::AtomicOrdering::Release,
5479 getLLVMContext()));
5480 break;
5481 case 4: // memory_order_acq_rel
5482 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5483 getTargetHooks().getLLVMSyncScopeID(
5484 getLangOpts(), SS,
5485 llvm::AtomicOrdering::AcquireRelease,
5486 getLLVMContext()));
5487 break;
5488 case 5: // memory_order_seq_cst
5489 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5490 getTargetHooks().getLLVMSyncScopeID(
5491 getLangOpts(), SS,
5492 llvm::AtomicOrdering::SequentiallyConsistent,
5493 getLLVMContext()));
5494 break;
5495 }
5496 return RValue::get(nullptr);
5497 }
5498
5499 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5500
5502 OrderBBs;
5503 if (Ord) {
5504 switch (Ord->getZExtValue()) {
5505 case 0: // memory_order_relaxed
5506 default: // invalid order
5507 ContBB->eraseFromParent();
5508 return RValue::get(nullptr);
5509 case 1: // memory_order_consume
5510 case 2: // memory_order_acquire
5511 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5512 llvm::AtomicOrdering::Acquire);
5513 break;
5514 case 3: // memory_order_release
5515 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5516 llvm::AtomicOrdering::Release);
5517 break;
5518 case 4: // memory_order_acq_rel
5519 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5520 llvm::AtomicOrdering::AcquireRelease);
5521 break;
5522 case 5: // memory_order_seq_cst
5523 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5524 llvm::AtomicOrdering::SequentiallyConsistent);
5525 break;
5526 }
5527 } else {
5528 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5529 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5530 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5531 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5532
5533 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5534 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5535 SI->addCase(Builder.getInt32(1), AcquireBB);
5536 SI->addCase(Builder.getInt32(2), AcquireBB);
5537 SI->addCase(Builder.getInt32(3), ReleaseBB);
5538 SI->addCase(Builder.getInt32(4), AcqRelBB);
5539 SI->addCase(Builder.getInt32(5), SeqCstBB);
5540
5541 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5542 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5543 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5544 OrderBBs.emplace_back(SeqCstBB,
5545 llvm::AtomicOrdering::SequentiallyConsistent);
5546 }
5547
5548 for (auto &[OrderBB, Ordering] : OrderBBs) {
5549 Builder.SetInsertPoint(OrderBB);
5550 if (Scp) {
5551 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5552 ? ScopeModel->map(Scp->getZExtValue())
5553 : ScopeModel->map(ScopeModel->getFallBackValue());
5554 Builder.CreateFence(Ordering,
5555 getTargetHooks().getLLVMSyncScopeID(
5556 getLangOpts(), SS, Ordering, getLLVMContext()));
5557 Builder.CreateBr(ContBB);
5558 } else {
5559 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5560 for (unsigned Scp : ScopeModel->getRuntimeValues())
5561 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5562
5563 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5564 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5565 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5566 auto *B = BBs[Scp];
5567 SI->addCase(Builder.getInt32(Scp), B);
5568
5569 Builder.SetInsertPoint(B);
5570 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5571 getLangOpts(), ScopeModel->map(Scp),
5572 Ordering, getLLVMContext()));
5573 Builder.CreateBr(ContBB);
5574 }
5575 }
5576 }
5577
5578 Builder.SetInsertPoint(ContBB);
5579 return RValue::get(nullptr);
5580 }
5581
5582 case Builtin::BI__builtin_signbit:
5583 case Builtin::BI__builtin_signbitf:
5584 case Builtin::BI__builtin_signbitl: {
5585 return RValue::get(
5586 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5587 ConvertType(E->getType())));
5588 }
5589 case Builtin::BI__warn_memset_zero_len:
5590 return RValue::getIgnored();
5591 case Builtin::BI__annotation: {
5592 // Re-encode each wide string to UTF8 and make an MDString.
5594 for (const Expr *Arg : E->arguments()) {
5595 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5596 assert(Str->getCharByteWidth() == 2 || Str->getCharByteWidth() == 4);
5597 StringRef WideBytes = Str->getBytes();
5598 std::string StrUtf8;
5599 bool Converted =
5600 (Str->getCharByteWidth() == 2)
5601 ? convertUTF16ToUTF8String(
5602 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)
5603 : convertUTF32ToUTF8String(
5604 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8);
5605 if (!Converted) {
5606 CGM.ErrorUnsupported(E, "non-Unicode __annotation argument");
5607 continue;
5608 }
5609 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5610 }
5611
5612 // Build and MDTuple of MDStrings and emit the intrinsic call.
5613 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5614 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5615 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5616 return RValue::getIgnored();
5617 }
5618 case Builtin::BI__builtin_annotation: {
5619 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5620 llvm::Function *F = CGM.getIntrinsic(
5621 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5622
5623 // Get the annotation string, go through casts. Sema requires this to be a
5624 // non-wide string literal, potentially casted, so the cast<> is safe.
5625 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5626 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5627 return RValue::get(
5628 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5629 }
5630 case Builtin::BI__builtin_addcb:
5631 case Builtin::BI__builtin_addcs:
5632 case Builtin::BI__builtin_addc:
5633 case Builtin::BI__builtin_addcl:
5634 case Builtin::BI__builtin_addcll:
5635 case Builtin::BI__builtin_subcb:
5636 case Builtin::BI__builtin_subcs:
5637 case Builtin::BI__builtin_subc:
5638 case Builtin::BI__builtin_subcl:
5639 case Builtin::BI__builtin_subcll: {
5640
5641 // We translate all of these builtins from expressions of the form:
5642 // int x = ..., y = ..., carryin = ..., carryout, result;
5643 // result = __builtin_addc(x, y, carryin, &carryout);
5644 //
5645 // to LLVM IR of the form:
5646 //
5647 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5648 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5649 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5650 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5651 // i32 %carryin)
5652 // %result = extractvalue {i32, i1} %tmp2, 0
5653 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5654 // %tmp3 = or i1 %carry1, %carry2
5655 // %tmp4 = zext i1 %tmp3 to i32
5656 // store i32 %tmp4, i32* %carryout
5657
5658 // Scalarize our inputs.
5659 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5660 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5661 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5662 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5663
5664 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5665 Intrinsic::ID IntrinsicId;
5666 switch (BuiltinID) {
5667 default: llvm_unreachable("Unknown multiprecision builtin id.");
5668 case Builtin::BI__builtin_addcb:
5669 case Builtin::BI__builtin_addcs:
5670 case Builtin::BI__builtin_addc:
5671 case Builtin::BI__builtin_addcl:
5672 case Builtin::BI__builtin_addcll:
5673 IntrinsicId = Intrinsic::uadd_with_overflow;
5674 break;
5675 case Builtin::BI__builtin_subcb:
5676 case Builtin::BI__builtin_subcs:
5677 case Builtin::BI__builtin_subc:
5678 case Builtin::BI__builtin_subcl:
5679 case Builtin::BI__builtin_subcll:
5680 IntrinsicId = Intrinsic::usub_with_overflow;
5681 break;
5682 }
5683
5684 // Construct our resulting LLVM IR expression.
5685 llvm::Value *Carry1;
5686 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5687 X, Y, Carry1);
5688 llvm::Value *Carry2;
5689 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5690 Sum1, Carryin, Carry2);
5691 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5692 X->getType());
5693 Builder.CreateStore(CarryOut, CarryOutPtr);
5694 return RValue::get(Sum2);
5695 }
5696
5697 case Builtin::BI__builtin_add_overflow:
5698 case Builtin::BI__builtin_sub_overflow:
5699 case Builtin::BI__builtin_mul_overflow: {
5700 const clang::Expr *LeftArg = E->getArg(0);
5701 const clang::Expr *RightArg = E->getArg(1);
5702 const clang::Expr *ResultArg = E->getArg(2);
5703
5704 clang::QualType ResultQTy =
5705 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5706
5707 WidthAndSignedness LeftInfo =
5708 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5709 WidthAndSignedness RightInfo =
5710 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5711 WidthAndSignedness ResultInfo =
5712 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5713
5714 // Handle mixed-sign multiplication as a special case, because adding
5715 // runtime or backend support for our generic irgen would be too expensive.
5716 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5717 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5718 RightInfo, ResultArg, ResultQTy,
5719 ResultInfo);
5720
5721 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5722 ResultInfo))
5724 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5725 ResultInfo);
5726
5727 WidthAndSignedness EncompassingInfo =
5728 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5729
5730 llvm::Type *EncompassingLLVMTy =
5731 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5732
5733 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5734
5735 Intrinsic::ID IntrinsicId;
5736 switch (BuiltinID) {
5737 default:
5738 llvm_unreachable("Unknown overflow builtin id.");
5739 case Builtin::BI__builtin_add_overflow:
5740 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5741 : Intrinsic::uadd_with_overflow;
5742 break;
5743 case Builtin::BI__builtin_sub_overflow:
5744 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5745 : Intrinsic::usub_with_overflow;
5746 break;
5747 case Builtin::BI__builtin_mul_overflow:
5748 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5749 : Intrinsic::umul_with_overflow;
5750 break;
5751 }
5752
5753 llvm::Value *Left = EmitScalarExpr(LeftArg);
5754 llvm::Value *Right = EmitScalarExpr(RightArg);
5755 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5756
5757 // Extend each operand to the encompassing type.
5758 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5759 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5760
5761 // Perform the operation on the extended values.
5762 llvm::Value *Overflow, *Result;
5763 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5764
5765 if (EncompassingInfo.Width > ResultInfo.Width) {
5766 // The encompassing type is wider than the result type, so we need to
5767 // truncate it.
5768 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5769
5770 // To see if the truncation caused an overflow, we will extend
5771 // the result and then compare it to the original result.
5772 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5773 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5774 llvm::Value *TruncationOverflow =
5775 Builder.CreateICmpNE(Result, ResultTruncExt);
5776
5777 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5778 Result = ResultTrunc;
5779 }
5780
5781 // Finally, store the result using the pointer.
5782 bool isVolatile =
5783 ResultArg->getType()->getPointeeType().isVolatileQualified();
5784 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5785
5786 return RValue::get(Overflow);
5787 }
5788
5789 case Builtin::BI__builtin_uadd_overflow:
5790 case Builtin::BI__builtin_uaddl_overflow:
5791 case Builtin::BI__builtin_uaddll_overflow:
5792 case Builtin::BI__builtin_usub_overflow:
5793 case Builtin::BI__builtin_usubl_overflow:
5794 case Builtin::BI__builtin_usubll_overflow:
5795 case Builtin::BI__builtin_umul_overflow:
5796 case Builtin::BI__builtin_umull_overflow:
5797 case Builtin::BI__builtin_umulll_overflow:
5798 case Builtin::BI__builtin_sadd_overflow:
5799 case Builtin::BI__builtin_saddl_overflow:
5800 case Builtin::BI__builtin_saddll_overflow:
5801 case Builtin::BI__builtin_ssub_overflow:
5802 case Builtin::BI__builtin_ssubl_overflow:
5803 case Builtin::BI__builtin_ssubll_overflow:
5804 case Builtin::BI__builtin_smul_overflow:
5805 case Builtin::BI__builtin_smull_overflow:
5806 case Builtin::BI__builtin_smulll_overflow: {
5807
5808 // We translate all of these builtins directly to the relevant llvm IR node.
5809
5810 // Scalarize our inputs.
5811 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5812 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5813 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5814
5815 // Decide which of the overflow intrinsics we are lowering to:
5816 Intrinsic::ID IntrinsicId;
5817 switch (BuiltinID) {
5818 default: llvm_unreachable("Unknown overflow builtin id.");
5819 case Builtin::BI__builtin_uadd_overflow:
5820 case Builtin::BI__builtin_uaddl_overflow:
5821 case Builtin::BI__builtin_uaddll_overflow:
5822 IntrinsicId = Intrinsic::uadd_with_overflow;
5823 break;
5824 case Builtin::BI__builtin_usub_overflow:
5825 case Builtin::BI__builtin_usubl_overflow:
5826 case Builtin::BI__builtin_usubll_overflow:
5827 IntrinsicId = Intrinsic::usub_with_overflow;
5828 break;
5829 case Builtin::BI__builtin_umul_overflow:
5830 case Builtin::BI__builtin_umull_overflow:
5831 case Builtin::BI__builtin_umulll_overflow:
5832 IntrinsicId = Intrinsic::umul_with_overflow;
5833 break;
5834 case Builtin::BI__builtin_sadd_overflow:
5835 case Builtin::BI__builtin_saddl_overflow:
5836 case Builtin::BI__builtin_saddll_overflow:
5837 IntrinsicId = Intrinsic::sadd_with_overflow;
5838 break;
5839 case Builtin::BI__builtin_ssub_overflow:
5840 case Builtin::BI__builtin_ssubl_overflow:
5841 case Builtin::BI__builtin_ssubll_overflow:
5842 IntrinsicId = Intrinsic::ssub_with_overflow;
5843 break;
5844 case Builtin::BI__builtin_smul_overflow:
5845 case Builtin::BI__builtin_smull_overflow:
5846 case Builtin::BI__builtin_smulll_overflow:
5847 IntrinsicId = Intrinsic::smul_with_overflow;
5848 break;
5849 }
5850
5851
5852 llvm::Value *Carry;
5853 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5854 Builder.CreateStore(Sum, SumOutPtr);
5855
5856 return RValue::get(Carry);
5857 }
5858 case Builtin::BIaddressof:
5859 case Builtin::BI__addressof:
5860 case Builtin::BI__builtin_addressof:
5861 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5862 case Builtin::BI__builtin_function_start:
5863 return RValue::get(CGM.GetFunctionStart(
5864 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5865 case Builtin::BI__builtin_operator_new:
5867 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5868 case Builtin::BI__builtin_operator_delete:
5870 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5871 return RValue::get(nullptr);
5872
5873 case Builtin::BI__builtin_is_aligned:
5874 return EmitBuiltinIsAligned(E);
5875 case Builtin::BI__builtin_align_up:
5876 return EmitBuiltinAlignTo(E, true);
5877 case Builtin::BI__builtin_align_down:
5878 return EmitBuiltinAlignTo(E, false);
5879
5880 case Builtin::BI__noop:
5881 // __noop always evaluates to an integer literal zero.
5882 return RValue::get(ConstantInt::get(IntTy, 0));
5883 case Builtin::BI__builtin_call_with_static_chain: {
5884 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5885 const Expr *Chain = E->getArg(1);
5886 return EmitCall(Call->getCallee()->getType(),
5887 EmitCallee(Call->getCallee()), Call, ReturnValue,
5888 EmitScalarExpr(Chain));
5889 }
5890 case Builtin::BI_InterlockedExchange8:
5891 case Builtin::BI_InterlockedExchange16:
5892 case Builtin::BI_InterlockedExchange:
5893 case Builtin::BI_InterlockedExchangePointer:
5894 return RValue::get(
5896 case Builtin::BI_InterlockedCompareExchangePointer:
5897 return RValue::get(
5899 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5900 return RValue::get(
5902 case Builtin::BI_InterlockedCompareExchange8:
5903 case Builtin::BI_InterlockedCompareExchange16:
5904 case Builtin::BI_InterlockedCompareExchange:
5905 case Builtin::BI_InterlockedCompareExchange64:
5906 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5907 case Builtin::BI_InterlockedIncrement16:
5908 case Builtin::BI_InterlockedIncrement:
5909 return RValue::get(
5911 case Builtin::BI_InterlockedDecrement16:
5912 case Builtin::BI_InterlockedDecrement:
5913 return RValue::get(
5915 case Builtin::BI_InterlockedAnd8:
5916 case Builtin::BI_InterlockedAnd16:
5917 case Builtin::BI_InterlockedAnd:
5919 case Builtin::BI_InterlockedExchangeAdd8:
5920 case Builtin::BI_InterlockedExchangeAdd16:
5921 case Builtin::BI_InterlockedExchangeAdd:
5922 return RValue::get(
5924 case Builtin::BI_InterlockedExchangeSub8:
5925 case Builtin::BI_InterlockedExchangeSub16:
5926 case Builtin::BI_InterlockedExchangeSub:
5927 return RValue::get(
5929 case Builtin::BI_InterlockedOr8:
5930 case Builtin::BI_InterlockedOr16:
5931 case Builtin::BI_InterlockedOr:
5933 case Builtin::BI_InterlockedXor8:
5934 case Builtin::BI_InterlockedXor16:
5935 case Builtin::BI_InterlockedXor:
5937
5938 case Builtin::BI_bittest64:
5939 case Builtin::BI_bittest:
5940 case Builtin::BI_bittestandcomplement64:
5941 case Builtin::BI_bittestandcomplement:
5942 case Builtin::BI_bittestandreset64:
5943 case Builtin::BI_bittestandreset:
5944 case Builtin::BI_bittestandset64:
5945 case Builtin::BI_bittestandset:
5946 case Builtin::BI_interlockedbittestandreset:
5947 case Builtin::BI_interlockedbittestandreset64:
5948 case Builtin::BI_interlockedbittestandreset64_acq:
5949 case Builtin::BI_interlockedbittestandreset64_rel:
5950 case Builtin::BI_interlockedbittestandreset64_nf:
5951 case Builtin::BI_interlockedbittestandset64:
5952 case Builtin::BI_interlockedbittestandset64_acq:
5953 case Builtin::BI_interlockedbittestandset64_rel:
5954 case Builtin::BI_interlockedbittestandset64_nf:
5955 case Builtin::BI_interlockedbittestandset:
5956 case Builtin::BI_interlockedbittestandset_acq:
5957 case Builtin::BI_interlockedbittestandset_rel:
5958 case Builtin::BI_interlockedbittestandset_nf:
5959 case Builtin::BI_interlockedbittestandreset_acq:
5960 case Builtin::BI_interlockedbittestandreset_rel:
5961 case Builtin::BI_interlockedbittestandreset_nf:
5962 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5963
5964 // These builtins exist to emit regular volatile loads and stores not
5965 // affected by the -fms-volatile setting.
5966 case Builtin::BI__iso_volatile_load8:
5967 case Builtin::BI__iso_volatile_load16:
5968 case Builtin::BI__iso_volatile_load32:
5969 case Builtin::BI__iso_volatile_load64:
5970 return RValue::get(EmitISOVolatileLoad(*this, E));
5971 case Builtin::BI__iso_volatile_store8:
5972 case Builtin::BI__iso_volatile_store16:
5973 case Builtin::BI__iso_volatile_store32:
5974 case Builtin::BI__iso_volatile_store64:
5975 return RValue::get(EmitISOVolatileStore(*this, E));
5976
5977 case Builtin::BI__builtin_ptrauth_sign_constant:
5978 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5979
5980 case Builtin::BI__builtin_ptrauth_auth:
5981 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5982 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5983 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5984 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5985 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5986 case Builtin::BI__builtin_ptrauth_strip: {
5987 // Emit the arguments.
5989 for (auto argExpr : E->arguments())
5990 Args.push_back(EmitScalarExpr(argExpr));
5991
5992 // Cast the value to intptr_t, saving its original type.
5993 llvm::Type *OrigValueType = Args[0]->getType();
5994 if (OrigValueType->isPointerTy())
5995 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5996
5997 switch (BuiltinID) {
5998 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5999 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
6000 if (Args[4]->getType()->isPointerTy())
6001 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
6002 [[fallthrough]];
6003
6004 case Builtin::BI__builtin_ptrauth_auth:
6005 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
6006 if (Args[2]->getType()->isPointerTy())
6007 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
6008 break;
6009
6010 case Builtin::BI__builtin_ptrauth_sign_generic_data:
6011 if (Args[1]->getType()->isPointerTy())
6012 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
6013 break;
6014
6015 case Builtin::BI__builtin_ptrauth_blend_discriminator:
6016 case Builtin::BI__builtin_ptrauth_strip:
6017 break;
6018 }
6019
6020 // Call the intrinsic.
6021 auto IntrinsicID = [&]() -> unsigned {
6022 switch (BuiltinID) {
6023 case Builtin::BI__builtin_ptrauth_auth:
6024 return Intrinsic::ptrauth_auth;
6025 case Builtin::BI__builtin_ptrauth_auth_and_resign:
6026 return Intrinsic::ptrauth_resign;
6027 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
6028 return Intrinsic::ptrauth_resign_load_relative;
6029 case Builtin::BI__builtin_ptrauth_blend_discriminator:
6030 return Intrinsic::ptrauth_blend;
6031 case Builtin::BI__builtin_ptrauth_sign_generic_data:
6032 return Intrinsic::ptrauth_sign_generic;
6033 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
6034 return Intrinsic::ptrauth_sign;
6035 case Builtin::BI__builtin_ptrauth_strip:
6036 return Intrinsic::ptrauth_strip;
6037 }
6038 llvm_unreachable("bad ptrauth intrinsic");
6039 }();
6040 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
6041 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
6042
6043 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
6044 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
6045 OrigValueType->isPointerTy()) {
6046 Result = Builder.CreateIntToPtr(Result, OrigValueType);
6047 }
6048 return RValue::get(Result);
6049 }
6050
6051 case Builtin::BI__builtin_get_vtable_pointer: {
6052 const Expr *Target = E->getArg(0);
6053 QualType TargetType = Target->getType();
6054 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
6055 assert(Decl);
6056 auto ThisAddress = EmitPointerWithAlignment(Target);
6057 assert(ThisAddress.isValid());
6058 llvm::Value *VTablePointer =
6060 return RValue::get(VTablePointer);
6061 }
6062
6063 case Builtin::BI__exception_code:
6064 case Builtin::BI_exception_code:
6066 case Builtin::BI__exception_info:
6067 case Builtin::BI_exception_info:
6069 case Builtin::BI__abnormal_termination:
6070 case Builtin::BI_abnormal_termination:
6072 case Builtin::BI_setjmpex:
6073 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
6074 E->getArg(0)->getType()->isPointerType())
6075 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
6076 break;
6077 case Builtin::BI_setjmp:
6078 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
6079 E->getArg(0)->getType()->isPointerType()) {
6080 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
6081 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
6082 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
6083 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
6084 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
6085 }
6086 break;
6087
6088 // C++ std:: builtins.
6089 case Builtin::BImove:
6090 case Builtin::BImove_if_noexcept:
6091 case Builtin::BIforward:
6092 case Builtin::BIforward_like:
6093 case Builtin::BIas_const:
6094 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
6095 case Builtin::BI__GetExceptionInfo: {
6096 if (llvm::GlobalVariable *GV =
6097 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
6098 return RValue::get(GV);
6099 break;
6100 }
6101
6102 case Builtin::BI__fastfail:
6104
6105 case Builtin::BI__builtin_coro_id:
6106 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
6107 case Builtin::BI__builtin_coro_promise:
6108 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
6109 case Builtin::BI__builtin_coro_resume:
6110 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
6111 return RValue::get(nullptr);
6112 case Builtin::BI__builtin_coro_frame:
6113 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
6114 case Builtin::BI__builtin_coro_noop:
6115 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
6116 case Builtin::BI__builtin_coro_free:
6117 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
6118 case Builtin::BI__builtin_coro_destroy:
6119 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
6120 return RValue::get(nullptr);
6121 case Builtin::BI__builtin_coro_done:
6122 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
6123 case Builtin::BI__builtin_coro_alloc:
6124 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
6125 case Builtin::BI__builtin_coro_begin:
6126 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
6127 case Builtin::BI__builtin_coro_end:
6128 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
6129 case Builtin::BI__builtin_coro_suspend:
6130 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
6131 case Builtin::BI__builtin_coro_size:
6132 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
6133 case Builtin::BI__builtin_coro_align:
6134 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
6135
6136 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
6137 case Builtin::BIread_pipe:
6138 case Builtin::BIwrite_pipe: {
6139 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6140 *Arg1 = EmitScalarExpr(E->getArg(1));
6141 CGOpenCLRuntime OpenCLRT(CGM);
6142 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6143 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6144
6145 // Type of the generic packet parameter.
6146 unsigned GenericAS =
6148 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
6149
6150 // Testing which overloaded version we should generate the call for.
6151 if (2U == E->getNumArgs()) {
6152 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
6153 : "__write_pipe_2";
6154 // Creating a generic function type to be able to call with any builtin or
6155 // user defined type.
6156 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
6157 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6158 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
6159 return RValue::get(
6160 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6161 {Arg0, ACast, PacketSize, PacketAlign}));
6162 } else {
6163 assert(4 == E->getNumArgs() &&
6164 "Illegal number of parameters to pipe function");
6165 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
6166 : "__write_pipe_4";
6167
6168 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
6169 Int32Ty, Int32Ty};
6170 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
6171 *Arg3 = EmitScalarExpr(E->getArg(3));
6172 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6173 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
6174 // We know the third argument is an integer type, but we may need to cast
6175 // it to i32.
6176 if (Arg2->getType() != Int32Ty)
6177 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
6178 return RValue::get(
6179 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6180 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
6181 }
6182 }
6183 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
6184 // functions
6185 case Builtin::BIreserve_read_pipe:
6186 case Builtin::BIreserve_write_pipe:
6187 case Builtin::BIwork_group_reserve_read_pipe:
6188 case Builtin::BIwork_group_reserve_write_pipe:
6189 case Builtin::BIsub_group_reserve_read_pipe:
6190 case Builtin::BIsub_group_reserve_write_pipe: {
6191 // Composing the mangled name for the function.
6192 const char *Name;
6193 if (BuiltinID == Builtin::BIreserve_read_pipe)
6194 Name = "__reserve_read_pipe";
6195 else if (BuiltinID == Builtin::BIreserve_write_pipe)
6196 Name = "__reserve_write_pipe";
6197 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
6198 Name = "__work_group_reserve_read_pipe";
6199 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
6200 Name = "__work_group_reserve_write_pipe";
6201 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
6202 Name = "__sub_group_reserve_read_pipe";
6203 else
6204 Name = "__sub_group_reserve_write_pipe";
6205
6206 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6207 *Arg1 = EmitScalarExpr(E->getArg(1));
6208 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
6209 CGOpenCLRuntime OpenCLRT(CGM);
6210 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6211 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6212
6213 // Building the generic function prototype.
6214 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
6215 llvm::FunctionType *FTy =
6216 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
6217 // We know the second argument is an integer type, but we may need to cast
6218 // it to i32.
6219 if (Arg1->getType() != Int32Ty)
6220 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
6221 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6222 {Arg0, Arg1, PacketSize, PacketAlign}));
6223 }
6224 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
6225 // functions
6226 case Builtin::BIcommit_read_pipe:
6227 case Builtin::BIcommit_write_pipe:
6228 case Builtin::BIwork_group_commit_read_pipe:
6229 case Builtin::BIwork_group_commit_write_pipe:
6230 case Builtin::BIsub_group_commit_read_pipe:
6231 case Builtin::BIsub_group_commit_write_pipe: {
6232 const char *Name;
6233 if (BuiltinID == Builtin::BIcommit_read_pipe)
6234 Name = "__commit_read_pipe";
6235 else if (BuiltinID == Builtin::BIcommit_write_pipe)
6236 Name = "__commit_write_pipe";
6237 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
6238 Name = "__work_group_commit_read_pipe";
6239 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
6240 Name = "__work_group_commit_write_pipe";
6241 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
6242 Name = "__sub_group_commit_read_pipe";
6243 else
6244 Name = "__sub_group_commit_write_pipe";
6245
6246 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6247 *Arg1 = EmitScalarExpr(E->getArg(1));
6248 CGOpenCLRuntime OpenCLRT(CGM);
6249 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6250 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6251
6252 // Building the generic function prototype.
6253 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
6254 llvm::FunctionType *FTy = llvm::FunctionType::get(
6255 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
6256
6257 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6258 {Arg0, Arg1, PacketSize, PacketAlign}));
6259 }
6260 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
6261 case Builtin::BIget_pipe_num_packets:
6262 case Builtin::BIget_pipe_max_packets: {
6263 const char *BaseName;
6264 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
6265 if (BuiltinID == Builtin::BIget_pipe_num_packets)
6266 BaseName = "__get_pipe_num_packets";
6267 else
6268 BaseName = "__get_pipe_max_packets";
6269 std::string Name = std::string(BaseName) +
6270 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
6271
6272 // Building the generic function prototype.
6273 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6274 CGOpenCLRuntime OpenCLRT(CGM);
6275 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6276 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6277 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
6278 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6279
6280 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6281 {Arg0, PacketSize, PacketAlign}));
6282 }
6283
6284 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
6285 case Builtin::BIto_global:
6286 case Builtin::BIto_local:
6287 case Builtin::BIto_private: {
6288 auto Arg0 = EmitScalarExpr(E->getArg(0));
6289 auto NewArgT = llvm::PointerType::get(
6291 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6292 auto NewRetT = llvm::PointerType::get(
6294 CGM.getContext().getTargetAddressSpace(
6296 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
6297 llvm::Value *NewArg;
6298 if (Arg0->getType()->getPointerAddressSpace() !=
6299 NewArgT->getPointerAddressSpace())
6300 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
6301 else
6302 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
6303 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
6304 auto NewCall =
6305 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
6306 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
6307 ConvertType(E->getType())));
6308 }
6309
6310 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
6311 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
6312 // The code below expands the builtin call to a call to one of the following
6313 // functions that an OpenCL runtime library will have to provide:
6314 // __enqueue_kernel_basic
6315 // __enqueue_kernel_varargs
6316 // __enqueue_kernel_basic_events
6317 // __enqueue_kernel_events_varargs
6318 case Builtin::BIenqueue_kernel: {
6319 StringRef Name; // Generated function call name
6320 unsigned NumArgs = E->getNumArgs();
6321
6322 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6323 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6324 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6325
6326 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6327 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6328 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6329 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6330
6331 // FIXME: Look through the addrspacecast which may exist to the stack
6332 // temporary as a hack.
6333 //
6334 // This is hardcoding the assumed ABI of the target function. This assumes
6335 // direct passing for every argument except NDRange, which is assumed to be
6336 // byval or byref indirect passed.
6337 //
6338 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6339 // through EmitCallArgs to get the correct target ABI.
6340 Range = Range->stripPointerCasts();
6341
6342 llvm::Type *RangePtrTy = Range->getType();
6343
6344 if (NumArgs == 4) {
6345 // The most basic form of the call with parameters:
6346 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6347 Name = "__enqueue_kernel_basic";
6348 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6349 GenericVoidPtrTy};
6350 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6351
6352 auto Info =
6353 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6354 llvm::Value *Kernel =
6355 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6356 llvm::Value *Block =
6357 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6358
6359 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6360 {Queue, Flags, Range, Kernel, Block});
6361 return RValue::get(RTCall);
6362 }
6363 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6364
6365 // Create a temporary array to hold the sizes of local pointer arguments
6366 // for the block. \p First is the position of the first size argument.
6367 auto CreateArrayForSizeVar =
6368 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6369 llvm::APInt ArraySize(32, NumArgs - First);
6371 getContext().getSizeType(), ArraySize, nullptr,
6373 /*IndexTypeQuals=*/0);
6374 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6375 llvm::Value *TmpPtr = Tmp.getPointer();
6376 // The EmitLifetime* pair expect a naked Alloca as their last argument,
6377 // however for cases where the default AS is not the Alloca AS, Tmp is
6378 // actually the Alloca ascasted to the default AS, hence the
6379 // stripPointerCasts()
6380 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6381 llvm::Value *ElemPtr;
6382 EmitLifetimeStart(Alloca);
6383 // Each of the following arguments specifies the size of the corresponding
6384 // argument passed to the enqueued block.
6385 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6386 for (unsigned I = First; I < NumArgs; ++I) {
6387 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6388 auto *GEP =
6389 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6390 if (I == First)
6391 ElemPtr = GEP;
6392 auto *V =
6393 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6394 Builder.CreateAlignedStore(
6395 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6396 }
6397 // Return the Alloca itself rather than a potential ascast as this is only
6398 // used by the paired EmitLifetimeEnd.
6399 return {ElemPtr, Alloca};
6400 };
6401
6402 // Could have events and/or varargs.
6403 if (E->getArg(3)->getType()->isBlockPointerType()) {
6404 // No events passed, but has variadic arguments.
6405 Name = "__enqueue_kernel_varargs";
6406 auto Info =
6407 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6408 llvm::Value *Kernel =
6409 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6410 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6411 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6412
6413 // Create a vector of the arguments, as well as a constant value to
6414 // express to the runtime the number of variadic arguments.
6415 llvm::Value *const Args[] = {Queue, Flags,
6416 Range, Kernel,
6417 Block, ConstantInt::get(IntTy, NumArgs - 4),
6418 ElemPtr};
6419 llvm::Type *const ArgTys[] = {
6420 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6421 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6422
6423 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6424 auto Call = RValue::get(
6425 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6426 EmitLifetimeEnd(TmpPtr);
6427 return Call;
6428 }
6429 // Any calls now have event arguments passed.
6430 if (NumArgs >= 7) {
6431 llvm::PointerType *PtrTy = llvm::PointerType::get(
6432 CGM.getLLVMContext(),
6433 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6434
6435 llvm::Value *NumEvents =
6436 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6437
6438 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
6439 // to be a null pointer constant (including `0` literal), we can take it
6440 // into account and emit null pointer directly.
6441 llvm::Value *EventWaitList = nullptr;
6442 if (E->getArg(4)->isNullPointerConstant(
6444 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6445 } else {
6446 EventWaitList =
6447 E->getArg(4)->getType()->isArrayType()
6449 : EmitScalarExpr(E->getArg(4));
6450 // Convert to generic address space.
6451 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6452 }
6453 llvm::Value *EventRet = nullptr;
6454 if (E->getArg(5)->isNullPointerConstant(
6456 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6457 } else {
6458 EventRet =
6459 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6460 }
6461
6462 auto Info =
6463 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6464 llvm::Value *Kernel =
6465 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6466 llvm::Value *Block =
6467 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6468
6469 std::vector<llvm::Type *> ArgTys = {
6470 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6471 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6472
6473 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6474 NumEvents, EventWaitList, EventRet,
6475 Kernel, Block};
6476
6477 if (NumArgs == 7) {
6478 // Has events but no variadics.
6479 Name = "__enqueue_kernel_basic_events";
6480 llvm::FunctionType *FTy =
6481 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6482 return RValue::get(
6483 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6484 }
6485 // Has event info and variadics
6486 // Pass the number of variadics to the runtime function too.
6487 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6488 ArgTys.push_back(Int32Ty);
6489 Name = "__enqueue_kernel_events_varargs";
6490
6491 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6492 Args.push_back(ElemPtr);
6493 ArgTys.push_back(ElemPtr->getType());
6494
6495 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6496 auto Call = RValue::get(
6497 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6498 EmitLifetimeEnd(TmpPtr);
6499 return Call;
6500 }
6501 llvm_unreachable("Unexpected enqueue_kernel signature");
6502 }
6503 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6504 // parameter.
6505 case Builtin::BIget_kernel_work_group_size: {
6506 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6507 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6508 auto Info =
6509 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6510 Value *Kernel =
6511 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6512 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6514 CGM.CreateRuntimeFunction(
6515 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6516 false),
6517 "__get_kernel_work_group_size_impl"),
6518 {Kernel, Arg}));
6519 }
6520 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6521 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6522 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6523 auto Info =
6524 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6525 Value *Kernel =
6526 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6527 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6529 CGM.CreateRuntimeFunction(
6530 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6531 false),
6532 "__get_kernel_preferred_work_group_size_multiple_impl"),
6533 {Kernel, Arg}));
6534 }
6535 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6536 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6537 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6538 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6539 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6540 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6541 auto Info =
6542 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6543 Value *Kernel =
6544 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6545 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6546 const char *Name =
6547 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6548 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6549 : "__get_kernel_sub_group_count_for_ndrange_impl";
6551 CGM.CreateRuntimeFunction(
6552 llvm::FunctionType::get(
6553 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6554 false),
6555 Name),
6556 {NDRange, Kernel, Block}));
6557 }
6558 case Builtin::BI__builtin_store_half:
6559 case Builtin::BI__builtin_store_halff: {
6560 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
6561 Value *Val = EmitScalarExpr(E->getArg(0));
6563 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6564 Builder.CreateStore(HalfVal, Address);
6565 return RValue::get(nullptr);
6566 }
6567 case Builtin::BI__builtin_load_half: {
6569 Value *HalfVal = Builder.CreateLoad(Address);
6570 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6571 }
6572 case Builtin::BI__builtin_load_halff: {
6574 Value *HalfVal = Builder.CreateLoad(Address);
6575 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6576 }
6577 case Builtin::BI__builtin_printf:
6578 case Builtin::BIprintf:
6579 if (getTarget().getTriple().isNVPTX() ||
6580 getTarget().getTriple().isAMDGCN() ||
6581 (getTarget().getTriple().isSPIRV() &&
6582 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6583 if (getTarget().getTriple().isNVPTX())
6585 if ((getTarget().getTriple().isAMDGCN() ||
6586 getTarget().getTriple().isSPIRV()) &&
6587 getLangOpts().HIP)
6589 }
6590
6591 break;
6592 case Builtin::BI__builtin_canonicalize:
6593 case Builtin::BI__builtin_canonicalizef:
6594 case Builtin::BI__builtin_canonicalizef16:
6595 case Builtin::BI__builtin_canonicalizel:
6596 return RValue::get(
6597 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6598
6599 case Builtin::BI__builtin_thread_pointer: {
6600 if (!getContext().getTargetInfo().isTLSSupported())
6601 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6602
6603 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6604 {GlobalsInt8PtrTy}, {}));
6605 }
6606 case Builtin::BI__builtin_os_log_format:
6607 return emitBuiltinOSLogFormat(*E);
6608
6609 case Builtin::BI__xray_customevent: {
6611 return RValue::getIgnored();
6612
6613 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6615 return RValue::getIgnored();
6616
6617 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6618 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6619 return RValue::getIgnored();
6620
6621 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6622 auto FTy = F->getFunctionType();
6623 auto Arg0 = E->getArg(0);
6624 auto Arg0Val = EmitScalarExpr(Arg0);
6625 auto Arg0Ty = Arg0->getType();
6626 auto PTy0 = FTy->getParamType(0);
6627 if (PTy0 != Arg0Val->getType()) {
6628 if (Arg0Ty->isArrayType())
6629 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6630 else
6631 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6632 }
6633 auto Arg1 = EmitScalarExpr(E->getArg(1));
6634 auto PTy1 = FTy->getParamType(1);
6635 if (PTy1 != Arg1->getType())
6636 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6637 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6638 }
6639
6640 case Builtin::BI__xray_typedevent: {
6641 // TODO: There should be a way to always emit events even if the current
6642 // function is not instrumented. Losing events in a stream can cripple
6643 // a trace.
6645 return RValue::getIgnored();
6646
6647 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6649 return RValue::getIgnored();
6650
6651 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6652 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6653 return RValue::getIgnored();
6654
6655 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6656 auto FTy = F->getFunctionType();
6657 auto Arg0 = EmitScalarExpr(E->getArg(0));
6658 auto PTy0 = FTy->getParamType(0);
6659 if (PTy0 != Arg0->getType())
6660 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6661 auto Arg1 = E->getArg(1);
6662 auto Arg1Val = EmitScalarExpr(Arg1);
6663 auto Arg1Ty = Arg1->getType();
6664 auto PTy1 = FTy->getParamType(1);
6665 if (PTy1 != Arg1Val->getType()) {
6666 if (Arg1Ty->isArrayType())
6667 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6668 else
6669 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6670 }
6671 auto Arg2 = EmitScalarExpr(E->getArg(2));
6672 auto PTy2 = FTy->getParamType(2);
6673 if (PTy2 != Arg2->getType())
6674 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6675 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6676 }
6677
6678 case Builtin::BI__builtin_ms_va_start:
6679 case Builtin::BI__builtin_ms_va_end:
6680 return RValue::get(
6682 BuiltinID == Builtin::BI__builtin_ms_va_start));
6683
6684 case Builtin::BI__builtin_ms_va_copy: {
6685 // Lower this manually. We can't reliably determine whether or not any
6686 // given va_copy() is for a Win64 va_list from the calling convention
6687 // alone, because it's legal to do this from a System V ABI function.
6688 // With opaque pointer types, we won't have enough information in LLVM
6689 // IR to determine this from the argument types, either. Best to do it
6690 // now, while we have enough information.
6691 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6692 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6693
6694 DestAddr = DestAddr.withElementType(Int8PtrTy);
6695 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6696
6697 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6698 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6699 }
6700
6701 case Builtin::BI__builtin_get_device_side_mangled_name: {
6702 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6703 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6704 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6705 return RValue::get(Str.getPointer());
6706 }
6707 }
6708
6709 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6710 // the call using the normal call path, but using the unmangled
6711 // version of the function name.
6712 const auto &BI = getContext().BuiltinInfo;
6713 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6714 BI.isLibFunction(BuiltinID))
6715 return emitLibraryCall(*this, FD, E,
6716 CGM.getBuiltinLibFunction(FD, BuiltinID));
6717
6718 // If this is a predefined lib function (e.g. malloc), emit the call
6719 // using exactly the normal call path.
6720 if (BI.isPredefinedLibFunction(BuiltinID))
6721 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6722
6723 // Check that a call to a target specific builtin has the correct target
6724 // features.
6725 // This is down here to avoid non-target specific builtins, however, if
6726 // generic builtins start to require generic target features then we
6727 // can move this up to the beginning of the function.
6728 checkTargetFeatures(E, FD);
6729
6730 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6731 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6732
6733 // See if we have a target specific intrinsic.
6734 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6735 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6736 StringRef Prefix =
6737 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6738 if (!Prefix.empty()) {
6739 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6740 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6741 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6742 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6743 // NOTE we don't need to perform a compatibility flag check here since the
6744 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
6745 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
6746 if (IntrinsicID == Intrinsic::not_intrinsic)
6747 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6748 }
6749
6750 if (IntrinsicID != Intrinsic::not_intrinsic) {
6752
6753 // Find out if any arguments are required to be integer constant
6754 // expressions.
6755 unsigned ICEArguments = 0;
6757 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6758 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6759
6760 Function *F = CGM.getIntrinsic(IntrinsicID);
6761 llvm::FunctionType *FTy = F->getFunctionType();
6762
6763 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6764 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6765 // If the intrinsic arg type is different from the builtin arg type
6766 // we need to do a bit cast.
6767 llvm::Type *PTy = FTy->getParamType(i);
6768 if (PTy != ArgValue->getType()) {
6769 // XXX - vector of pointers?
6770 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6771 if (PtrTy->getAddressSpace() !=
6772 ArgValue->getType()->getPointerAddressSpace()) {
6773 ArgValue = Builder.CreateAddrSpaceCast(
6774 ArgValue, llvm::PointerType::get(getLLVMContext(),
6775 PtrTy->getAddressSpace()));
6776 }
6777 }
6778
6779 // Cast vector type (e.g., v256i32) to x86_amx, this only happen
6780 // in amx intrinsics.
6781 if (PTy->isX86_AMXTy())
6782 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6783 {ArgValue->getType()}, {ArgValue});
6784 else
6785 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6786 }
6787
6788 Args.push_back(ArgValue);
6789 }
6790
6791 Value *V = Builder.CreateCall(F, Args);
6792 QualType BuiltinRetType = E->getType();
6793
6794 llvm::Type *RetTy = VoidTy;
6795 if (!BuiltinRetType->isVoidType())
6796 RetTy = ConvertType(BuiltinRetType);
6797
6798 if (RetTy != V->getType()) {
6799 // XXX - vector of pointers?
6800 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6801 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6802 V = Builder.CreateAddrSpaceCast(
6803 V, llvm::PointerType::get(getLLVMContext(),
6804 PtrTy->getAddressSpace()));
6805 }
6806 }
6807
6808 // Cast x86_amx to vector type (e.g., v256i32), this only happen
6809 // in amx intrinsics.
6810 if (V->getType()->isX86_AMXTy())
6811 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6812 {V});
6813 else
6814 V = Builder.CreateBitCast(V, RetTy);
6815 }
6816
6817 if (RetTy->isVoidTy())
6818 return RValue::get(nullptr);
6819
6820 return RValue::get(V);
6821 }
6822
6823 // Some target-specific builtins can have aggregate return values, e.g.
6824 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6825 // ReturnValue to be non-null, so that the target-specific emission code can
6826 // always just emit into it.
6828 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6829 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6830 ReturnValue = ReturnValueSlot(DestPtr, false);
6831 }
6832
6833 // Now see if we can emit a target-specific builtin.
6834 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6835 switch (EvalKind) {
6836 case TEK_Scalar:
6837 if (V->getType()->isVoidTy())
6838 return RValue::get(nullptr);
6839 return RValue::get(V);
6840 case TEK_Aggregate:
6841 return RValue::getAggregate(ReturnValue.getAddress(),
6842 ReturnValue.isVolatile());
6843 case TEK_Complex:
6844 llvm_unreachable("No current target builtin returns complex");
6845 }
6846 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6847 }
6848
6849 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6850 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6851 switch (EvalKind) {
6852 case TEK_Scalar:
6853 if (V->getType()->isVoidTy())
6854 return RValue::get(nullptr);
6855 return RValue::get(V);
6856 case TEK_Aggregate:
6857 return RValue::getAggregate(ReturnValue.getAddress(),
6858 ReturnValue.isVolatile());
6859 case TEK_Complex:
6860 llvm_unreachable("No current hlsl builtin returns complex");
6861 }
6862 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6863 }
6864
6865 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6866 return EmitHipStdParUnsupportedBuiltin(this, FD);
6867
6868 ErrorUnsupported(E, "builtin function");
6869
6870 // Unknown builtin, for now just dump it out and return undef.
6871 return GetUndefRValue(E->getType());
6872}
6873
6874namespace {
6875struct BuiltinAlignArgs {
6876 llvm::Value *Src = nullptr;
6877 llvm::Type *SrcType = nullptr;
6878 llvm::Value *Alignment = nullptr;
6879 llvm::Value *Mask = nullptr;
6880 llvm::IntegerType *IntType = nullptr;
6881
6882 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6883 QualType AstType = E->getArg(0)->getType();
6884 if (AstType->isArrayType())
6885 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6886 else
6887 Src = CGF.EmitScalarExpr(E->getArg(0));
6888 SrcType = Src->getType();
6889 if (SrcType->isPointerTy()) {
6890 IntType = IntegerType::get(
6891 CGF.getLLVMContext(),
6892 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6893 } else {
6894 assert(SrcType->isIntegerTy());
6895 IntType = cast<llvm::IntegerType>(SrcType);
6896 }
6897 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6898 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6899 auto *One = llvm::ConstantInt::get(IntType, 1);
6900 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6901 }
6902};
6903} // namespace
6904
6905/// Generate (x & (y-1)) == 0.
6907 BuiltinAlignArgs Args(E, *this);
6908 llvm::Value *SrcAddress = Args.Src;
6909 if (Args.SrcType->isPointerTy())
6910 SrcAddress =
6911 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6912 return RValue::get(Builder.CreateICmpEQ(
6913 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6914 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6915}
6916
6917/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6918/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6919/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6921 BuiltinAlignArgs Args(E, *this);
6922 llvm::Value *SrcForMask = Args.Src;
6923 if (AlignUp) {
6924 // When aligning up we have to first add the mask to ensure we go over the
6925 // next alignment value and then align down to the next valid multiple.
6926 // By adding the mask, we ensure that align_up on an already aligned
6927 // value will not change the value.
6928 if (Args.Src->getType()->isPointerTy()) {
6929 if (getLangOpts().PointerOverflowDefined)
6930 SrcForMask =
6931 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6932 else
6933 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6934 /*SignedIndices=*/true,
6935 /*isSubtraction=*/false,
6936 E->getExprLoc(), "over_boundary");
6937 } else {
6938 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6939 }
6940 }
6941 // Invert the mask to only clear the lower bits.
6942 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6943 llvm::Value *Result = nullptr;
6944 if (Args.Src->getType()->isPointerTy()) {
6945 Result = Builder.CreateIntrinsic(
6946 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6947 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6948 } else {
6949 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6950 }
6951 assert(Result->getType() == Args.SrcType);
6952 return RValue::get(Result);
6953}
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering)
Utility to insert an atomic cmpxchg instruction.
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static mlir::Value emitBinaryExpMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr *e, llvm::StringRef intrinsicName, llvm::StringRef constrainedIntrinsicName)
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static bool shouldEmitBuiltinAsIR(unsigned builtinID, const Builtin::Context &bi, const CIRGenFunction &cgf)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
Result
Implement __builtin_bit_cast and related operations.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
static StringRef getTriple(const Command &Job)
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:227
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:805
Builtin::Context & BuiltinInfo
Definition ASTContext.h:807
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:924
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3789
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4144
Expr * getRHS() const
Definition Expr.h:4093
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:236
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:228
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:413
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
arg_range arguments()
Definition Expr.h:3198
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:146
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:153
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:190
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:430
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:179
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:138
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:356
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
llvm::Value * getPipeElemAlign(const Expr *PipeArg)
llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2812
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1199
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5271
llvm::Value * EmitSEHAbnormalTermination()
RValue emitStdcFirstBit(const CallExpr *E, llvm::Intrinsic::ID IntID, bool InvertArg)
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2360
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
RValue emitStdcBitWidthMinus(const CallExpr *E, llvm::Intrinsic::ID IntID, bool IsPop)
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:521
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4035
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7210
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3925
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4767
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2164
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6571
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7162
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:238
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:205
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4183
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1357
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1322
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2243
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5427
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1369
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:4495
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4615
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2352
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1614
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
RValue emitStdcCountIntrinsic(const CallExpr *E, llvm::Intrinsic::ID IntID, bool InvertArg, bool IsPop=false)
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:793
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1597
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:194
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4600
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4522
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2277
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1253
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:428
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4510
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1713
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2199
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:643
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1873
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:381
virtual bool supportsLibCall() const
supportsLibCall - Query to whether or not target supports all lib calls.
Definition TargetInfo.h:79
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:177
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:167
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:149
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:186
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3330
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4442
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3491
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3521
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:460
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:601
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:585
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:233
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3122
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3100
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3095
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:838
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3693
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3075
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4073
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:282
QualType getType() const
Definition Expr.h:144
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:231
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3178
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4850
Represents a function declaration or definition.
Definition Decl.h:2018
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2815
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3757
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5362
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, const IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5597
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2202
PipeType - OpenCL20.
Definition TypeBase.h:8254
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3383
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8520
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8562
Represents a struct/union/class.
Definition Decl.h:4343
field_range fields() const
Definition Decl.h:4546
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isUnion() const
Definition Decl.h:3946
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:748
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1871
bool isBlockPointerType() const
Definition TypeBase.h:8693
bool isVoidType() const
Definition TypeBase.h:9039
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2266
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8772
bool isCountAttributedType() const
Definition Type.cpp:778
bool isPointerType() const
Definition TypeBase.h:8673
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9083
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9333
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1958
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:789
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9266
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4230
QualType getElementType() const
Definition TypeBase.h:4244
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:464
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1763
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742