//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBuiltin.h"
#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/OSLog.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include <optional>
#include <utility>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Some builtins lack a library implementation on some targets and are
/// instead emitted directly as LLVM IR by the target builtin emitters.
/// FIXME: Remove this when library support is added.
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
                                  const Builtin::Context &BI,
                                  const CodeGenFunction &CGF) {
  if (!CGF.CGM.getLangOpts().MathErrno &&
    switch (BuiltinID) {
    default:
      return false;
    case Builtin::BIlogbf:
    case Builtin::BI__builtin_logbf:
    case Builtin::BIlogb:
    case Builtin::BI__builtin_logb:
    case Builtin::BIscalbnf:
    case Builtin::BI__builtin_scalbnf:
    case Builtin::BIscalbn:
    case Builtin::BI__builtin_scalbn:
      return true;
    }
  }
  return false;
}
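
// For example (an illustrative sketch, not tied to a particular target): on a
// target runtime without scalbnf, with -fno-math-errno a call such as
//   float y = __builtin_scalbnf(x, n);
// can be emitted directly as IR, e.g.
//   %y = call float @llvm.ldexp.f32.i32(float %x, i32 %n)
// instead of a libcall to scalbnf that the target could not satisfy.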

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue,
                                        llvm::Triple::ArchType Arch) {
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  // Returning nullptr here leads to the builtin being handled in
  // EmitStdParUnsupportedBuiltin.
  if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
      Arch != CGF->getTarget().getTriple().getArch())
    return nullptr;

  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
  case llvm::Triple::dxil:
    return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
    if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
      return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
    [[fallthrough]];
  case llvm::Triple::spirv:
    return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
                                   getTarget().getTriple().getArch());
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}
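
// For example (a sketch; the exact pattern byte and alignment are
// target-dependent), with -ftrivial-auto-var-init=pattern a call to
// __builtin_alloca(n) is followed by a memset of the fresh allocation:
//   %buf = alloca i8, i64 %n, align 16
//   call void @llvm.memset.p0.i64(ptr align 16 %buf, i8 -86, i64 %n, i1 false)
// with the memset carrying "auto-init" !annotation metadata.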

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary). We may have
  // to build this up so provide a small stack buffer to handle the vast
  // majority of names.
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to targets other than PPC,
    // once the backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.contains(BuiltinID))
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.contains(BuiltinID))
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
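
// For example: for __builtin_fabsf this simply declares and returns "fabsf"
// (the __builtin_ prefix is 10 characters, hence the substr(10) above). On a
// PPC64 target where long double is IEEE quad, the remapping table kicks in
// instead, so __builtin_printf resolves to "__printfieee128" rather than
// "printf".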

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                 QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                   QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
                       : DL.getTypeStoreSize(Ptr.getElementType());
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
    // Force address to be at least naturally-aligned.
    return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
  }
  return Ptr;
}
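
// For example (illustrative): given
//   struct __attribute__((packed)) S { char c; int i; } s;
//   __sync_fetch_and_add(&s.i, 1);
// the int member is only 1-byte aligned, so this emits the
// warn_sync_op_misaligned diagnostic and claims the natural 4-byte alignment
// on the returned address before the atomic instruction is created.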

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
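
// For example (a sketch): __sync_fetch_and_add(&x, 5) with 'long x' reaches
// here with Kind == AtomicRMWInst::Add and emits
//   %old = atomicrmw add ptr %x, i64 5 seq_cst
// returning %old, the value the memory held before the addition. Pointer
// operands round-trip through ptrtoint/inttoptr via EmitToInt/EmitFromInt.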

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0));

  LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Val);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
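
// For example (a sketch): __sync_add_and_fetch(&x, 5) emits the atomicrmw and
// then re-applies the operation to recover the *new* value:
//   %old = atomicrmw add ptr %x, i64 5 seq_cst
//   %new = add i64 %old, 5          ; returned result
// __sync_nand_and_fetch additionally sets Invert: the fetched value is
// and'ed with the operand and then xor'ed with -1, turning it back into the
// 'nand' result that was stored.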

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E Builtin call expression to convert to cmpxchg.
///        arg0 - address to operate on
///        arg1 - value to compare with
///        arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///        cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: To lower Microsoft's _InterlockedCompareExchange* intrinsics, invoke
/// the function EmitAtomicCmpXchgForMSIntrin instead.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Cmp->getType();
  Cmp = EmitToInt(CGF, Cmp, T, IntType);
  Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
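
// For example (a sketch): with 'int x', __sync_bool_compare_and_swap(&x, 1, 2)
// takes the ReturnBool path:
//   %pair = cmpxchg ptr %x, i32 1, i32 2 seq_cst seq_cst
//   %ok   = extractvalue { i32, i1 } %pair, 1
//   %ret  = zext i1 %ok to i32
// while __sync_val_compare_and_swap returns extractvalue ..., 0 (the old
// value) instead.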

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
  auto *RTy = Exchange->getType();

  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));

  if (RTy->isPointerTy()) {
    Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
    Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
  }

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the
  // volatile marker.
  auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  CmpXchg->setVolatile(true);

  auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
  if (RTy->isPointerTy()) {
    Result = CGF.Builder.CreateIntToPtr(Result, RTy);
  }

  return Result;
}
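
// For example (a sketch): _InterlockedCompareExchange(&x, Exch, Comp) emits
//   %pair = cmpxchg volatile ptr %x, i32 %Comp, i32 %Exch seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
// Note the operand order: the MSVC intrinsic passes Exchange before
// Comparand, but LLVM's cmpxchg wants the compare value first, hence the
// swap described above. The intrinsic always returns the old value.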

// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
//
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed __int64.

static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));

  assert(DestPtr->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values. Alignment is also overridden for
  // destination pointer.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address DestAddr(DestPtr, Int128Ty,
                   CGF.getContext().toCharUnitsFromBits(128));
  ComparandAddr = ComparandAddr.withElementType(Int128Ty);

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the
  // volatile marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandAddr);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}
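
// For example (a sketch): on MSVC targets __iso_volatile_load32(p) becomes
//   %v = load volatile i32, ptr %p, align 4
// and __iso_volatile_store32(p, v) becomes
//   store volatile i32 %v, ptr %p, align 4
// i.e. ordinary volatile accesses with no implied atomic ordering or fences.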

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                    const CallExpr *E, unsigned IntrinsicID,
                                    unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}
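
// For example (a sketch): __builtin_sqrt(x) normally lowers to
//   %r = call double @llvm.sqrt.f64(double %x)
// but under strict FP semantics (e.g. -ffp-exception-behavior=strict) the
// constrained variant is used instead, carrying rounding and exception
// behavior as metadata operands:
//   %r = call double @llvm.experimental.constrained.sqrt.f64(double %x,
//            metadata !"round.dynamic", metadata !"fpexcept.strict")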

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                    const CallExpr *E, unsigned IntrinsicID,
                                    unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Like the above, but the second argument is mangled with its own type.
static Value *
emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                       Intrinsic::ID IntrinsicID,
                                       Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                     const CallExpr *E, unsigned IntrinsicID,
                                     unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}
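
// For example (a sketch): frexp(x, &e) maps onto the llvm.frexp intrinsic,
// which returns both results as a struct:
//   %r    = call { double, i32 } @llvm.frexp.f64.i32(double %x)
//   %exp  = extractvalue { double, i32 } %r, 1   ; stored through &e
//   %mant = extractvalue { double, i32 } %r, 0   ; returned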

static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                              Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));

  llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Val);

  llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
  LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);

  llvm::StoreInst *StoreSin =
      CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
  llvm::StoreInst *StoreCos =
      CGF.Builder.CreateStore(CosResult, CosLV.getAddress());

  // Mark the two stores as non-aliasing with each other. The order of stores
  // emitted by this builtin is arbitrary; enforcing a particular order would
  // prevent optimizations later on.
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
  MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
  MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
  StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
  StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
}
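
// For example (a sketch): sincos(x, &s, &c) becomes
//   %r = call { double, double } @llvm.sincos.f64(double %x)
// followed by the two stores above; the alias.scope/noalias metadata tells
// later passes that the two destination stores cannot alias, so they may be
// freely reordered.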

static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                    Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));

  llvm::Value *Call =
      CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);

  llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
  CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);

  return FractionalResult;
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}

/// Checks that neither the arguments nor the result are passed indirectly in
/// the ABI (i.e. via a hidden pointer). This is used to check that annotating
/// FP libcalls (that could set `errno`) with "int" TBAA metadata is safe. If
/// any floating-point arguments are passed indirectly, setup for the call
/// could be incorrectly optimized out.
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) {
  auto IsIndirect = [&](ABIArgInfo const &info) {
    return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
  };
  return !IsIndirect(FnInfo.getReturnInfo()) &&
         llvm::none_of(FnInfo.arguments(),
                       [&](CGFunctionInfoArgInfo const &ArgInfo) {
                         return IsIndirect(ArgInfo.info);
                       });
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  llvm::CallBase *callOrInvoke = nullptr;
  CGFunctionInfo const *FnInfo = nullptr;
  RValue Call =
      CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
                   /*Chain=*/nullptr, &callOrInvoke, &FnInfo);

  if (unsigned BuiltinID = FD->getBuiltinID()) {
    // Check whether this is an FP math builtin function, such as
    // BI__builtin_expf.
    ASTContext &Context = CGF.getContext();
    bool ConstWithoutErrnoAndExceptions =
        Context.BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
    // Restrict to targets with errno; for example, macOS doesn't set errno.
    // TODO: Support builtin functions with complex return types, e.g. cacosh.
    if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
        !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
        HasNoIndirectArgumentsOrResults(*FnInfo)) {
      // Emit "int" TBAA metadata on FP math libcalls.
      clang::QualType IntTy = Context.IntTy;
      TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
      CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
    }
  }
  return Call;
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
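
// For example (a sketch): __builtin_sadd_overflow(a, b, &r) with 'int'
// operands reaches this helper as
//   %pair  = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %sum   = extractvalue { i32, i1 } %pair, 0   ; stored to the result ptr
//   %carry = extractvalue { i32, i1 } %pair, 1   ; the builtin's bool result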

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = context.getIntWidth(Type);
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
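
// Worked example (illustrative): for a signed 32-bit type and an unsigned
// 32-bit type, the result must be signed (one input is signed) and must still
// hold every u32 value, so the unsigned input forces MinWidth = 32 + 1 and
// the encompassing type is a signed 33-bit integer. This is how
// __builtin_add_overflow picks its intermediate type for mixed-signedness
// operands.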

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
                            ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

namespace {

/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
class StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  bool AddrOfSeen = false;

public:
  const Expr *ArrayIndex = nullptr;
  QualType ArrayElementTy;

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (AddrOfSeen && E->getType()->isArrayType())
      // Avoid forms like '&ptr->array'.
      return nullptr;
    return E;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (ArrayIndex)
      // We don't support multiple subscripts.
      return nullptr;

    AddrOfSeen = false; // '&ptr->array[idx]' is okay.
    ArrayIndex = E->getIdx();
    ArrayElementTy = E->getBase()->getType();
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
    AddrOfSeen = true;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
    AddrOfSeen = false;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
    return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
  }
};

} // end anonymous namespace

/// Find a struct's flexible array member. It may be embedded inside multiple
/// sub-structs, but must still be the last field.
static const FieldDecl *FindFlexibleArrayMemberField(CodeGenFunction &CGF,
                                                     ASTContext &Ctx,
                                                     const RecordDecl *RD) {
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      CGF.getLangOpts().getStrictFlexArraysLevel();

  if (RD->isImplicit())
    return nullptr;

  for (const FieldDecl *FD : RD->fields()) {
    if (Decl::isFlexibleArrayMemberLike(
            Ctx, FD, FD->getType(), StrictFlexArraysLevel,
            /*IgnoreTemplateOrMacroSubstitution=*/true))
      return FD;

    if (const auto *RD = FD->getType()->getAsRecordDecl())
      if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
        return FD;
  }

  return nullptr;
}

/// Calculate the offset of a struct field. It may be embedded inside multiple
/// sub-structs.
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
                           const FieldDecl *FD, int64_t &Offset) {
  if (RD->isImplicit())
    return false;

  // Keep track of the field number ourselves, because the other methods
  // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
  // is laid out.
  uint32_t FieldNo = 0;
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);

  for (const FieldDecl *Field : RD->fields()) {
    if (Field == FD) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    if (const auto *RD = Field->getType()->getAsRecordDecl()) {
      if (GetFieldOffset(Ctx, RD, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }
    }

    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}

static std::optional<int64_t>
GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
  int64_t Offset = 0;

  if (GetFieldOffset(Ctx, RD, FD, Offset))
    return std::optional<int64_t>(Offset);

  return std::nullopt;
}

llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
                                                llvm::Value *EmittedE,
                                                unsigned Type,
                                                llvm::IntegerType *ResType) {
  // Note: If the whole struct is specified in the __bdos (i.e. the Visitor
  // returns a DeclRefExpr), the size of a structure with a flexible array
  // member can be calculated in two ways:
  //
  //     1) sizeof(struct S) + count * sizeof(typeof(fam))
  //     2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
  //
  // The first will add additional padding after the end of the array
  // allocation while the second method is more precise, but not quite expected
  // from programmers. See
  // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
  // of the topic.
  //
  // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
  // structure. Therefore, because of the above issue, we choose to match what
  // GCC does for consistency's sake.

  StructFieldAccess Visitor;
  E = Visitor.Visit(E);
  if (!E)
    return nullptr;

  const Expr *Idx = Visitor.ArrayIndex;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
      int64_t Val = IL->getValue().getSExtValue();
      if (Val < 0)
        return getDefaultBuiltinObjectSizeResult(Type, ResType);

      // The index is 0, so we don't need to take it into account.
      if (Val == 0)
        Idx = nullptr;
    }
  }

  // __counted_by on either a flexible array member or a pointer into a struct
  // with a flexible array member.
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
                                   Type, ResType);

  // __counted_by on a pointer in a struct.
  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
      ICE && ICE->getCastKind() == CK_LValueToRValue)
    return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
                                    Type, ResType);

  return nullptr;
}

static Value *EmitPositiveResultOrZero(CodeGenFunction &CGF,
                                       llvm::Value *Res,
                                       llvm::Value *Index,
                                       llvm::IntegerType *ResType,
                                       bool IsSigned) {
  // cmp = (array_size >= 0)
  Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
  if (Index)
    // cmp = (cmp && index >= 0)
    Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);

  // return cmp ? result : 0
  return CGF.Builder.CreateSelect(Cmp, Res,
                                  ConstantInt::get(ResType, 0, IsSigned));
}

static std::pair<llvm::Value *, llvm::Value *>
GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME,
                      const FieldDecl *ArrayFD, const FieldDecl *CountFD,
                      const Expr *Idx, llvm::IntegerType *ResType,
                      bool IsSigned) {
  // count = ptr->count;
  Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
  if (!Count)
    return std::make_pair<Value *>(nullptr, nullptr);
  Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");

  // index = ptr->index;
  Value *Index = nullptr;
  if (Idx) {
    bool IdxSigned = Idx->getType()->isSignedIntegerType();
    Index = CGF.EmitScalarExpr(Idx);
    Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
  }

  return std::make_pair(Count, Index);
}

llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   struct p **array __attribute__((counted_by(count)));
  //   int count;
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    result = array_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //    index_size = index * casted_array_element_size;
  //    result = array_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index)
  //      cmp = (cmp && index >= 0)
  //    return cmp ? result : 0;

  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have a CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}

llvm::Value *CodeGenFunction::emitCountedByMemberSize(
    const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD)
    return nullptr;

  // Find the flexible array member and check that it has the __counted_by
  // attribute.
  ASTContext &Ctx = getContext();
  const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
  const FieldDecl *FlexibleArrayMemberFD = nullptr;

  if (Decl::isFlexibleArrayMemberLike(
          Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
          /*IgnoreTemplateOrMacroSubstitution=*/true))
    FlexibleArrayMemberFD = FD;
  else
    FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);

  if (!FlexibleArrayMemberFD ||
      !FlexibleArrayMemberFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the flexible array member's object size using these formulae.
  // (Note: if the calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   int count;
  //   struct p *array[] __attribute__((counted_by(count)));
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    result = flexible_array_member_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    casted_flexible_array_member_element_size =
  //        sizeof (*((cast) ptr->array));
  //    index_size = index * casted_flexible_array_member_element_size;
  //
  //    result = flexible_array_member_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index != 0)
  //      cmp = (cmp && index >= 0)
  //    return cmp ? result : 0;
  //
  // 3) '&ptr->field':
  //
  //    count = ptr->count;
  //    sizeof_struct = sizeof (struct s);
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    field_offset = offsetof (struct s, field);
  //    offset_diff = sizeof_struct - field_offset;
  //
  //    result = offset_diff + flexible_array_member_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 4) '&((cast) ptr->field_array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //    sizeof_struct = sizeof (struct s);
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    casted_field_element_size = sizeof (*((cast) ptr->field_array));
  //    field_offset = offsetof (struct s, field)
  //    field_offset += index * casted_field_element_size;
  //
  //    offset_diff = sizeof_struct - field_offset;
  //
  //    result = offset_diff + flexible_array_member_size;
  //
  //    cmp = (result >= 0)
  //    if (index != 0)
  //      cmp = (cmp && index >= 0)
  //    return cmp ? result : 0;

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();

  // Explicit cast because otherwise the CharWidth will promote an i32's into
  // u64's leading to overflows.
  int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());

  // field_offset = offsetof (struct s, field);
  Value *FieldOffset = nullptr;
  if (FlexibleArrayMemberFD != FD) {
    std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
    if (!Offset)
      return nullptr;
    FieldOffset =
        llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
  }

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // flexible_array_member_element_size = sizeof (*ptr->array);
  const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
  CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
  auto *FlexibleArrayMemberElementSize =
      llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

  // flexible_array_member_size = count * flexible_array_member_element_size;
  Value *FlexibleArrayMemberSize =
      Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
                        "flexible_array_member_size", !IsSigned, IsSigned);

  Value *Result = nullptr;
  if (FlexibleArrayMemberFD == FD) {
    if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
      // casted_flexible_array_member_element_size =
      //     sizeof (*((cast) ptr->array));
      llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
          FlexibleArrayMemberElementSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        CharUnits BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
        CastedFlexibleArrayMemberElementSize =
            llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
      }

      // index_size = index * casted_flexible_array_member_element_size;
      Value *IndexSize =
          Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
                            "index_size", !IsSigned, IsSigned);

      // result = flexible_array_member_size - index_size;
      Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
                                 !IsSigned, IsSigned);
    } else { // Option (1) 'ptr->array'
      // result = flexible_array_member_size;
      Result = FlexibleArrayMemberSize;
    }
  } else {
    // sizeof_struct = sizeof (struct s);
    llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
    const llvm::DataLayout &Layout = CGM.getDataLayout();
    TypeSize Size = Layout.getTypeSizeInBits(StructTy);
    Value *SizeofStruct =
        llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);

    if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
      // casted_field_element_size = sizeof (*((cast) ptr->field_array));
      CharUnits BaseSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
      } else {
        const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
        BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
      }

      llvm::ConstantInt *CastedFieldElementSize =
          llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

      // field_offset += index * casted_field_element_size;
      Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
                                     "field_offset", !IsSigned, IsSigned);
      FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
    }
    // Option (3) '&ptr->field', and Option (4) continuation.
    // offset_diff = sizeof_struct - field_offset;
    Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
                                          "offset_diff", !IsSigned, IsSigned);

    // result = offset_diff + flexible_array_member_size;
    Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1494
1495/// Returns a Value corresponding to the size of the given expression.
1496/// This Value may be either of the following:
1497/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1498/// it)
1499/// - A call to the @llvm.objectsize intrinsic
1500///
1501/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1502/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1503/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1504llvm::Value *
1505CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1506 llvm::IntegerType *ResType,
1507 llvm::Value *EmittedE, bool IsDynamic) {
1508 // We need to reference an argument if the pointer is a parameter with the
1509 // pass_object_size attribute.
1510 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1511 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1512 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1513 if (Param != nullptr && PS != nullptr &&
1514 areBOSTypesCompatible(PS->getType(), Type)) {
1515 auto Iter = SizeArguments.find(Param);
1516 assert(Iter != SizeArguments.end());
1517
1518 const ImplicitParamDecl *D = Iter->second;
1519 auto DIter = LocalDeclMap.find(D);
1520 assert(DIter != LocalDeclMap.end());
1521
1522 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1523 getContext().getSizeType(), E->getBeginLoc());
1524 }
1525 }
1526
1527 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1528 // evaluate E for side-effects. In either case, we shouldn't lower to
1529 // @llvm.objectsize.
1530 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1531 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1532
1533 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1534 assert(Ptr->getType()->isPointerTy() &&
1535 "Non-pointer passed to __builtin_object_size?");
1536
1537 if (IsDynamic)
1538 // Emit special code for a flexible array member with the "counted_by"
1539 // attribute.
1540 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1541 return V;
1542
1543 Function *F =
1544 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1545
1546 // LLVM only supports 0 and 2; make sure that we pass it along as a boolean.
1547 Value *Min = Builder.getInt1((Type & 2) != 0);
1548 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
1549 Value *NullIsUnknown = Builder.getTrue();
1550 Value *Dynamic = Builder.getInt1(IsDynamic);
1551 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1552}
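// Usage example (editorial note, not in the original source): in
//   size_t g(void *p __attribute__((pass_object_size(0)))) {
//     return __builtin_object_size(p, 0);
//   }
// the builtin loads the hidden size parameter found via SizeArguments above;
// otherwise it lowers to a call to @llvm.objectsize on the emitted pointer.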
1553
1554namespace {
1555/// A struct to generically describe a bit test intrinsic.
1556struct BitTest {
1557 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
1558 enum InterlockingKind : uint8_t {
1559 Unlocked,
1560 Sequential,
1561 Acquire,
1562 Release,
1563 NoFence
1564 };
1565
1566 ActionKind Action;
1567 InterlockingKind Interlocking;
1568 bool Is64Bit;
1569
1570 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1571};
1572
1573} // namespace
1574
1575BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1576 switch (BuiltinID) {
1577 // Main portable variants.
1578 case Builtin::BI_bittest:
1579 return {TestOnly, Unlocked, false};
1580 case Builtin::BI_bittestandcomplement:
1581 return {Complement, Unlocked, false};
1582 case Builtin::BI_bittestandreset:
1583 return {Reset, Unlocked, false};
1584 case Builtin::BI_bittestandset:
1585 return {Set, Unlocked, false};
1586 case Builtin::BI_interlockedbittestandreset:
1587 return {Reset, Sequential, false};
1588 case Builtin::BI_interlockedbittestandset:
1589 return {Set, Sequential, false};
1590
1591 // 64-bit variants.
1592 case Builtin::BI_bittest64:
1593 return {TestOnly, Unlocked, true};
1594 case Builtin::BI_bittestandcomplement64:
1595 return {Complement, Unlocked, true};
1596 case Builtin::BI_bittestandreset64:
1597 return {Reset, Unlocked, true};
1598 case Builtin::BI_bittestandset64:
1599 return {Set, Unlocked, true};
1600 case Builtin::BI_interlockedbittestandreset64:
1601 return {Reset, Sequential, true};
1602 case Builtin::BI_interlockedbittestandset64:
1603 return {Set, Sequential, true};
1604
1605 // ARM/AArch64-specific ordering variants.
1606 case Builtin::BI_interlockedbittestandset_acq:
1607 return {Set, Acquire, false};
1608 case Builtin::BI_interlockedbittestandset_rel:
1609 return {Set, Release, false};
1610 case Builtin::BI_interlockedbittestandset_nf:
1611 return {Set, NoFence, false};
1612 case Builtin::BI_interlockedbittestandreset_acq:
1613 return {Reset, Acquire, false};
1614 case Builtin::BI_interlockedbittestandreset_rel:
1615 return {Reset, Release, false};
1616 case Builtin::BI_interlockedbittestandreset_nf:
1617 return {Reset, NoFence, false};
1618 case Builtin::BI_interlockedbittestandreset64_acq:
1619 return {Reset, Acquire, true};
1620 case Builtin::BI_interlockedbittestandreset64_rel:
1621 return {Reset, Release, true};
1622 case Builtin::BI_interlockedbittestandreset64_nf:
1623 return {Reset, NoFence, true};
1624 case Builtin::BI_interlockedbittestandset64_acq:
1625 return {Set, Acquire, true};
1626 case Builtin::BI_interlockedbittestandset64_rel:
1627 return {Set, Release, true};
1628 case Builtin::BI_interlockedbittestandset64_nf:
1629 return {Set, NoFence, true};
1630 }
1631 llvm_unreachable("expected only bittest intrinsics");
1632}
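// Example (editorial note): _interlockedbittestandset64 decodes to
// {Set, Sequential, /*Is64Bit=*/true}; on x86 this selects a "lock btsq",
// and elsewhere a sequentially-consistent atomicrmw in the generic path.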
1633
1634static char bitActionToX86BTCode(BitTest::ActionKind A) {
1635 switch (A) {
1636 case BitTest::TestOnly: return '\0';
1637 case BitTest::Complement: return 'c';
1638 case BitTest::Reset: return 'r';
1639 case BitTest::Set: return 's';
1640 }
1641 llvm_unreachable("invalid action");
1642}
1643
1644 static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
1645 BitTest BT,
1646 const CallExpr *E, Value *BitBase,
1647 Value *BitPos) {
1648 char Action = bitActionToX86BTCode(BT.Action);
1649 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1650
1651 // Build the assembly.
1652 SmallString<64> Asm;
1653 raw_svector_ostream AsmOS(Asm);
1654 if (BT.Interlocking != BitTest::Unlocked)
1655 AsmOS << "lock ";
1656 AsmOS << "bt";
1657 if (Action)
1658 AsmOS << Action;
1659 AsmOS << SizeSuffix << " $2, ($1)";
1660
1661 // Build the constraints. FIXME: We should support immediates when possible.
1662 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1663 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1664 if (!MachineClobbers.empty()) {
1665 Constraints += ',';
1666 Constraints += MachineClobbers;
1667 }
1668 llvm::IntegerType *IntType = llvm::IntegerType::get(
1669 CGF.getLLVMContext(),
1670 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1671 llvm::FunctionType *FTy =
1672 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1673
1674 llvm::InlineAsm *IA =
1675 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1676 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1677}
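// Illustrative output (editorial note): for BT = {Set, Sequential, true} the
// assembly string built above is "lock btsq $2, ($1)", and the tested bit is
// returned through the carry flag via the "={@ccc}" constraint.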
1678
1679static llvm::AtomicOrdering
1680getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1681 switch (I) {
1682 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1683 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1684 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1685 case BitTest::Release: return llvm::AtomicOrdering::Release;
1686 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1687 }
1688 llvm_unreachable("invalid interlocking");
1689}
1690
1691static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1692 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1693 llvm::Type *ArgType = ArgValue->getType();
1694
1695 // Boolean vectors can be bitcast directly to their bitfield representation. We
1696 // intentionally do not round up to the next power-of-two size and let LLVM
1697 // handle the trailing bits.
1698 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1699 VT && VT->getElementType()->isIntegerTy(1)) {
1700 llvm::Type *StorageType =
1701 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1702 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1703 }
1704
1705 return ArgValue;
1706}
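// Example (editorial note): an argument of type <8 x i1> is bitcast to i8
// here, so a following bit-count intrinsic sees the mask as a plain integer.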
1707
1708/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1709/// bits and a bit position and read and optionally modify the bit at that
1710/// position. The position index can be arbitrarily large, i.e. it can be larger
1711/// than 31 or 63, so we need an indexed load in the general case.
1712static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1713 unsigned BuiltinID,
1714 const CallExpr *E) {
1715 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1716 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1717
1718 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1719
1720 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1721 // indexing operation internally. Use them if possible.
1722 if (CGF.getTarget().getTriple().isX86())
1723 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1724
1725 // Otherwise, use generic code to load one byte and test the bit. Use all but
1726 // the bottom three bits as the array index, and the bottom three bits to form
1727 // a mask.
1728 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1729 Value *ByteIndex = CGF.Builder.CreateAShr(
1730 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1731 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1732 "bittest.byteaddr"),
1733 CGF.Int8Ty, CharUnits::One());
1734 Value *PosLow =
1735 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1736 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1737
1738 // The updating instructions will need a mask.
1739 Value *Mask = nullptr;
1740 if (BT.Action != BitTest::TestOnly) {
1741 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1742 "bittest.mask");
1743 }
1744
1745 // Check the action and ordering of the interlocked intrinsics.
1746 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1747
1748 Value *OldByte = nullptr;
1749 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1750 // Emit a combined atomicrmw load/store operation for the interlocked
1751 // intrinsics.
1752 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1753 if (BT.Action == BitTest::Reset) {
1754 Mask = CGF.Builder.CreateNot(Mask);
1755 RMWOp = llvm::AtomicRMWInst::And;
1756 }
1757 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1758 } else {
1759 // Emit a plain load for the non-interlocked intrinsics.
1760 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1761 Value *NewByte = nullptr;
1762 switch (BT.Action) {
1763 case BitTest::TestOnly:
1764 // Don't store anything.
1765 break;
1766 case BitTest::Complement:
1767 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1768 break;
1769 case BitTest::Reset:
1770 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1771 break;
1772 case BitTest::Set:
1773 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1774 break;
1775 }
1776 if (NewByte)
1777 CGF.Builder.CreateStore(NewByte, ByteAddr);
1778 }
1779
1780 // However we loaded the old byte, either by plain load or atomicrmw, shift
1781 // the bit into the low position and mask it to 0 or 1.
1782 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1783 return CGF.Builder.CreateAnd(
1784 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1785}
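// Generic lowering sketch (editorial note, not in the original source): for
// _bittestandset(base, pos) on a non-x86 target the code above amounts to:
//   uint8_t old = base[pos >> 3];
//   base[pos >> 3] = old | (1 << (pos & 7));
//   return (old >> (pos & 7)) & 1;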
1786
1787namespace {
1788enum class MSVCSetJmpKind {
1789 _setjmpex,
1790 _setjmp3,
1791 _setjmp
1792};
1793}
1794
1795/// MSVC handles setjmp a bit differently on different platforms. On every
1796/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1797/// parameters can be passed as variadic arguments, but we always pass none.
1798static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1799 const CallExpr *E) {
1800 llvm::Value *Arg1 = nullptr;
1801 llvm::Type *Arg1Ty = nullptr;
1802 StringRef Name;
1803 bool IsVarArg = false;
1804 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1805 Name = "_setjmp3";
1806 Arg1Ty = CGF.Int32Ty;
1807 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1808 IsVarArg = true;
1809 } else {
1810 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1811 Arg1Ty = CGF.Int8PtrTy;
1812 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1813 Arg1 = CGF.Builder.CreateCall(
1814 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1815 } else
1816 Arg1 = CGF.Builder.CreateCall(
1817 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1818 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1819 }
1820
1821 // Mark the call site and declaration with ReturnsTwice.
1822 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1823 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1824 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1825 llvm::Attribute::ReturnsTwice);
1826 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1827 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1828 ReturnsTwiceAttr, /*Local=*/true);
1829
1830 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1831 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1832 llvm::Value *Args[] = {Buf, Arg1};
1833 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1834 CB->setAttributes(ReturnsTwiceAttr);
1835 return RValue::get(CB);
1836}
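// Example (editorial note): EmitMSVCRTSetJmp with MSVCSetJmpKind::_setjmp3
// emits the ReturnsTwice call "_setjmp3(buf, 0)", while the _setjmp and
// _setjmpex kinds pass the frame address (or sponentry on AArch64) instead.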
1837
1838 // Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1839 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1840 const CallExpr *E) {
1841 switch (BuiltinID) {
1842 case MSVCIntrin::_BitScanForward:
1843 case MSVCIntrin::_BitScanReverse: {
1844 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1845 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1846
1847 llvm::Type *ArgType = ArgValue->getType();
1848 llvm::Type *IndexType = IndexAddress.getElementType();
1849 llvm::Type *ResultType = ConvertType(E->getType());
1850
1851 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1852 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1853 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1854
1855 BasicBlock *Begin = Builder.GetInsertBlock();
1856 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1857 Builder.SetInsertPoint(End);
1858 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1859
1860 Builder.SetInsertPoint(Begin);
1861 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1862 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1863 Builder.CreateCondBr(IsZero, End, NotZero);
1864 Result->addIncoming(ResZero, Begin);
1865
1866 Builder.SetInsertPoint(NotZero);
1867
1868 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1869 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1870 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1871 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1872 Builder.CreateStore(ZeroCount, IndexAddress, false);
1873 } else {
1874 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1875 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1876
1877 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1878 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1879 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1880 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1881 Builder.CreateStore(Index, IndexAddress, false);
1882 }
1883 Builder.CreateBr(End);
1884 Result->addIncoming(ResOne, NotZero);
1885
1886 Builder.SetInsertPoint(End);
1887 return Result;
1888 }
1889 case MSVCIntrin::_InterlockedAnd:
1890 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1891 case MSVCIntrin::_InterlockedExchange:
1892 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1893 case MSVCIntrin::_InterlockedExchangeAdd:
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1895 case MSVCIntrin::_InterlockedExchangeSub:
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1897 case MSVCIntrin::_InterlockedOr:
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1899 case MSVCIntrin::_InterlockedXor:
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1901 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1903 AtomicOrdering::Acquire);
1904 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1905 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1906 AtomicOrdering::Release);
1907 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1908 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1909 AtomicOrdering::Monotonic);
1910 case MSVCIntrin::_InterlockedExchange_acq:
1911 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1912 AtomicOrdering::Acquire);
1913 case MSVCIntrin::_InterlockedExchange_rel:
1914 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1915 AtomicOrdering::Release);
1916 case MSVCIntrin::_InterlockedExchange_nf:
1917 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1918 AtomicOrdering::Monotonic);
1919 case MSVCIntrin::_InterlockedCompareExchange:
1920 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1921 case MSVCIntrin::_InterlockedCompareExchange_acq:
1922 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1923 case MSVCIntrin::_InterlockedCompareExchange_rel:
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1925 case MSVCIntrin::_InterlockedCompareExchange_nf:
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1927 case MSVCIntrin::_InterlockedCompareExchange128:
1928 return EmitAtomicCmpXchg128ForMSIntrin(
1929 *this, E, AtomicOrdering::SequentiallyConsistent);
1930 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1931 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1932 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1933 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1934 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1936 case MSVCIntrin::_InterlockedOr_acq:
1937 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1938 AtomicOrdering::Acquire);
1939 case MSVCIntrin::_InterlockedOr_rel:
1940 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1941 AtomicOrdering::Release);
1942 case MSVCIntrin::_InterlockedOr_nf:
1943 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1944 AtomicOrdering::Monotonic);
1945 case MSVCIntrin::_InterlockedXor_acq:
1946 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1947 AtomicOrdering::Acquire);
1948 case MSVCIntrin::_InterlockedXor_rel:
1949 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1950 AtomicOrdering::Release);
1951 case MSVCIntrin::_InterlockedXor_nf:
1952 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1953 AtomicOrdering::Monotonic);
1954 case MSVCIntrin::_InterlockedAnd_acq:
1955 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1956 AtomicOrdering::Acquire);
1957 case MSVCIntrin::_InterlockedAnd_rel:
1958 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1959 AtomicOrdering::Release);
1960 case MSVCIntrin::_InterlockedAnd_nf:
1961 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1962 AtomicOrdering::Monotonic);
1963 case MSVCIntrin::_InterlockedIncrement_acq:
1964 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1965 case MSVCIntrin::_InterlockedIncrement_rel:
1966 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1967 case MSVCIntrin::_InterlockedIncrement_nf:
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1969 case MSVCIntrin::_InterlockedDecrement_acq:
1970 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1971 case MSVCIntrin::_InterlockedDecrement_rel:
1972 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1973 case MSVCIntrin::_InterlockedDecrement_nf:
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1975
1976 case MSVCIntrin::_InterlockedDecrement:
1977 return EmitAtomicDecrementValue(*this, E);
1978 case MSVCIntrin::_InterlockedIncrement:
1979 return EmitAtomicIncrementValue(*this, E);
1980
1981 case MSVCIntrin::__fastfail: {
1982 // Request immediate process termination from the kernel. The instruction
1983 // sequences to do this are documented on MSDN:
1984 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1985 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1986 StringRef Asm, Constraints;
1987 switch (ISA) {
1988 default:
1989 ErrorUnsupported(E, "__fastfail call for this architecture");
1990 break;
1991 case llvm::Triple::x86:
1992 case llvm::Triple::x86_64:
1993 Asm = "int $$0x29";
1994 Constraints = "{cx}";
1995 break;
1996 case llvm::Triple::thumb:
1997 Asm = "udf #251";
1998 Constraints = "{r0}";
1999 break;
2000 case llvm::Triple::aarch64:
2001 Asm = "brk #0xF003";
2002 Constraints = "{w0}";
2003 }
2004 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2005 llvm::InlineAsm *IA =
2006 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2007 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2008 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2009 llvm::Attribute::NoReturn);
2010 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2011 CI->setAttributes(NoReturnAttr);
2012 return CI;
2013 }
2014 }
2015 llvm_unreachable("Incorrect MSVC intrinsic!");
2016}
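// Example (editorial sketch, not in the original source): a call such as
//   long v; _InterlockedIncrement_acq(&v);
// takes the EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire) path
// above, i.e. an acquire-ordered atomic read-modify-write.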
2017
2018namespace {
2019// ARC cleanup for __builtin_os_log_format
2020struct CallObjCArcUse final : EHScopeStack::Cleanup {
2021 CallObjCArcUse(llvm::Value *object) : object(object) {}
2022 llvm::Value *object;
2023
2024 void Emit(CodeGenFunction &CGF, Flags flags) override {
2025 CGF.EmitARCIntrinsicUse(object);
2026 }
2027};
2028}
2029
2031 BuiltinCheckKind Kind) {
2032 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2033 "Unsupported builtin check kind");
2034
2035 Value *ArgValue = EmitBitCountExpr(*this, E);
2036 if (!SanOpts.has(SanitizerKind::Builtin))
2037 return ArgValue;
2038
2039 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2040 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2041 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2042 Value *Cond = Builder.CreateICmpNE(
2043 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2044 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2045 {EmitCheckSourceLocation(E->getExprLoc()),
2046 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2047 {});
2048 return ArgValue;
2049}
2050
2051 Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) {
2052 Value *ArgValue = EvaluateExprAsBool(E);
2053 if (!SanOpts.has(SanitizerKind::Builtin))
2054 return ArgValue;
2055
2056 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2057 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2058 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2059 EmitCheck(
2060 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2061 {EmitCheckSourceLocation(E->getExprLoc()),
2062 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2063 {});
2064 return ArgValue;
2065}
2066
2067static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2068 return CGF.Builder.CreateBinaryIntrinsic(
2069 Intrinsic::abs, ArgValue,
2070 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2071}
2072
2073 static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
2074 bool SanitizeOverflow) {
2075 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2076
2077 // Try to eliminate overflow check.
2078 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2079 if (!VCI->isMinSignedValue())
2080 return EmitAbs(CGF, ArgValue, true);
2081 }
2082
2083 SmallVector<SanitizerKind::SanitizerOrdinal, 1> Ordinals;
2084 SanitizerHandler CheckHandler;
2085 if (SanitizeOverflow) {
2086 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2087 CheckHandler = SanitizerHandler::NegateOverflow;
2088 } else
2089 CheckHandler = SanitizerHandler::SubOverflow;
2090
2091 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2092
2093 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2094 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2095 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2096 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2097 Value *NotOverflow = CGF.Builder.CreateNot(
2098 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2099
2100 // TODO: support -ftrapv-handler.
2101 if (SanitizeOverflow) {
2102 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2103 CheckHandler,
2104 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
2105 CGF.EmitCheckTypeDescriptor(E->getType())},
2106 {ArgValue});
2107 } else
2108 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2109
2110 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2111 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2112}
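// Worked example (editorial note): for a 32-bit int, 0 - x overflows only for
// x == INT_MIN, so __builtin_abs(INT_MIN) is the one input that trips the
// ssub_with_overflow check above; every other input simply selects between
// x and -x via the "abscond" comparison.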
2113
2114/// Get the argument type for arguments to os_log_helper.
2115 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
2116 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2117 return C.getCanonicalType(UnsignedTy);
2118}
2119
2120 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
2121 const analyze_os_log::OSLogBufferLayout &Layout,
2122 CharUnits BufferAlignment) {
2123 ASTContext &Ctx = getContext();
2124
2125 SmallString<64> Name;
2126 {
2127 raw_svector_ostream OS(Name);
2128 OS << "__os_log_helper";
2129 OS << "_" << BufferAlignment.getQuantity();
2130 OS << "_" << int(Layout.getSummaryByte());
2131 OS << "_" << int(Layout.getNumArgsByte());
2132 for (const auto &Item : Layout.Items)
2133 OS << "_" << int(Item.getSizeByte()) << "_"
2134 << int(Item.getDescriptorByte());
2135 }
2136
2137 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2138 return F;
2139
2140 llvm::SmallVector<QualType, 4> ArgTys;
2141 FunctionArgList Args;
2142 Args.push_back(ImplicitParamDecl::Create(
2143 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2144 ImplicitParamKind::Other));
2145 ArgTys.emplace_back(Ctx.VoidPtrTy);
2146
2147 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2148 char Size = Layout.Items[I].getSizeByte();
2149 if (!Size)
2150 continue;
2151
2152 QualType ArgTy = getOSLogArgType(Ctx, Size);
2153 Args.push_back(ImplicitParamDecl::Create(
2154 Ctx, nullptr, SourceLocation(),
2155 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2156 ImplicitParamKind::Other));
2157 ArgTys.emplace_back(ArgTy);
2158 }
2159
2160 QualType ReturnTy = Ctx.VoidTy;
2161
2162 // The helper function has linkonce_odr linkage to enable the linker to merge
2163 // identical functions. To ensure the merging always happens, 'noinline' is
2164 // attached to the function when compiling with -Oz.
2165 const CGFunctionInfo &FI =
2166 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2167 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2168 llvm::Function *Fn = llvm::Function::Create(
2169 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2170 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2171 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2172 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2173 Fn->setDoesNotThrow();
2174
2175 // Attach 'noinline' at -Oz.
2176 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2177 Fn->addFnAttr(llvm::Attribute::NoInline);
2178
2179 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2180 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2181
2182 // Create a scope with an artificial location for the body of this function.
2183 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2184
2185 CharUnits Offset;
2186 Address BufAddr = makeNaturalAddressForPointer(
2187 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2188 BufferAlignment);
2189 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2190 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2191 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2192 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2193
2194 unsigned I = 1;
2195 for (const auto &Item : Layout.Items) {
2196 Builder.CreateStore(
2197 Builder.getInt8(Item.getDescriptorByte()),
2198 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2199 Builder.CreateStore(
2200 Builder.getInt8(Item.getSizeByte()),
2201 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2202
2203 CharUnits Size = Item.size();
2204 if (!Size.getQuantity())
2205 continue;
2206
2207 Address Arg = GetAddrOfLocalVar(Args[I]);
2208 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2209 Addr = Addr.withElementType(Arg.getElementType());
2210 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2211 Offset += Size;
2212 ++I;
2213 }
2214
2215 FinishFunction();
2216
2217 return Fn;
2218}
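// Layout sketch (editorial note): the helper above serializes the buffer as
//   [summary][numArgs]([argDescriptor][argSize][argData...])*
// so logging a single 4-byte integer writes 2 header bytes, 2 item header
// bytes, and 4 data bytes: 8 bytes in total.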
2219
2220 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
2221 assert(E.getNumArgs() >= 2 &&
2222 "__builtin_os_log_format takes at least 2 arguments");
2223 ASTContext &Ctx = getContext();
2224 analyze_os_log::OSLogBufferLayout Layout;
2225 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
2226 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2227
2228 // Ignore argument 1, the format string. It is not currently used.
2229 CallArgList Args;
2230 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2231
2232 for (const auto &Item : Layout.Items) {
2233 int Size = Item.getSizeByte();
2234 if (!Size)
2235 continue;
2236
2237 llvm::Value *ArgVal;
2238
2239 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2240 uint64_t Val = 0;
2241 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2242 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2243 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2244 } else if (const Expr *TheExpr = Item.getExpr()) {
2245 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2246
2247 // If a temporary object that requires destruction after the full
2248 // expression is passed, push a lifetime-extended cleanup to extend its
2249 // lifetime to the end of the enclosing block scope.
2250 auto LifetimeExtendObject = [&](const Expr *E) {
2251 E = E->IgnoreParenCasts();
2252 // Extend lifetimes of objects returned by function calls and message
2253 // sends.
2254
2255 // FIXME: We should do this in other cases in which temporaries are
2256 // created including arguments of non-ARC types (e.g., C++
2257 // temporaries).
2258 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
2259 return true;
2260 return false;
2261 };
2262
2263 if (TheExpr->getType()->isObjCRetainableType() &&
2264 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2265 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2266 "Only scalar can be a ObjC retainable type");
2267 if (!isa<Constant>(ArgVal)) {
2268 CleanupKind Cleanup = getARCCleanupKind();
2269 QualType Ty = TheExpr->getType();
2270 RawAddress Alloca = RawAddress::invalid();
2271 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2272 ArgVal = EmitARCRetain(Ty, ArgVal);
2273 Builder.CreateStore(ArgVal, Addr);
2274 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2275 destroyARCStrongPrecise,
2276 Cleanup & EHCleanup);
2277
2278 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2279 // argument has to be alive.
2280 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2281 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
2282 }
2283 }
2284 } else {
2285 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2286 }
2287
2288 unsigned ArgValSize =
2289 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2290 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2291 ArgValSize);
2292 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2293 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2294 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2295 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2296 Args.add(RValue::get(ArgVal), ArgTy);
2297 }
2298
2299 const CGFunctionInfo &FI =
2300 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2301 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2302 Layout, BufAddr.getAlignment());
2303 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2304 return RValue::get(BufAddr, *this);
2305}
2306
2307 static bool isSpecialUnsignedMultiplySignedResult(
2308 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2309 WidthAndSignedness ResultInfo) {
2310 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2311 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2312 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2313}
2314
2315 static RValue EmitCheckedUnsignedMultiplySignedResult(
2316 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2317 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2318 const clang::Expr *ResultArg, QualType ResultQTy,
2319 WidthAndSignedness ResultInfo) {
2320 assert(isSpecialUnsignedMultiplySignedResult(
2321 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2322 "Cannot specialize this multiply");
2323
2324 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2325 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2326
2327 llvm::Value *HasOverflow;
2328 llvm::Value *Result = EmitOverflowIntrinsic(
2329 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2330
2331 // The intrinsic call detects overflow when the value is > UINT_MAX; however,
2332 // since the original builtin had a signed result, we also need to report
2333 // an overflow when the result is greater than INT_MAX.
2334 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2335 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2336
2337 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2338 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2339
2340 bool isVolatile =
2341 ResultArg->getType()->getPointeeType().isVolatileQualified();
2342 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2343 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2344 isVolatile);
2345 return RValue::get(HasOverflow);
2346}
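// Worked example (editorial note, not in the original source): with 32-bit
// operands, __builtin_mul_overflow(2000000000u, 2u, &signed_result) does not
// overflow the unsigned multiply (4000000000 <= UINT_MAX), but
// 4000000000 > INT_MAX, so the IntMaxOverflow comparison above reports it.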
2347
2348/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2349static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2350 WidthAndSignedness Op1Info,
2351 WidthAndSignedness Op2Info,
2352 WidthAndSignedness ResultInfo) {
2353 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2354 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2355 Op1Info.Signed != Op2Info.Signed;
2356}
2357
2358/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2359/// the generic checked-binop irgen.
2360static RValue
2361 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2362 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2363 WidthAndSignedness Op2Info,
2364 const clang::Expr *ResultArg, QualType ResultQTy,
2365 WidthAndSignedness ResultInfo) {
2366 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2367 Op2Info, ResultInfo) &&
2368 "Not a mixed-sign multipliction we can specialize");
2369
2370 // Emit the signed and unsigned operands.
2371 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2372 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2373 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2374 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2375 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2376 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2377
2378 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2379 if (SignedOpWidth < UnsignedOpWidth)
2380 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2381 if (UnsignedOpWidth < SignedOpWidth)
2382 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2383
2384 llvm::Type *OpTy = Signed->getType();
2385 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2386 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2387 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2388 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2389
2390 // Take the absolute value of the signed operand.
2391 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2392 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2393 llvm::Value *AbsSigned =
2394 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2395
2396 // Perform a checked unsigned multiplication.
2397 llvm::Value *UnsignedOverflow;
2398 llvm::Value *UnsignedResult =
2399 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2400 Unsigned, UnsignedOverflow);
2401
2402 llvm::Value *Overflow, *Result;
2403 if (ResultInfo.Signed) {
2404 // Signed overflow occurs if the result is greater than INT_MAX or less
2405 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2406 auto IntMax =
2407 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2408 llvm::Value *MaxResult =
2409 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2410 CGF.Builder.CreateZExt(IsNegative, OpTy));
2411 llvm::Value *SignedOverflow =
2412 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2413 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2414
2415 // Prepare the signed result (possibly by negating it).
2416 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2417 llvm::Value *SignedResult =
2418 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2419 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2420 } else {
2421 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2422 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2423 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2424 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2425 if (ResultInfo.Width < OpWidth) {
2426 auto IntMax =
2427 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2428 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2429 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2430 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2431 }
2432
2433 // Negate the product if it would be negative in infinite precision.
2434 Result = CGF.Builder.CreateSelect(
2435 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2436
2437 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2438 }
2439 assert(Overflow && Result && "Missing overflow or result");
2440
2441 bool isVolatile =
2442 ResultArg->getType()->getPointeeType().isVolatileQualified();
2443 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2444 isVolatile);
2445 return RValue::get(Overflow);
2446}
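// Worked example (editorial note, not in the original source): for int a = -3,
// unsigned b = 5, and an unsigned result, the code above forms AbsSigned = 3
// and UnsignedResult = 15; IsNegative is true and the product is nonzero, so
// Underflow (and therefore Overflow) is set: -15 has no unsigned representation.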
2447
2448static bool
2449 TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2450 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2451 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2452 Ty = Ctx.getBaseElementType(Arr);
2453
2454 const auto *Record = Ty->getAsCXXRecordDecl();
2455 if (!Record)
2456 return false;
2457
2458 // We've already checked this type, or are in the process of checking it.
2459 if (!Seen.insert(Record).second)
2460 return false;
2461
2462 assert(Record->hasDefinition() &&
2463 "Incomplete types should already be diagnosed");
2464
2465 if (Record->isDynamicClass())
2466 return true;
2467
2468 for (FieldDecl *F : Record->fields()) {
2469 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2470 return true;
2471 }
2472 return false;
2473}
2474
2475/// Determine if the specified type requires laundering by checking if it is a
2476/// dynamic class type or contains a subobject which is a dynamic class type.
2477 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2478 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2479 return false;
2480 llvm::SmallPtrSet<const Decl *, 8> Seen;
2481 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2482}
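// Example (editorial note): under -fstrict-vtable-pointers a type such as
//   struct Base { virtual ~Base(); };
// is a dynamic class, so laundering a Base* cannot be folded to a no-op,
// and neither can laundering any type containing such a subobject.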
2483
2484RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2485 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2486 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2487
2488 // The builtin's shift arg may have a different type than the source arg and
2489 // result, but the LLVM intrinsic uses the same type for all values.
2490 llvm::Type *Ty = Src->getType();
2491 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2492
2493 // Rotate is a special case of LLVM funnel shift - the first two args are the same.
2494 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2495 Function *F = CGM.getIntrinsic(IID, Ty);
2496 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2497}
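// Example (editorial note): __builtin_rotateleft32(x, n) becomes
// @llvm.fshl.i32(x, x, n); feeding the same value to both funnel-shift
// inputs is what turns the funnel shift into a rotate.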
2498
2499// Map math builtins for long-double to f128 version.
2500static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2501 switch (BuiltinID) {
2502#define MUTATE_LDBL(func) \
2503 case Builtin::BI__builtin_##func##l: \
2504 return Builtin::BI__builtin_##func##f128;
2535 MUTATE_LDBL(nans)
2536 MUTATE_LDBL(inf)
2555 MUTATE_LDBL(huge_val)
2565#undef MUTATE_LDBL
2566 default:
2567 return BuiltinID;
2568 }
2569}
2570
2571static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2572 Value *V) {
2573 if (CGF.Builder.getIsFPConstrained() &&
2574 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2575 if (Value *Result =
2576 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2577 return Result;
2578 }
2579 return nullptr;
2580}
2581
2582 static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
2583 const FunctionDecl *FD) {
2584 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2585 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2586 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2587
2588 SmallVector<Value *, 16> Args;
2589 for (auto &&FormalTy : FnTy->params())
2590 Args.push_back(llvm::PoisonValue::get(FormalTy));
2591
2592 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2593}
2594
2595 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2596 const CallExpr *E,
2597 ReturnValueSlot ReturnValue) {
2598 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2599 "Should not codegen for consteval builtins");
2600
2601 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2602 // See if we can constant fold this builtin. If so, don't emit it at all.
2603 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2604 Expr::EvalResult Result;
2605 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2606 !Result.hasSideEffects()) {
2607 if (Result.Val.isInt())
2608 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2609 Result.Val.getInt()));
2610 if (Result.Val.isFloat())
2611 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2612 Result.Val.getFloat()));
2613 }
2614
2615 // If current long-double semantics is IEEE 128-bit, replace math builtins
2616 // of long-double with f128 equivalent.
2617 // TODO: This mutation should also be applied to targets other than PPC,
2618 // after backend supports IEEE 128-bit style libcalls.
2619 if (getTarget().getTriple().isPPC64() &&
2620 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2621 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2622
2623 // If the builtin has been declared explicitly with an assembler label,
2624 // disable the specialized emitting below. Ideally we should communicate the
2625 // rename in IR, or at least avoid generating the intrinsic calls that are
2626 // likely to get lowered to the renamed library functions.
2627 const unsigned BuiltinIDIfNoAsmLabel =
2628 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2629
2630 std::optional<bool> ErrnoOverriden;
2631 // ErrnoOverriden is true if math-errno is overridden via the
2632 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2633 // which implies math-errno.
2634 if (E->hasStoredFPFeatures()) {
2635 FPOptionsOverride OP = E->getFPFeatures();
2636 if (OP.hasMathErrnoOverride())
2637 ErrnoOverriden = OP.getMathErrnoOverride();
2638 }
2639 // True if '__attribute__((optnone))' is used. This attribute overrides
2640 // fast-math, which implies math-errno.
2641 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2642
2643 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2644
2645 bool GenerateFPMathIntrinsics =
2647 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2648 OptNone, IsOptimizationEnabled);
2649
2650 if (GenerateFPMathIntrinsics) {
2651 switch (BuiltinIDIfNoAsmLabel) {
2652 case Builtin::BIacos:
2653 case Builtin::BIacosf:
2654 case Builtin::BIacosl:
2655 case Builtin::BI__builtin_acos:
2656 case Builtin::BI__builtin_acosf:
2657 case Builtin::BI__builtin_acosf16:
2658 case Builtin::BI__builtin_acosl:
2659 case Builtin::BI__builtin_acosf128:
2660 case Builtin::BI__builtin_elementwise_acos:
2661 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2662 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2663
2664 case Builtin::BIasin:
2665 case Builtin::BIasinf:
2666 case Builtin::BIasinl:
2667 case Builtin::BI__builtin_asin:
2668 case Builtin::BI__builtin_asinf:
2669 case Builtin::BI__builtin_asinf16:
2670 case Builtin::BI__builtin_asinl:
2671 case Builtin::BI__builtin_asinf128:
2672 case Builtin::BI__builtin_elementwise_asin:
2673 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2674 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2675
2676 case Builtin::BIatan:
2677 case Builtin::BIatanf:
2678 case Builtin::BIatanl:
2679 case Builtin::BI__builtin_atan:
2680 case Builtin::BI__builtin_atanf:
2681 case Builtin::BI__builtin_atanf16:
2682 case Builtin::BI__builtin_atanl:
2683 case Builtin::BI__builtin_atanf128:
2684 case Builtin::BI__builtin_elementwise_atan:
2685 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2686 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2687
2688 case Builtin::BIatan2:
2689 case Builtin::BIatan2f:
2690 case Builtin::BIatan2l:
2691 case Builtin::BI__builtin_atan2:
2692 case Builtin::BI__builtin_atan2f:
2693 case Builtin::BI__builtin_atan2f16:
2694 case Builtin::BI__builtin_atan2l:
2695 case Builtin::BI__builtin_atan2f128:
2696 case Builtin::BI__builtin_elementwise_atan2:
2697 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2698 *this, E, Intrinsic::atan2,
2699 Intrinsic::experimental_constrained_atan2));
2700
2701 case Builtin::BIceil:
2702 case Builtin::BIceilf:
2703 case Builtin::BIceill:
2704 case Builtin::BI__builtin_ceil:
2705 case Builtin::BI__builtin_ceilf:
2706 case Builtin::BI__builtin_ceilf16:
2707 case Builtin::BI__builtin_ceill:
2708 case Builtin::BI__builtin_ceilf128:
2709 case Builtin::BI__builtin_elementwise_ceil:
2710 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2711 Intrinsic::ceil,
2712 Intrinsic::experimental_constrained_ceil));
2713
2714 case Builtin::BIcopysign:
2715 case Builtin::BIcopysignf:
2716 case Builtin::BIcopysignl:
2717 case Builtin::BI__builtin_copysign:
2718 case Builtin::BI__builtin_copysignf:
2719 case Builtin::BI__builtin_copysignf16:
2720 case Builtin::BI__builtin_copysignl:
2721 case Builtin::BI__builtin_copysignf128:
2722 return RValue::get(
2723 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2724
2725 case Builtin::BIcos:
2726 case Builtin::BIcosf:
2727 case Builtin::BIcosl:
2728 case Builtin::BI__builtin_cos:
2729 case Builtin::BI__builtin_cosf:
2730 case Builtin::BI__builtin_cosf16:
2731 case Builtin::BI__builtin_cosl:
2732 case Builtin::BI__builtin_cosf128:
2733 case Builtin::BI__builtin_elementwise_cos:
2734 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2735 Intrinsic::cos,
2736 Intrinsic::experimental_constrained_cos));
2737
2738 case Builtin::BIcosh:
2739 case Builtin::BIcoshf:
2740 case Builtin::BIcoshl:
2741 case Builtin::BI__builtin_cosh:
2742 case Builtin::BI__builtin_coshf:
2743 case Builtin::BI__builtin_coshf16:
2744 case Builtin::BI__builtin_coshl:
2745 case Builtin::BI__builtin_coshf128:
2746 case Builtin::BI__builtin_elementwise_cosh:
2747 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2748 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2749
2750 case Builtin::BIexp:
2751 case Builtin::BIexpf:
2752 case Builtin::BIexpl:
2753 case Builtin::BI__builtin_exp:
2754 case Builtin::BI__builtin_expf:
2755 case Builtin::BI__builtin_expf16:
2756 case Builtin::BI__builtin_expl:
2757 case Builtin::BI__builtin_expf128:
2758 case Builtin::BI__builtin_elementwise_exp:
2759 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2760 Intrinsic::exp,
2761 Intrinsic::experimental_constrained_exp));
2762
2763 case Builtin::BIexp2:
2764 case Builtin::BIexp2f:
2765 case Builtin::BIexp2l:
2766 case Builtin::BI__builtin_exp2:
2767 case Builtin::BI__builtin_exp2f:
2768 case Builtin::BI__builtin_exp2f16:
2769 case Builtin::BI__builtin_exp2l:
2770 case Builtin::BI__builtin_exp2f128:
2771 case Builtin::BI__builtin_elementwise_exp2:
2772 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2773 Intrinsic::exp2,
2774 Intrinsic::experimental_constrained_exp2));
2775 case Builtin::BI__builtin_exp10:
2776 case Builtin::BI__builtin_exp10f:
2777 case Builtin::BI__builtin_exp10f16:
2778 case Builtin::BI__builtin_exp10l:
2779 case Builtin::BI__builtin_exp10f128:
2780 case Builtin::BI__builtin_elementwise_exp10: {
2781 // TODO: strictfp support
2782 if (Builder.getIsFPConstrained())
2783 break;
2784 return RValue::get(
2785 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2786 }
2787 case Builtin::BIfabs:
2788 case Builtin::BIfabsf:
2789 case Builtin::BIfabsl:
2790 case Builtin::BI__builtin_fabs:
2791 case Builtin::BI__builtin_fabsf:
2792 case Builtin::BI__builtin_fabsf16:
2793 case Builtin::BI__builtin_fabsl:
2794 case Builtin::BI__builtin_fabsf128:
2795 return RValue::get(
2796 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2797
2798 case Builtin::BIfloor:
2799 case Builtin::BIfloorf:
2800 case Builtin::BIfloorl:
2801 case Builtin::BI__builtin_floor:
2802 case Builtin::BI__builtin_floorf:
2803 case Builtin::BI__builtin_floorf16:
2804 case Builtin::BI__builtin_floorl:
2805 case Builtin::BI__builtin_floorf128:
2806 case Builtin::BI__builtin_elementwise_floor:
2807 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2808 Intrinsic::floor,
2809 Intrinsic::experimental_constrained_floor));
2810
2811 case Builtin::BIfma:
2812 case Builtin::BIfmaf:
2813 case Builtin::BIfmal:
2814 case Builtin::BI__builtin_fma:
2815 case Builtin::BI__builtin_fmaf:
2816 case Builtin::BI__builtin_fmaf16:
2817 case Builtin::BI__builtin_fmal:
2818 case Builtin::BI__builtin_fmaf128:
2819 case Builtin::BI__builtin_elementwise_fma:
2820 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2821 Intrinsic::fma,
2822 Intrinsic::experimental_constrained_fma));
2823
2824 case Builtin::BIfmax:
2825 case Builtin::BIfmaxf:
2826 case Builtin::BIfmaxl:
2827 case Builtin::BI__builtin_fmax:
2828 case Builtin::BI__builtin_fmaxf:
2829 case Builtin::BI__builtin_fmaxf16:
2830 case Builtin::BI__builtin_fmaxl:
2831 case Builtin::BI__builtin_fmaxf128:
2832 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2833 Intrinsic::maxnum,
2834 Intrinsic::experimental_constrained_maxnum));
2835
2836 case Builtin::BIfmin:
2837 case Builtin::BIfminf:
2838 case Builtin::BIfminl:
2839 case Builtin::BI__builtin_fmin:
2840 case Builtin::BI__builtin_fminf:
2841 case Builtin::BI__builtin_fminf16:
2842 case Builtin::BI__builtin_fminl:
2843 case Builtin::BI__builtin_fminf128:
2844 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2845 Intrinsic::minnum,
2846 Intrinsic::experimental_constrained_minnum));
2847
2848 case Builtin::BIfmaximum_num:
2849 case Builtin::BIfmaximum_numf:
2850 case Builtin::BIfmaximum_numl:
2851 case Builtin::BI__builtin_fmaximum_num:
2852 case Builtin::BI__builtin_fmaximum_numf:
2853 case Builtin::BI__builtin_fmaximum_numf16:
2854 case Builtin::BI__builtin_fmaximum_numl:
2855 case Builtin::BI__builtin_fmaximum_numf128:
2856 return RValue::get(
2857 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2858
2859 case Builtin::BIfminimum_num:
2860 case Builtin::BIfminimum_numf:
2861 case Builtin::BIfminimum_numl:
2862 case Builtin::BI__builtin_fminimum_num:
2863 case Builtin::BI__builtin_fminimum_numf:
2864 case Builtin::BI__builtin_fminimum_numf16:
2865 case Builtin::BI__builtin_fminimum_numl:
2866 case Builtin::BI__builtin_fminimum_numf128:
2867 return RValue::get(
2868 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2869
2870 // fmod() is a special-case. It maps to the frem instruction rather than an
2871 // LLVM intrinsic.
2872 case Builtin::BIfmod:
2873 case Builtin::BIfmodf:
2874 case Builtin::BIfmodl:
2875 case Builtin::BI__builtin_fmod:
2876 case Builtin::BI__builtin_fmodf:
2877 case Builtin::BI__builtin_fmodf16:
2878 case Builtin::BI__builtin_fmodl:
2879 case Builtin::BI__builtin_fmodf128:
2880 case Builtin::BI__builtin_elementwise_fmod: {
2881 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2882 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2883 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2884 if (Builder.getIsFPConstrained()) {
2885 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2886 Arg1->getType());
2887 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2888 } else {
2889 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2890 }
2891 }
2892
2893 case Builtin::BIlog:
2894 case Builtin::BIlogf:
2895 case Builtin::BIlogl:
2896 case Builtin::BI__builtin_log:
2897 case Builtin::BI__builtin_logf:
2898 case Builtin::BI__builtin_logf16:
2899 case Builtin::BI__builtin_logl:
2900 case Builtin::BI__builtin_logf128:
2901 case Builtin::BI__builtin_elementwise_log:
2902 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2903 Intrinsic::log,
2904 Intrinsic::experimental_constrained_log));
2905
2906 case Builtin::BIlog10:
2907 case Builtin::BIlog10f:
2908 case Builtin::BIlog10l:
2909 case Builtin::BI__builtin_log10:
2910 case Builtin::BI__builtin_log10f:
2911 case Builtin::BI__builtin_log10f16:
2912 case Builtin::BI__builtin_log10l:
2913 case Builtin::BI__builtin_log10f128:
2914 case Builtin::BI__builtin_elementwise_log10:
2915 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2916 Intrinsic::log10,
2917 Intrinsic::experimental_constrained_log10));
2918
2919 case Builtin::BIlog2:
2920 case Builtin::BIlog2f:
2921 case Builtin::BIlog2l:
2922 case Builtin::BI__builtin_log2:
2923 case Builtin::BI__builtin_log2f:
2924 case Builtin::BI__builtin_log2f16:
2925 case Builtin::BI__builtin_log2l:
2926 case Builtin::BI__builtin_log2f128:
2927 case Builtin::BI__builtin_elementwise_log2:
2928 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2929 Intrinsic::log2,
2930 Intrinsic::experimental_constrained_log2));
2931
2932 case Builtin::BInearbyint:
2933 case Builtin::BInearbyintf:
2934 case Builtin::BInearbyintl:
2935 case Builtin::BI__builtin_nearbyint:
2936 case Builtin::BI__builtin_nearbyintf:
2937 case Builtin::BI__builtin_nearbyintl:
2938 case Builtin::BI__builtin_nearbyintf128:
2939 case Builtin::BI__builtin_elementwise_nearbyint:
2940 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2941 Intrinsic::nearbyint,
2942 Intrinsic::experimental_constrained_nearbyint));
2943
2944 case Builtin::BIpow:
2945 case Builtin::BIpowf:
2946 case Builtin::BIpowl:
2947 case Builtin::BI__builtin_pow:
2948 case Builtin::BI__builtin_powf:
2949 case Builtin::BI__builtin_powf16:
2950 case Builtin::BI__builtin_powl:
2951 case Builtin::BI__builtin_powf128:
2952 case Builtin::BI__builtin_elementwise_pow:
2953 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2954 Intrinsic::pow,
2955 Intrinsic::experimental_constrained_pow));
2956
2957 case Builtin::BIrint:
2958 case Builtin::BIrintf:
2959 case Builtin::BIrintl:
2960 case Builtin::BI__builtin_rint:
2961 case Builtin::BI__builtin_rintf:
2962 case Builtin::BI__builtin_rintf16:
2963 case Builtin::BI__builtin_rintl:
2964 case Builtin::BI__builtin_rintf128:
2965 case Builtin::BI__builtin_elementwise_rint:
2966 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2967 Intrinsic::rint,
2968 Intrinsic::experimental_constrained_rint));
2969
2970 case Builtin::BIround:
2971 case Builtin::BIroundf:
2972 case Builtin::BIroundl:
2973 case Builtin::BI__builtin_round:
2974 case Builtin::BI__builtin_roundf:
2975 case Builtin::BI__builtin_roundf16:
2976 case Builtin::BI__builtin_roundl:
2977 case Builtin::BI__builtin_roundf128:
2978 case Builtin::BI__builtin_elementwise_round:
2979 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2980 Intrinsic::round,
2981 Intrinsic::experimental_constrained_round));
2982
2983 case Builtin::BIroundeven:
2984 case Builtin::BIroundevenf:
2985 case Builtin::BIroundevenl:
2986 case Builtin::BI__builtin_roundeven:
2987 case Builtin::BI__builtin_roundevenf:
2988 case Builtin::BI__builtin_roundevenf16:
2989 case Builtin::BI__builtin_roundevenl:
2990 case Builtin::BI__builtin_roundevenf128:
2991 case Builtin::BI__builtin_elementwise_roundeven:
2992 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2993 Intrinsic::roundeven,
2994 Intrinsic::experimental_constrained_roundeven));
2995
2996 case Builtin::BIsin:
2997 case Builtin::BIsinf:
2998 case Builtin::BIsinl:
2999 case Builtin::BI__builtin_sin:
3000 case Builtin::BI__builtin_sinf:
3001 case Builtin::BI__builtin_sinf16:
3002 case Builtin::BI__builtin_sinl:
3003 case Builtin::BI__builtin_sinf128:
3004 case Builtin::BI__builtin_elementwise_sin:
3005 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3006 Intrinsic::sin,
3007 Intrinsic::experimental_constrained_sin));
3008
3009 case Builtin::BIsinh:
3010 case Builtin::BIsinhf:
3011 case Builtin::BIsinhl:
3012 case Builtin::BI__builtin_sinh:
3013 case Builtin::BI__builtin_sinhf:
3014 case Builtin::BI__builtin_sinhf16:
3015 case Builtin::BI__builtin_sinhl:
3016 case Builtin::BI__builtin_sinhf128:
3017 case Builtin::BI__builtin_elementwise_sinh:
3018 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3019 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3020
3021 case Builtin::BI__builtin_sincospi:
3022 case Builtin::BI__builtin_sincospif:
3023 case Builtin::BI__builtin_sincospil:
3024 if (Builder.getIsFPConstrained())
3025 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3026 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3027 return RValue::get(nullptr);
3028
3029 case Builtin::BIsincos:
3030 case Builtin::BIsincosf:
3031 case Builtin::BIsincosl:
3032 case Builtin::BI__builtin_sincos:
3033 case Builtin::BI__builtin_sincosf:
3034 case Builtin::BI__builtin_sincosf16:
3035 case Builtin::BI__builtin_sincosl:
3036 case Builtin::BI__builtin_sincosf128:
3037 if (Builder.getIsFPConstrained())
3038 break; // TODO: Emit constrained sincos intrinsic once one exists.
3039 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3040 return RValue::get(nullptr);
3041
3042 case Builtin::BIsqrt:
3043 case Builtin::BIsqrtf:
3044 case Builtin::BIsqrtl:
3045 case Builtin::BI__builtin_sqrt:
3046 case Builtin::BI__builtin_sqrtf:
3047 case Builtin::BI__builtin_sqrtf16:
3048 case Builtin::BI__builtin_sqrtl:
3049 case Builtin::BI__builtin_sqrtf128:
3050 case Builtin::BI__builtin_elementwise_sqrt: {
3051 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
3052 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3053 SetSqrtFPAccuracy(Call);
3054 return RValue::get(Call);
3055 }
3056
3057 case Builtin::BItan:
3058 case Builtin::BItanf:
3059 case Builtin::BItanl:
3060 case Builtin::BI__builtin_tan:
3061 case Builtin::BI__builtin_tanf:
3062 case Builtin::BI__builtin_tanf16:
3063 case Builtin::BI__builtin_tanl:
3064 case Builtin::BI__builtin_tanf128:
3065 case Builtin::BI__builtin_elementwise_tan:
3066 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3067 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3068
3069 case Builtin::BItanh:
3070 case Builtin::BItanhf:
3071 case Builtin::BItanhl:
3072 case Builtin::BI__builtin_tanh:
3073 case Builtin::BI__builtin_tanhf:
3074 case Builtin::BI__builtin_tanhf16:
3075 case Builtin::BI__builtin_tanhl:
3076 case Builtin::BI__builtin_tanhf128:
3077 case Builtin::BI__builtin_elementwise_tanh:
3078 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3079 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3080
3081 case Builtin::BItrunc:
3082 case Builtin::BItruncf:
3083 case Builtin::BItruncl:
3084 case Builtin::BI__builtin_trunc:
3085 case Builtin::BI__builtin_truncf:
3086 case Builtin::BI__builtin_truncf16:
3087 case Builtin::BI__builtin_truncl:
3088 case Builtin::BI__builtin_truncf128:
3089 case Builtin::BI__builtin_elementwise_trunc:
3090 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3091 Intrinsic::trunc,
3092 Intrinsic::experimental_constrained_trunc));
3093
3094 case Builtin::BIlround:
3095 case Builtin::BIlroundf:
3096 case Builtin::BIlroundl:
3097 case Builtin::BI__builtin_lround:
3098 case Builtin::BI__builtin_lroundf:
3099 case Builtin::BI__builtin_lroundl:
3100 case Builtin::BI__builtin_lroundf128:
3101 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3102 *this, E, Intrinsic::lround,
3103 Intrinsic::experimental_constrained_lround));
3104
3105 case Builtin::BIllround:
3106 case Builtin::BIllroundf:
3107 case Builtin::BIllroundl:
3108 case Builtin::BI__builtin_llround:
3109 case Builtin::BI__builtin_llroundf:
3110 case Builtin::BI__builtin_llroundl:
3111 case Builtin::BI__builtin_llroundf128:
3112 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3113 *this, E, Intrinsic::llround,
3114 Intrinsic::experimental_constrained_llround));
3115
3116 case Builtin::BIlrint:
3117 case Builtin::BIlrintf:
3118 case Builtin::BIlrintl:
3119 case Builtin::BI__builtin_lrint:
3120 case Builtin::BI__builtin_lrintf:
3121 case Builtin::BI__builtin_lrintl:
3122 case Builtin::BI__builtin_lrintf128:
3123 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3124 *this, E, Intrinsic::lrint,
3125 Intrinsic::experimental_constrained_lrint));
3126
3127 case Builtin::BIllrint:
3128 case Builtin::BIllrintf:
3129 case Builtin::BIllrintl:
3130 case Builtin::BI__builtin_llrint:
3131 case Builtin::BI__builtin_llrintf:
3132 case Builtin::BI__builtin_llrintl:
3133 case Builtin::BI__builtin_llrintf128:
3134 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3135 *this, E, Intrinsic::llrint,
3136 Intrinsic::experimental_constrained_llrint));
3137 case Builtin::BI__builtin_ldexp:
3138 case Builtin::BI__builtin_ldexpf:
3139 case Builtin::BI__builtin_ldexpl:
3140 case Builtin::BI__builtin_ldexpf16:
3141 case Builtin::BI__builtin_ldexpf128:
3142 case Builtin::BI__builtin_elementwise_ldexp:
3143 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
3144 *this, E, Intrinsic::ldexp,
3145 Intrinsic::experimental_constrained_ldexp));
3146 default:
3147 break;
3148 }
3149 }
3150
3151 // Check NonnullAttribute/NullabilityArg and Alignment.
3152 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3153 unsigned ParmNum) {
3154 Value *Val = A.emitRawPointer(*this);
3155 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3156 ParmNum);
3157
3158 if (SanOpts.has(SanitizerKind::Alignment)) {
3159 SanitizerSet SkippedChecks;
3160 SkippedChecks.set(SanitizerKind::All);
3161 SkippedChecks.clear(SanitizerKind::Alignment);
3162 SourceLocation Loc = Arg->getExprLoc();
3163 // Strip an implicit cast.
3164 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3165 if (CE->getCastKind() == CK_BitCast)
3166 Arg = CE->getSubExpr();
3167 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3168 SkippedChecks);
3169 }
3170 };
3171
3172 switch (BuiltinIDIfNoAsmLabel) {
3173 default: break;
3174 case Builtin::BI__builtin___CFStringMakeConstantString:
3175 case Builtin::BI__builtin___NSStringMakeConstantString:
3176 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3177 case Builtin::BI__builtin_stdarg_start:
3178 case Builtin::BI__builtin_va_start:
3179 case Builtin::BI__va_start:
3180 case Builtin::BI__builtin_c23_va_start:
3181 case Builtin::BI__builtin_va_end:
3182 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3183 ? EmitScalarExpr(E->getArg(0))
3184 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3185 BuiltinID != Builtin::BI__builtin_va_end);
3186 return RValue::get(nullptr);
3187 case Builtin::BI__builtin_va_copy: {
3188 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3189 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3190 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3191 {DstPtr, SrcPtr});
3192 return RValue::get(nullptr);
3193 }
3194 case Builtin::BIabs:
3195 case Builtin::BIlabs:
3196 case Builtin::BIllabs:
3197 case Builtin::BI__builtin_abs:
3198 case Builtin::BI__builtin_labs:
3199 case Builtin::BI__builtin_llabs: {
3200 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3201
3202 Value *Result;
3203 switch (getLangOpts().getSignedOverflowBehavior()) {
3204 case LangOptions::SOB_Defined:
3205 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3206 break;
3207 case LangOptions::SOB_Undefined:
3208 if (!SanitizeOverflow) {
3209 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3210 break;
3211 }
3212 [[fallthrough]];
3213 case LangOptions::SOB_Trapping:
3214 // TODO: Somehow handle the corner case when the address of abs is taken.
3215 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3216 break;
3217 }
3218 return RValue::get(Result);
3219 }
3220 case Builtin::BI__builtin_complex: {
3221 Value *Real = EmitScalarExpr(E->getArg(0));
3222 Value *Imag = EmitScalarExpr(E->getArg(1));
3223 return RValue::getComplex({Real, Imag});
3224 }
3225 case Builtin::BI__builtin_conj:
3226 case Builtin::BI__builtin_conjf:
3227 case Builtin::BI__builtin_conjl:
3228 case Builtin::BIconj:
3229 case Builtin::BIconjf:
3230 case Builtin::BIconjl: {
3231 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3232 Value *Real = ComplexVal.first;
3233 Value *Imag = ComplexVal.second;
3234 Imag = Builder.CreateFNeg(Imag, "neg");
3235 return RValue::getComplex(std::make_pair(Real, Imag));
3236 }
3237 case Builtin::BI__builtin_creal:
3238 case Builtin::BI__builtin_crealf:
3239 case Builtin::BI__builtin_creall:
3240 case Builtin::BIcreal:
3241 case Builtin::BIcrealf:
3242 case Builtin::BIcreall: {
3243 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3244 return RValue::get(ComplexVal.first);
3245 }
3246
3247 case Builtin::BI__builtin_preserve_access_index: {
3248 // Only enable the preserved access index region when debug info
3249 // is available, as debug info is needed to preserve the user-level
3250 // access pattern.
3251 if (!getDebugInfo()) {
3252 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3253 return RValue::get(EmitScalarExpr(E->getArg(0)));
3254 }
3255
3256 // Nested builtin_preserve_access_index() not supported
3257 if (IsInPreservedAIRegion) {
3258 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3259 return RValue::get(EmitScalarExpr(E->getArg(0)));
3260 }
3261
3262 IsInPreservedAIRegion = true;
3263 Value *Res = EmitScalarExpr(E->getArg(0));
3264 IsInPreservedAIRegion = false;
3265 return RValue::get(Res);
3266 }
3267
3268 case Builtin::BI__builtin_cimag:
3269 case Builtin::BI__builtin_cimagf:
3270 case Builtin::BI__builtin_cimagl:
3271 case Builtin::BIcimag:
3272 case Builtin::BIcimagf:
3273 case Builtin::BIcimagl: {
3274 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3275 return RValue::get(ComplexVal.second);
3276 }
3277
3278 case Builtin::BI__builtin_clrsb:
3279 case Builtin::BI__builtin_clrsbl:
3280 case Builtin::BI__builtin_clrsbll: {
3281 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
3282 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3283
3284 llvm::Type *ArgType = ArgValue->getType();
3285 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3286
3287 llvm::Type *ResultType = ConvertType(E->getType());
3288 Value *Zero = llvm::Constant::getNullValue(ArgType);
3289 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3290 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3291 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3292 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3293 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3294 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3295 "cast");
3296 return RValue::get(Result);
3297 }
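// Illustrative sketch (value names approximate): for a 32-bit argument, the
// sequence above emits roughly
//   %isneg = icmp slt i32 %x, 0
//   %not   = xor i32 %x, -1
//   %tmp   = select i1 %isneg, i32 %not, i32 %x
//   %ctlz  = call i32 @llvm.ctlz.i32(i32 %tmp, i1 false)
//   %res   = sub i32 %ctlz, 1
// i.e. the number of redundant sign bits in x.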
3298 case Builtin::BI__builtin_ctzs:
3299 case Builtin::BI__builtin_ctz:
3300 case Builtin::BI__builtin_ctzl:
3301 case Builtin::BI__builtin_ctzll:
3302 case Builtin::BI__builtin_ctzg:
3303 case Builtin::BI__builtin_elementwise_ctzg: {
3304 bool HasFallback =
3305 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3306 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3307 E->getNumArgs() > 1;
3308
3309 Value *ArgValue =
3310 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3311 : EmitScalarExpr(E->getArg(0));
3312
3313 llvm::Type *ArgType = ArgValue->getType();
3314 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3315
3316 llvm::Type *ResultType = ConvertType(E->getType());
3317 // The elementwise builtins always exhibit zero-is-undef behaviour
3318 Value *ZeroUndef = Builder.getInt1(
3319 HasFallback || getTarget().isCLZForZeroUndef() ||
3320 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3321 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3322 if (Result->getType() != ResultType)
3323 Result =
3324 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3325 if (!HasFallback)
3326 return RValue::get(Result);
3327
3328 Value *Zero = Constant::getNullValue(ArgType);
3329 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3330 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3331 Value *ResultOrFallback =
3332 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3333 return RValue::get(ResultOrFallback);
3334 }
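// For example, __builtin_ctzg(0u, 32) takes the select fallback above and
// returns 32, whereas plain __builtin_ctz(0) is undefined on targets where
// cttz of zero is undef.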
3335 case Builtin::BI__builtin_clzs:
3336 case Builtin::BI__builtin_clz:
3337 case Builtin::BI__builtin_clzl:
3338 case Builtin::BI__builtin_clzll:
3339 case Builtin::BI__builtin_clzg:
3340 case Builtin::BI__builtin_elementwise_clzg: {
3341 bool HasFallback =
3342 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3343 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3344 E->getNumArgs() > 1;
3345
3346 Value *ArgValue =
3347 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3348 : EmitScalarExpr(E->getArg(0));
3349
3350 llvm::Type *ArgType = ArgValue->getType();
3351 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3352
3353 llvm::Type *ResultType = ConvertType(E->getType());
3354 // The elementwise builtins always exhibit zero-is-undef behaviour
3355 Value *ZeroUndef = Builder.getInt1(
3356 HasFallback || getTarget().isCLZForZeroUndef() ||
3357 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3358 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3359 if (Result->getType() != ResultType)
3360 Result =
3361 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3362 if (!HasFallback)
3363 return RValue::get(Result);
3364
3365 Value *Zero = Constant::getNullValue(ArgType);
3366 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3367 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3368 Value *ResultOrFallback =
3369 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3370 return RValue::get(ResultOrFallback);
3371 }
3372 case Builtin::BI__builtin_ffs:
3373 case Builtin::BI__builtin_ffsl:
3374 case Builtin::BI__builtin_ffsll: {
3375 // ffs(x) -> x ? cttz(x) + 1 : 0
3376 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3377
3378 llvm::Type *ArgType = ArgValue->getType();
3379 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3380
3381 llvm::Type *ResultType = ConvertType(E->getType());
3382 Value *Tmp =
3383 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3384 llvm::ConstantInt::get(ArgType, 1));
3385 Value *Zero = llvm::Constant::getNullValue(ArgType);
3386 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3387 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3388 if (Result->getType() != ResultType)
3389 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3390 "cast");
3391 return RValue::get(Result);
3392 }
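// For example, ffs(0x8) returns 4 (cttz gives 3, plus 1) and ffs(0)
// returns 0, matching the POSIX "find first set" convention.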
3393 case Builtin::BI__builtin_parity:
3394 case Builtin::BI__builtin_parityl:
3395 case Builtin::BI__builtin_parityll: {
3396 // parity(x) -> ctpop(x) & 1
3397 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3398
3399 llvm::Type *ArgType = ArgValue->getType();
3400 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3401
3402 llvm::Type *ResultType = ConvertType(E->getType());
3403 Value *Tmp = Builder.CreateCall(F, ArgValue);
3404 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3405 if (Result->getType() != ResultType)
3406 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3407 "cast");
3408 return RValue::get(Result);
3409 }
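// For example, parity(0b1011) returns 1: ctpop counts 3 set bits, and
// 3 & 1 == 1.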
3410 case Builtin::BI__lzcnt16:
3411 case Builtin::BI__lzcnt:
3412 case Builtin::BI__lzcnt64: {
3413 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3414
3415 llvm::Type *ArgType = ArgValue->getType();
3416 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3417
3418 llvm::Type *ResultType = ConvertType(E->getType());
3419 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3420 if (Result->getType() != ResultType)
3421 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3422 "cast");
3423 return RValue::get(Result);
3424 }
3425 case Builtin::BI__popcnt16:
3426 case Builtin::BI__popcnt:
3427 case Builtin::BI__popcnt64:
3428 case Builtin::BI__builtin_popcount:
3429 case Builtin::BI__builtin_popcountl:
3430 case Builtin::BI__builtin_popcountll:
3431 case Builtin::BI__builtin_popcountg: {
3432 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3433
3434 llvm::Type *ArgType = ArgValue->getType();
3435 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3436
3437 llvm::Type *ResultType = ConvertType(E->getType());
3438 Value *Result = Builder.CreateCall(F, ArgValue);
3439 if (Result->getType() != ResultType)
3440 Result =
3441 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3442 return RValue::get(Result);
3443 }
3444 case Builtin::BI__builtin_unpredictable: {
3445 // Always return the argument of __builtin_unpredictable. LLVM does not
3446 // handle this builtin. Metadata for this builtin should be added directly
3447 // to instructions such as branches or switches that use it.
3448 return RValue::get(EmitScalarExpr(E->getArg(0)));
3449 }
3450 case Builtin::BI__builtin_expect: {
3451 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3452 llvm::Type *ArgType = ArgValue->getType();
3453
3454 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3455 // Don't generate llvm.expect on -O0 as the backend won't use it for
3456 // anything.
3457 // Note, we still IRGen ExpectedValue because it could have side-effects.
3458 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3459 return RValue::get(ArgValue);
3460
3461 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3462 Value *Result =
3463 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3464 return RValue::get(Result);
3465 }
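// Typical use (illustrative): if (__builtin_expect(err != 0, 0)) marks the
// error path unlikely; per the check above, no llvm.expect is emitted at -O0.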
3466 case Builtin::BI__builtin_expect_with_probability: {
3467 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3468 llvm::Type *ArgType = ArgValue->getType();
3469
3470 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3471 llvm::APFloat Probability(0.0);
3472 const Expr *ProbArg = E->getArg(2);
3473 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3474 assert(EvalSucceed && "probability should be able to evaluate as float");
3475 (void)EvalSucceed;
3476 bool LoseInfo = false;
3477 Probability.convert(llvm::APFloat::IEEEdouble(),
3478 llvm::RoundingMode::Dynamic, &LoseInfo);
3479 llvm::Type *Ty = ConvertType(ProbArg->getType());
3480 Constant *Confidence = ConstantFP::get(Ty, Probability);
3481 // Don't generate llvm.expect.with.probability on -O0 as the backend
3482 // won't use it for anything.
3483 // Note, we still IRGen ExpectedValue because it could have side-effects.
3484 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3485 return RValue::get(ArgValue);
3486
3487 Function *FnExpect =
3488 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3489 Value *Result = Builder.CreateCall(
3490 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3491 return RValue::get(Result);
3492 }
3493 case Builtin::BI__builtin_assume_aligned: {
3494 const Expr *Ptr = E->getArg(0);
3495 Value *PtrValue = EmitScalarExpr(Ptr);
3496 Value *OffsetValue =
3497 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3498
3499 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3500 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3501 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3502 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3503 llvm::Value::MaximumAlignment);
3504
3505 emitAlignmentAssumption(PtrValue, Ptr,
3506 /*The expr loc is sufficient.*/ SourceLocation(),
3507 AlignmentCI, OffsetValue);
3508 return RValue::get(PtrValue);
3509 }
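// Typical use (illustrative): void *q = __builtin_assume_aligned(p, 64);
// this emits an llvm.assume carrying an "align" operand bundle so later
// accesses through q may be optimized under the 64-byte assumption.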
3510 case Builtin::BI__builtin_assume_dereferenceable: {
3511 const Expr *Ptr = E->getArg(0);
3512 const Expr *Size = E->getArg(1);
3513 Value *PtrValue = EmitScalarExpr(Ptr);
3514 Value *SizeValue = EmitScalarExpr(Size);
3515 if (SizeValue->getType() != IntPtrTy)
3516 SizeValue =
3517 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3518 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3519 return RValue::get(nullptr);
3520 }
3521 case Builtin::BI__assume:
3522 case Builtin::BI__builtin_assume: {
3523 if (E->getArg(0)->HasSideEffects(getContext()))
3524 return RValue::get(nullptr);
3525
3526 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3527 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3528 Builder.CreateCall(FnAssume, ArgValue);
3529 return RValue::get(nullptr);
3530 }
3531 case Builtin::BI__builtin_assume_separate_storage: {
3532 const Expr *Arg0 = E->getArg(0);
3533 const Expr *Arg1 = E->getArg(1);
3534
3535 Value *Value0 = EmitScalarExpr(Arg0);
3536 Value *Value1 = EmitScalarExpr(Arg1);
3537
3538 Value *Values[] = {Value0, Value1};
3539 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3540 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3541 return RValue::get(nullptr);
3542 }
3543 case Builtin::BI__builtin_allow_runtime_check: {
3544 StringRef Kind =
3545 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3546 LLVMContext &Ctx = CGM.getLLVMContext();
3547 llvm::Value *Allow = Builder.CreateCall(
3548 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3549 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3550 return RValue::get(Allow);
3551 }
3552 case Builtin::BI__arithmetic_fence: {
3553 // Create the builtin call if FastMath is selected, and the target
3554 // supports the builtin, otherwise just return the argument.
3555 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3556 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3557 bool isArithmeticFenceEnabled =
3558 FMF.allowReassoc() &&
3559 getContext().getTargetInfo().checkArithmeticFenceSupported();
3560 QualType ArgType = E->getArg(0)->getType();
3561 if (ArgType->isComplexType()) {
3562 if (isArithmeticFenceEnabled) {
3563 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3564 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3565 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3566 ConvertType(ElementType));
3567 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3568 ConvertType(ElementType));
3569 return RValue::getComplex(std::make_pair(Real, Imag));
3570 }
3571 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3572 Value *Real = ComplexVal.first;
3573 Value *Imag = ComplexVal.second;
3574 return RValue::getComplex(std::make_pair(Real, Imag));
3575 }
3576 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3577 if (isArithmeticFenceEnabled)
3578 return RValue::get(
3579 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3580 return RValue::get(ArgValue);
3581 }
3582 case Builtin::BI__builtin_bswapg: {
3583 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3584 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3585 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3586 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3587 return RValue::get(ArgValue);
3588 assert((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0) &&
3589 "LLVM's __builtin_bswapg only supports integer variants that have a "
3590 "multiple of 16 bits as well as a single byte");
3591 return RValue::get(
3592 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3593 }
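// For example, __builtin_bswapg((uint16_t)0x1122) yields 0x2211, while 1-
// and 8-bit inputs are returned unchanged by the early return above.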
3594 case Builtin::BI__builtin_bswap16:
3595 case Builtin::BI__builtin_bswap32:
3596 case Builtin::BI__builtin_bswap64:
3597 case Builtin::BI_byteswap_ushort:
3598 case Builtin::BI_byteswap_ulong:
3599 case Builtin::BI_byteswap_uint64: {
3600 return RValue::get(
3601 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3602 }
3603 case Builtin::BI__builtin_bitreverse8:
3604 case Builtin::BI__builtin_bitreverse16:
3605 case Builtin::BI__builtin_bitreverse32:
3606 case Builtin::BI__builtin_bitreverse64: {
3607 return RValue::get(
3608 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3609 }
3610 case Builtin::BI__builtin_rotateleft8:
3611 case Builtin::BI__builtin_rotateleft16:
3612 case Builtin::BI__builtin_rotateleft32:
3613 case Builtin::BI__builtin_rotateleft64:
3614 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3615 case Builtin::BI_rotl16:
3616 case Builtin::BI_rotl:
3617 case Builtin::BI_lrotl:
3618 case Builtin::BI_rotl64:
3619 return emitRotate(E, false);
3620
3621 case Builtin::BI__builtin_rotateright8:
3622 case Builtin::BI__builtin_rotateright16:
3623 case Builtin::BI__builtin_rotateright32:
3624 case Builtin::BI__builtin_rotateright64:
3625 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3626 case Builtin::BI_rotr16:
3627 case Builtin::BI_rotr:
3628 case Builtin::BI_lrotr:
3629 case Builtin::BI_rotr64:
3630 return emitRotate(E, true);
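// For example, __builtin_rotateleft8(0x81, 1) yields 0x03 and
// __builtin_rotateright8(0x81, 1) yields 0xC0; emitRotate lowers both to
// the funnel-shift intrinsics (llvm.fshl/llvm.fshr) with the rotated value
// passed as both shifted operands.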
3631
3632 case Builtin::BI__builtin_constant_p: {
3633 llvm::Type *ResultType = ConvertType(E->getType());
3634
3635 const Expr *Arg = E->getArg(0);
3636 QualType ArgType = Arg->getType();
3637 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3638 // and likely a mistake.
3639 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3640 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3641 // Per the GCC documentation, only numeric constants are recognized after
3642 // inlining.
3643 return RValue::get(ConstantInt::get(ResultType, 0));
3644
3645 if (Arg->HasSideEffects(getContext()))
3646 // The argument is unevaluated, so be conservative if it might have
3647 // side-effects.
3648 return RValue::get(ConstantInt::get(ResultType, 0));
3649
3650 Value *ArgValue = EmitScalarExpr(Arg);
3651 if (ArgType->isObjCObjectPointerType()) {
3652 // Convert Objective-C objects to id because we cannot distinguish between
3653 // LLVM types for Obj-C classes as they are opaque.
3654 ArgType = CGM.getContext().getObjCIdType();
3655 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3656 }
3657 Function *F =
3658 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3659 Value *Result = Builder.CreateCall(F, ArgValue);
3660 if (Result->getType() != ResultType)
3661 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3662 return RValue::get(Result);
3663 }
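// For example, __builtin_constant_p(42) folds to 1 after optimization,
// while an argument with side effects folds to 0 here; remaining cases are
// deferred to llvm.is.constant and resolved by the optimizer.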
3664 case Builtin::BI__builtin_dynamic_object_size:
3665 case Builtin::BI__builtin_object_size: {
3666 unsigned Type =
3667 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3668 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3669
3670 // We pass this builtin onto the optimizer so that it can figure out the
3671 // object size in more complex cases.
3672 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3673 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3674 /*EmittedE=*/nullptr, IsDynamic));
3675 }
3676 case Builtin::BI__builtin_counted_by_ref: {
3677 // Default to returning '(void *) 0'.
3678 llvm::Value *Result = llvm::ConstantPointerNull::get(
3679 llvm::PointerType::getUnqual(getLLVMContext()));
3680
3681 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3682
3683 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3684 UO && UO->getOpcode() == UO_AddrOf) {
3685 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3686
3687 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3688 Arg = ASE->getBase()->IgnoreParenImpCasts();
3689 }
3690
3691 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3692 if (auto *CATy =
3693 ME->getMemberDecl()->getType()->getAs<CountAttributedType>();
3694 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3695 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
3696 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
3697 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
3698 else
3699 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3700 }
3701 }
3702
3703 return RValue::get(Result);
3704 }
3705 case Builtin::BI__builtin_prefetch: {
3706 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3707 // FIXME: Technically these constants should be of type 'int', yes?
3708 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3709 llvm::ConstantInt::get(Int32Ty, 0);
3710 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3711 llvm::ConstantInt::get(Int32Ty, 3);
3712 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3713 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3714 Builder.CreateCall(F, {Address, RW, Locality, Data});
3715 return RValue::get(nullptr);
3716 }
3717 case Builtin::BI__builtin_readcyclecounter: {
3718 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3719 return RValue::get(Builder.CreateCall(F));
3720 }
3721 case Builtin::BI__builtin_readsteadycounter: {
3722 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3723 return RValue::get(Builder.CreateCall(F));
3724 }
3725 case Builtin::BI__builtin___clear_cache: {
3726 Value *Begin = EmitScalarExpr(E->getArg(0));
3727 Value *End = EmitScalarExpr(E->getArg(1));
3728 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3729 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3730 }
3731 case Builtin::BI__builtin_trap:
3732 EmitTrapCall(Intrinsic::trap);
3733 return RValue::get(nullptr);
3734 case Builtin::BI__builtin_verbose_trap: {
3735 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3736 if (getDebugInfo()) {
3737 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3738 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3739 *E->getArg(1)->tryEvaluateString(getContext()));
3740 }
3741 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3742 // Currently no attempt is made to prevent traps from being merged.
3743 EmitTrapCall(Intrinsic::trap);
3744 return RValue::get(nullptr);
3745 }
3746 case Builtin::BI__debugbreak:
3747 EmitTrapCall(Intrinsic::debugtrap);
3748 return RValue::get(nullptr);
3749 case Builtin::BI__builtin_unreachable: {
3750 EmitUnreachable(E->getExprLoc());
3751
3752 // We do need to preserve an insertion point.
3753 EmitBlock(createBasicBlock("unreachable.cont"));
3754
3755 return RValue::get(nullptr);
3756 }
3757
3758 case Builtin::BI__builtin_powi:
3759 case Builtin::BI__builtin_powif:
3760 case Builtin::BI__builtin_powil: {
3761 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3762 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3763
3764 if (Builder.getIsFPConstrained()) {
3765 // FIXME: llvm.powi has 2 mangling types,
3766 // llvm.experimental.constrained.powi has one.
3767 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3768 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3769 Src0->getType());
3770 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3771 }
3772
3773 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3774 { Src0->getType(), Src1->getType() });
3775 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3776 }
3777 case Builtin::BI__builtin_frexpl: {
3778 // Linux PPC will not be adding additional PPCDoubleDouble support.
3779 // Work is in progress to switch the default to IEEE long double; emit a
3780 // libcall for frexpl instead of legalizing this type in the backend.
3781 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3782 break;
3783 [[fallthrough]];
3784 }
3785 case Builtin::BI__builtin_frexp:
3786 case Builtin::BI__builtin_frexpf:
3787 case Builtin::BI__builtin_frexpf128:
3788 case Builtin::BI__builtin_frexpf16:
3789 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3790 case Builtin::BImodf:
3791 case Builtin::BImodff:
3792 case Builtin::BImodfl:
3793 case Builtin::BI__builtin_modf:
3794 case Builtin::BI__builtin_modff:
3795 case Builtin::BI__builtin_modfl:
3796 if (Builder.getIsFPConstrained())
3797 break; // TODO: Emit constrained modf intrinsic once one exists.
3798 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3799 case Builtin::BI__builtin_isgreater:
3800 case Builtin::BI__builtin_isgreaterequal:
3801 case Builtin::BI__builtin_isless:
3802 case Builtin::BI__builtin_islessequal:
3803 case Builtin::BI__builtin_islessgreater:
3804 case Builtin::BI__builtin_isunordered: {
3805 // Ordered comparisons: we know the arguments to these are matching scalar
3806 // floating point values.
3807 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3808 Value *LHS = EmitScalarExpr(E->getArg(0));
3809 Value *RHS = EmitScalarExpr(E->getArg(1));
3810
3811 switch (BuiltinID) {
3812 default: llvm_unreachable("Unknown ordered comparison");
3813 case Builtin::BI__builtin_isgreater:
3814 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3815 break;
3816 case Builtin::BI__builtin_isgreaterequal:
3817 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3818 break;
3819 case Builtin::BI__builtin_isless:
3820 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3821 break;
3822 case Builtin::BI__builtin_islessequal:
3823 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3824 break;
3825 case Builtin::BI__builtin_islessgreater:
3826 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3827 break;
3828 case Builtin::BI__builtin_isunordered:
3829 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3830 break;
3831 }
3832 // ZExt bool to int type.
3833 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3834 }
3835
3836 case Builtin::BI__builtin_isnan: {
3837 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3838 Value *V = EmitScalarExpr(E->getArg(0));
3839 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3840 return RValue::get(Result);
3841 return RValue::get(
3842 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3843 ConvertType(E->getType())));
3844 }
3845
3846 case Builtin::BI__builtin_issignaling: {
3847 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3848 Value *V = EmitScalarExpr(E->getArg(0));
3849 return RValue::get(
3850 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3851 ConvertType(E->getType())));
3852 }
3853
3854 case Builtin::BI__builtin_isinf: {
3855 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3856 Value *V = EmitScalarExpr(E->getArg(0));
3857 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3858 return RValue::get(Result);
3859 return RValue::get(
3860 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3861 ConvertType(E->getType())));
3862 }
3863
3864 case Builtin::BIfinite:
3865 case Builtin::BI__finite:
3866 case Builtin::BIfinitef:
3867 case Builtin::BI__finitef:
3868 case Builtin::BIfinitel:
3869 case Builtin::BI__finitel:
3870 case Builtin::BI__builtin_isfinite: {
3871 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3872 Value *V = EmitScalarExpr(E->getArg(0));
3873 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3874 return RValue::get(Result);
3875 return RValue::get(
3876 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3877 ConvertType(E->getType())));
3878 }
3879
3880 case Builtin::BI__builtin_isnormal: {
3881 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3882 Value *V = EmitScalarExpr(E->getArg(0));
3883 return RValue::get(
3884 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3885 ConvertType(E->getType())));
3886 }
3887
3888 case Builtin::BI__builtin_issubnormal: {
3889 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3890 Value *V = EmitScalarExpr(E->getArg(0));
3891 return RValue::get(
3892 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3893 ConvertType(E->getType())));
3894 }
3895
3896 case Builtin::BI__builtin_iszero: {
3897 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3898 Value *V = EmitScalarExpr(E->getArg(0));
3899 return RValue::get(
3900 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3901 ConvertType(E->getType())));
3902 }
3903
3904 case Builtin::BI__builtin_isfpclass: {
3905 Expr::EvalResult Result;
3906 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3907 break;
3908 uint64_t Test = Result.Val.getInt().getLimitedValue();
3909 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3910 Value *V = EmitScalarExpr(E->getArg(0));
3911 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3912 ConvertType(E->getType())));
3913 }
3914
3915 case Builtin::BI__builtin_nondeterministic_value: {
3916 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3917
3918 Value *Result = PoisonValue::get(Ty);
3919 Result = Builder.CreateFreeze(Result);
3920
3921 return RValue::get(Result);
3922 }
3923
3924 case Builtin::BI__builtin_elementwise_abs: {
3925 Value *Result;
3926 QualType QT = E->getArg(0)->getType();
3927
3928 if (auto *VecTy = QT->getAs<VectorType>())
3929 QT = VecTy->getElementType();
3930 if (QT->isIntegerType())
3931 Result = Builder.CreateBinaryIntrinsic(
3932 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
3933 nullptr, "elt.abs");
3934 else
3935 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
3936 "elt.abs");
3937
3938 return RValue::get(Result);
3939 }
3940 case Builtin::BI__builtin_elementwise_bitreverse:
3941 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3942 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
3943 case Builtin::BI__builtin_elementwise_popcount:
3944 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3945 *this, E, Intrinsic::ctpop, "elt.ctpop"));
3946 case Builtin::BI__builtin_elementwise_canonicalize:
3947 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3948 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
3949 case Builtin::BI__builtin_elementwise_copysign:
3950 return RValue::get(
3951 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
3952 case Builtin::BI__builtin_elementwise_fshl:
3953 return RValue::get(
3954 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
3955 case Builtin::BI__builtin_elementwise_fshr:
3956 return RValue::get(
3957 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
3958
3959 case Builtin::BI__builtin_elementwise_add_sat:
3960 case Builtin::BI__builtin_elementwise_sub_sat: {
3961 Value *Op0 = EmitScalarExpr(E->getArg(0));
3962 Value *Op1 = EmitScalarExpr(E->getArg(1));
3963 Value *Result;
3964 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
3965 QualType Ty = E->getArg(0)->getType();
3966 if (auto *VecTy = Ty->getAs<VectorType>())
3967 Ty = VecTy->getElementType();
3968 bool IsSigned = Ty->isSignedIntegerType();
3969 unsigned Opc;
3970 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
3971 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
3972 else
3973 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
3974 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
3975 return RValue::get(Result);
3976 }
3977
3978 case Builtin::BI__builtin_elementwise_max: {
3979 Value *Op0 = EmitScalarExpr(E->getArg(0));
3980 Value *Op1 = EmitScalarExpr(E->getArg(1));
3981 Value *Result;
3982 if (Op0->getType()->isIntOrIntVectorTy()) {
3983 QualType Ty = E->getArg(0)->getType();
3984 if (auto *VecTy = Ty->getAs<VectorType>())
3985 Ty = VecTy->getElementType();
3986 Result = Builder.CreateBinaryIntrinsic(
3987 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
3988 Op1, nullptr, "elt.max");
3989 } else
3990 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
3991 return RValue::get(Result);
3992 }
3993 case Builtin::BI__builtin_elementwise_min: {
3994 Value *Op0 = EmitScalarExpr(E->getArg(0));
3995 Value *Op1 = EmitScalarExpr(E->getArg(1));
3996 Value *Result;
3997 if (Op0->getType()->isIntOrIntVectorTy()) {
3998 QualType Ty = E->getArg(0)->getType();
3999 if (auto *VecTy = Ty->getAs<VectorType>())
4000 Ty = VecTy->getElementType();
4001 Result = Builder.CreateBinaryIntrinsic(
4002 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4003 Op1, nullptr, "elt.min");
4004 } else
4005 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4006 return RValue::get(Result);
4007 }
4008
4009 case Builtin::BI__builtin_elementwise_maxnum: {
4010 Value *Op0 = EmitScalarExpr(E->getArg(0));
4011 Value *Op1 = EmitScalarExpr(E->getArg(1));
4012 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4013 Op1, nullptr, "elt.maxnum");
4014 return RValue::get(Result);
4015 }
4016
4017 case Builtin::BI__builtin_elementwise_minnum: {
4018 Value *Op0 = EmitScalarExpr(E->getArg(0));
4019 Value *Op1 = EmitScalarExpr(E->getArg(1));
4020 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4021 Op1, nullptr, "elt.minnum");
4022 return RValue::get(Result);
4023 }
4024
4025 case Builtin::BI__builtin_elementwise_maximum: {
4026 Value *Op0 = EmitScalarExpr(E->getArg(0));
4027 Value *Op1 = EmitScalarExpr(E->getArg(1));
4028 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4029 nullptr, "elt.maximum");
4030 return RValue::get(Result);
4031 }
4032
4033 case Builtin::BI__builtin_elementwise_minimum: {
4034 Value *Op0 = EmitScalarExpr(E->getArg(0));
4035 Value *Op1 = EmitScalarExpr(E->getArg(1));
4036 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4037 nullptr, "elt.minimum");
4038 return RValue::get(Result);
4039 }
4040
4041 case Builtin::BI__builtin_elementwise_maximumnum: {
4042 Value *Op0 = EmitScalarExpr(E->getArg(0));
4043 Value *Op1 = EmitScalarExpr(E->getArg(1));
4044 Value *Result = Builder.CreateBinaryIntrinsic(
4045 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4046 return RValue::get(Result);
4047 }
4048
4049 case Builtin::BI__builtin_elementwise_minimumnum: {
4050 Value *Op0 = EmitScalarExpr(E->getArg(0));
4051 Value *Op1 = EmitScalarExpr(E->getArg(1));
4052 Value *Result = Builder.CreateBinaryIntrinsic(
4053 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4054 return RValue::get(Result);
4055 }
4056
4057 case Builtin::BI__builtin_reduce_max: {
4058 auto GetIntrinsicID = [this](QualType QT) {
4059 if (auto *VecTy = QT->getAs<VectorType>())
4060 QT = VecTy->getElementType();
4061 else if (QT->isSizelessVectorType())
4062 QT = QT->getSizelessVectorEltType(CGM.getContext());
4063
4064 if (QT->isSignedIntegerType())
4065 return Intrinsic::vector_reduce_smax;
4066 if (QT->isUnsignedIntegerType())
4067 return Intrinsic::vector_reduce_umax;
4068 assert(QT->isFloatingType() && "must have a float here");
4069 return Intrinsic::vector_reduce_fmax;
4070 };
4071 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4072 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4073 }
4074
4075 case Builtin::BI__builtin_reduce_min: {
4076 auto GetIntrinsicID = [this](QualType QT) {
4077 if (auto *VecTy = QT->getAs<VectorType>())
4078 QT = VecTy->getElementType();
4079 else if (QT->isSizelessVectorType())
4080 QT = QT->getSizelessVectorEltType(CGM.getContext());
4081
4082 if (QT->isSignedIntegerType())
4083 return Intrinsic::vector_reduce_smin;
4084 if (QT->isUnsignedIntegerType())
4085 return Intrinsic::vector_reduce_umin;
4086 assert(QT->isFloatingType() && "must have a float here");
4087 return Intrinsic::vector_reduce_fmin;
4088 };
4089
4090 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4091 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4092 }
4093
4094 case Builtin::BI__builtin_reduce_add:
4095 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4096 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4097 case Builtin::BI__builtin_reduce_mul:
4098 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4099 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4100 case Builtin::BI__builtin_reduce_xor:
4101 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4102 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4103 case Builtin::BI__builtin_reduce_or:
4104 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4105 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4106 case Builtin::BI__builtin_reduce_and:
4107 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4108 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4109 case Builtin::BI__builtin_reduce_maximum:
4110 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4111 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4112 case Builtin::BI__builtin_reduce_minimum:
4113 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4114 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4115
4116 case Builtin::BI__builtin_matrix_transpose: {
4117 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4118 Value *MatValue = EmitScalarExpr(E->getArg(0));
4119 MatrixBuilder MB(Builder);
4120 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4121 MatrixTy->getNumColumns());
4122 return RValue::get(Result);
4123 }
4124
4125 case Builtin::BI__builtin_matrix_column_major_load: {
4126 MatrixBuilder MB(Builder);
4127 // Emit everything that isn't dependent on the first parameter type
4128 Value *Stride = EmitScalarExpr(E->getArg(3));
4129 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4130 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4131 assert(PtrTy && "arg0 must be of pointer type");
4132 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4133
4134 Address Src = EmitPointerWithAlignment(E->getArg(0));
4135 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4136 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4137 0);
4138 Value *Result = MB.CreateColumnMajorLoad(
4139 Src.getElementType(), Src.emitRawPointer(*this),
4140 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4141 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4142 return RValue::get(Result);
4143 }
4144
4145 case Builtin::BI__builtin_matrix_column_major_store: {
4146 MatrixBuilder MB(Builder);
4147 Value *Matrix = EmitScalarExpr(E->getArg(0));
4148 Address Dst = EmitPointerWithAlignment(E->getArg(1));
4149 Value *Stride = EmitScalarExpr(E->getArg(2));
4150
4151 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4152 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4153 assert(PtrTy && "arg1 must be of pointer type");
4154 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4155
4156 EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
4157 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4158 0);
4159 Value *Result = MB.CreateColumnMajorStore(
4160 Matrix, Dst.emitRawPointer(*this),
4161 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4162 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4164 return RValue::get(Result);
4165 }
4166
4167 case Builtin::BI__builtin_masked_load:
4168 case Builtin::BI__builtin_masked_expand_load: {
4169 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4170 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4171
4172 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4173 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4174 if (E->getNumArgs() > 2)
4175 PassThru = EmitScalarExpr(E->getArg(2));
4176
4177 CharUnits Align = CGM.getNaturalTypeAlignment(
4178 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4179
4180 llvm::Value *Result;
4181 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4182 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4183 PassThru, "masked_load");
4184 } else {
4185 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4186 Result =
4187 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4188 }
4189 return RValue::get(Result);
4190 };
4191 case Builtin::BI__builtin_masked_gather: {
4192 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4193 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4194 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4195
4196 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4197 CharUnits Align = CGM.getNaturalTypeAlignment(
4198 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4199
4200 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4201 if (E->getNumArgs() > 3)
4202 PassThru = EmitScalarExpr(E->getArg(3));
4203
4204 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4205 E->getType()->getAs<VectorType>()->getElementType());
4206 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4207
4208 llvm::Value *Result = Builder.CreateMaskedGather(
4209 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4210 return RValue::get(Result);
4211 }
4212 case Builtin::BI__builtin_masked_store:
4213 case Builtin::BI__builtin_masked_compress_store: {
4214 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4215 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4216 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4217
4218 QualType ValTy = E->getArg(1)->getType();
4219 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4220
4221 CharUnits Align = CGM.getNaturalTypeAlignment(
4222 ValTy->getAs<VectorType>()->getElementType(),
4223 nullptr);
4224
4225 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4226 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4227 } else {
4228 llvm::Function *F =
4229 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4230 Builder.CreateCall(F, {Val, Ptr, Mask});
4231 }
4232 return RValue::get(nullptr);
4233 }
4234 case Builtin::BI__builtin_masked_scatter: {
4235 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4236 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4237 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4238 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4239
4240 CharUnits Align = CGM.getNaturalTypeAlignment(
4241 E->getArg(2)->getType()->getAs<VectorType>()->getElementType(),
4242 nullptr);
4243
4244 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4245 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4246 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4247
4248 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4249 return RValue();
4250 }
4251 case Builtin::BI__builtin_isinf_sign: {
4252 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4253 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4254 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4255 Value *Arg = EmitScalarExpr(E->getArg(0));
4256 Value *AbsArg = EmitFAbs(*this, Arg);
4257 Value *IsInf = Builder.CreateFCmpOEQ(
4258 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4259 Value *IsNeg = EmitSignBit(*this, Arg);
4260
4261 llvm::Type *IntTy = ConvertType(E->getType());
4262 Value *Zero = Constant::getNullValue(IntTy);
4263 Value *One = ConstantInt::get(IntTy, 1);
4264 Value *NegativeOne = ConstantInt::get(IntTy, -1);
4265 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4266 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4267 return RValue::get(Result);
4268 }
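// For example, __builtin_isinf_sign(-INFINITY) returns -1,
// __builtin_isinf_sign(INFINITY) returns 1, and finite or NaN inputs
// return 0.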
4269
4270 case Builtin::BI__builtin_flt_rounds: {
4271 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4272
4273 llvm::Type *ResultType = ConvertType(E->getType());
4274 Value *Result = Builder.CreateCall(F);
4275 if (Result->getType() != ResultType)
4276 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4277 "cast");
4278 return RValue::get(Result);
4279 }
4280
4281 case Builtin::BI__builtin_set_flt_rounds: {
4282 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4283
4284 Value *V = EmitScalarExpr(E->getArg(0));
4285 Builder.CreateCall(F, V);
4286 return RValue::get(nullptr);
4287 }
4288
4289 case Builtin::BI__builtin_fpclassify: {
4290 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4291 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4292 Value *V = EmitScalarExpr(E->getArg(5));
4293 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4294
4295 // Create Result
4296 BasicBlock *Begin = Builder.GetInsertBlock();
4297 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4298 Builder.SetInsertPoint(End);
4299 PHINode *Result =
4300 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4301 "fpclassify_result");
4302
4303 // if (V==0) return FP_ZERO
4304 Builder.SetInsertPoint(Begin);
4305 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4306 "iszero");
4307 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4308 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4309 Builder.CreateCondBr(IsZero, End, NotZero);
4310 Result->addIncoming(ZeroLiteral, Begin);
4311
4312 // if (V != V) return FP_NAN
4313 Builder.SetInsertPoint(NotZero);
4314 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4315 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4316 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4317 Builder.CreateCondBr(IsNan, End, NotNan);
4318 Result->addIncoming(NanLiteral, NotZero);
4319
4320 // if (fabs(V) == infinity) return FP_INFINITY
4321 Builder.SetInsertPoint(NotNan);
4322 Value *VAbs = EmitFAbs(*this, V);
4323 Value *IsInf =
4324 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4325 "isinf");
4326 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4327 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4328 Builder.CreateCondBr(IsInf, End, NotInf);
4329 Result->addIncoming(InfLiteral, NotNan);
4330
4331 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4332 Builder.SetInsertPoint(NotInf);
4333 APFloat Smallest = APFloat::getSmallestNormalized(
4334 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4335 Value *IsNormal =
4336 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4337 "isnormal");
4338 Value *NormalResult =
4339 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4340 EmitScalarExpr(E->getArg(3)));
4341 Builder.CreateBr(End);
4342 Result->addIncoming(NormalResult, NotInf);
4343
4344 // return Result
4345 Builder.SetInsertPoint(End);
4346 return RValue::get(Result);
4347 }
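// Typical use (illustrative), matching the argument order consumed above:
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, x)
// where arg 5 is the value to classify and args 0-4 supply the result for
// each class.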
4348
4349 // An alloca will always return a pointer to the alloca (stack) address
4350 // space. This address space need not be the same as the AST / Language
4351 // default (e.g. in C / C++ auto vars are in the generic address space). At
4352 // the AST level this is handled within CreateTempAlloca et al., but for the
4353 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4354 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4355 case Builtin::BIalloca:
4356 case Builtin::BI_alloca:
4357 case Builtin::BI__builtin_alloca_uninitialized:
4358 case Builtin::BI__builtin_alloca: {
4359 Value *Size = EmitScalarExpr(E->getArg(0));
4360 const TargetInfo &TI = getContext().getTargetInfo();
4361 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4362 const Align SuitableAlignmentInBytes =
4363 CGM.getContext()
4364 .toCharUnitsFromBits(TI.getSuitableAlign())
4365 .getAsAlign();
4366 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4367 AI->setAlignment(SuitableAlignmentInBytes);
4368 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4369 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4370 LangAS AAS = getASTAllocaAddressSpace();
4371 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4372 if (AAS != EAS) {
4373 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4374 return RValue::get(
4375 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4376 }
4377 return RValue::get(AI);
4378 }
4379
4380 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4381 case Builtin::BI__builtin_alloca_with_align: {
4382 Value *Size = EmitScalarExpr(E->getArg(0));
4383 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4384 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4385 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4386 const Align AlignmentInBytes =
4387 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4388 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4389 AI->setAlignment(AlignmentInBytes);
4390 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4391 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4392 LangAS AAS = getASTAllocaAddressSpace();
4393 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4394 if (AAS != EAS) {
4395 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4396 return RValue::get(
4397 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4398 }
4399 return RValue::get(AI);
4400 }
4401
4402 case Builtin::BI__builtin_infer_alloc_token: {
4403 llvm::MDNode *MDN = buildAllocToken(E);
4404 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4405 llvm::Function *F =
4406 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4407 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4408 return RValue::get(TokenID);
4409 }
4410
4411 case Builtin::BIbzero:
4412 case Builtin::BI__builtin_bzero: {
4413 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4414 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4415 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4416 E->getArg(0)->getExprLoc(), FD, 0);
4417 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4418 addInstToNewSourceAtom(I, nullptr);
4419 return RValue::get(nullptr);
4420 }
4421
4422 case Builtin::BIbcopy:
4423 case Builtin::BI__builtin_bcopy: {
4424 Address Src = EmitPointerWithAlignment(E->getArg(0));
4425 Address Dest = EmitPointerWithAlignment(E->getArg(1));
4426 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4427 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4428 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4429 0);
4430 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4431 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4432 0);
4433 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4434 addInstToNewSourceAtom(I, nullptr);
4435 return RValue::get(nullptr);
4436 }
4437
4438 case Builtin::BImemcpy:
4439 case Builtin::BI__builtin_memcpy:
4440 case Builtin::BImempcpy:
4441 case Builtin::BI__builtin_mempcpy: {
4442 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4443 Address Src = EmitPointerWithAlignment(E->getArg(1));
4444 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4445 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4446 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4447 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4448 addInstToNewSourceAtom(I, nullptr);
4449 if (BuiltinID == Builtin::BImempcpy ||
4450 BuiltinID == Builtin::BI__builtin_mempcpy)
4451 return RValue::get(Builder.CreateInBoundsGEP(
4452 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4453 else
4454 return RValue::get(Dest, *this);
4455 }
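// Note the difference in return values above: memcpy returns the
// destination pointer, while mempcpy returns dest + size (the in-bounds
// GEP just emitted).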
4456
4457 case Builtin::BI__builtin_memcpy_inline: {
4458 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4459 Address Src = EmitPointerWithAlignment(E->getArg(1));
4460 uint64_t Size =
4461 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4462 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4463 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4464 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4465 addInstToNewSourceAtom(I, nullptr);
4466 return RValue::get(nullptr);
4467 }
4468
4469 case Builtin::BI__builtin_char_memchr:
4470 BuiltinID = Builtin::BI__builtin_memchr;
4471 break;
4472
4473 case Builtin::BI__builtin___memcpy_chk: {
4474 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4475 Expr::EvalResult SizeResult, DstSizeResult;
4476 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4477 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4478 break;
4479 llvm::APSInt Size = SizeResult.Val.getInt();
4480 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4481 if (Size.ugt(DstSize))
4482 break;
4483 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4484 Address Src = EmitPointerWithAlignment(E->getArg(1));
4485 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4486 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4487 addInstToNewSourceAtom(I, nullptr);
4488 return RValue::get(Dest, *this);
4489 }
4490
4491 case Builtin::BI__builtin_objc_memmove_collectable: {
4492 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4493 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4494 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4495 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4496 DestAddr, SrcAddr, SizeVal);
4497 return RValue::get(DestAddr, *this);
4498 }
4499
4500 case Builtin::BI__builtin___memmove_chk: {
4501 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4502 Expr::EvalResult SizeResult, DstSizeResult;
4503 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4504 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4505 break;
4506 llvm::APSInt Size = SizeResult.Val.getInt();
4507 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4508 if (Size.ugt(DstSize))
4509 break;
4510 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4511 Address Src = EmitPointerWithAlignment(E->getArg(1));
4512 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4513 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4514 addInstToNewSourceAtom(I, nullptr);
4515 return RValue::get(Dest, *this);
4516 }
4517
4518 case Builtin::BI__builtin_trivially_relocate:
4519 case Builtin::BImemmove:
4520 case Builtin::BI__builtin_memmove: {
4521 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4522 Address Src = EmitPointerWithAlignment(E->getArg(1));
4523 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4524 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4525 SizeVal = Builder.CreateMul(
4526 SizeVal,
4527 ConstantInt::get(
4528 SizeVal->getType(),
4529 getContext()
4530 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4531 .getQuantity()));
4532 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4533 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4534 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4535 addInstToNewSourceAtom(I, nullptr);
4536 return RValue::get(Dest, *this);
4537 }
4538 case Builtin::BImemset:
4539 case Builtin::BI__builtin_memset: {
4540 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4541 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4542 Builder.getInt8Ty());
4543 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4544 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4545 E->getArg(0)->getExprLoc(), FD, 0);
4546 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4547 addInstToNewSourceAtom(I, ByteVal);
4548 return RValue::get(Dest, *this);
4549 }
4550 case Builtin::BI__builtin_memset_inline: {
4551 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4552 Value *ByteVal =
4553 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4554 uint64_t Size =
4555 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4556 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4557 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4558 0);
4559 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4560 addInstToNewSourceAtom(I, nullptr);
4561 return RValue::get(nullptr);
4562 }
4563 case Builtin::BI__builtin___memset_chk: {
4564 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4565 Expr::EvalResult SizeResult, DstSizeResult;
4566 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4567 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4568 break;
4569 llvm::APSInt Size = SizeResult.Val.getInt();
4570 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4571 if (Size.ugt(DstSize))
4572 break;
4573 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4574 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4575 Builder.getInt8Ty());
4576 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4577 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4578 addInstToNewSourceAtom(I, nullptr);
4579 return RValue::get(Dest, *this);
4580 }
4581 case Builtin::BI__builtin_wmemchr: {
4582 // The MSVC runtime library does not provide a definition of wmemchr, so we
4583 // need an inline implementation.
4584 if (!getTarget().getTriple().isOSMSVCRT())
4585 break;
4586
4587 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4588 Value *Str = EmitScalarExpr(E->getArg(0));
4589 Value *Chr = EmitScalarExpr(E->getArg(1));
4590 Value *Size = EmitScalarExpr(E->getArg(2));
4591
4592 BasicBlock *Entry = Builder.GetInsertBlock();
4593 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4594 BasicBlock *Next = createBasicBlock("wmemchr.next");
4595 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4596 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4597 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4598
4599 EmitBlock(CmpEq);
4600 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4601 StrPhi->addIncoming(Str, Entry);
4602 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4603 SizePhi->addIncoming(Size, Entry);
4604 CharUnits WCharAlign =
4605 getContext().getTypeAlignInChars(getContext().WCharTy);
4606 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4607 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4608 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4609 Builder.CreateCondBr(StrEqChr, Exit, Next);
4610
4611 EmitBlock(Next);
4612 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4613 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4614 Value *NextSizeEq0 =
4615 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4616 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4617 StrPhi->addIncoming(NextStr, Next);
4618 SizePhi->addIncoming(NextSize, Next);
4619
4620 EmitBlock(Exit);
4621 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4622 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4623 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4624 Ret->addIncoming(FoundChr, CmpEq);
4625 return RValue::get(Ret);
4626 }
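// Editor's sketch of the control flow emitted above (illustrative):
//   entry:        size == 0 ? goto exit(null) : goto wmemchr.eq
//   wmemchr.eq:   *str == chr ? goto exit(str) : goto wmemchr.next
//   wmemchr.next: ++str; --size; size == 0 ? goto exit(null)
//                                          : goto wmemchr.eq
// The exit PHI merges null (from entry/next) with the found pointer.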
4627 case Builtin::BI__builtin_wmemcmp: {
4628 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4629 // need an inline implementation.
4630 if (!getTarget().getTriple().isOSMSVCRT())
4631 break;
4632
4633 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4634
4635 Value *Dst = EmitScalarExpr(E->getArg(0));
4636 Value *Src = EmitScalarExpr(E->getArg(1));
4637 Value *Size = EmitScalarExpr(E->getArg(2));
4638
4639 BasicBlock *Entry = Builder.GetInsertBlock();
4640 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4641 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4642 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4643 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4644 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4645 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4646
4647 EmitBlock(CmpGT);
4648 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4649 DstPhi->addIncoming(Dst, Entry);
4650 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4651 SrcPhi->addIncoming(Src, Entry);
4652 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4653 SizePhi->addIncoming(Size, Entry);
4654 CharUnits WCharAlign =
4655 getContext().getTypeAlignInChars(getContext().WCharTy);
4656 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4657 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4658 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4659 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4660
4661 EmitBlock(CmpLT);
4662 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4663 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4664
4665 EmitBlock(Next);
4666 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4667 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4668 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4669 Value *NextSizeEq0 =
4670 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4671 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4672 DstPhi->addIncoming(NextDst, Next);
4673 SrcPhi->addIncoming(NextSrc, Next);
4674 SizePhi->addIncoming(NextSize, Next);
4675
4676 EmitBlock(Exit);
4677 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4678 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4679 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4680 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
4681 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4682 return RValue::get(Ret);
4683 }
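// Editor's note: the element compares above are deliberately unsigned
// (ICmpUGT/ICmpULT) because this inline path is only used for MSVC targets,
// where wchar_t is an unsigned 16-bit type; e.g. wmemcmp(L"\xffff", L"a", 1)
// must report the first buffer as greater, which a signed compare would not.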
4684 case Builtin::BI__builtin_dwarf_cfa: {
4685 // The offset in bytes from the first argument to the CFA.
4686 //
4687 // Why on earth is this in the frontend? Is there any reason at
4688 // all that the backend can't reasonably determine this while
4689 // lowering llvm.eh.dwarf.cfa()?
4690 //
4691 // TODO: If there's a satisfactory reason, add a target hook for
4692 // this instead of hard-coding 0, which is correct for most targets.
4693 int32_t Offset = 0;
4694
4695 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4696 return RValue::get(Builder.CreateCall(F,
4697 llvm::ConstantInt::get(Int32Ty, Offset)));
4698 }
4699 case Builtin::BI__builtin_return_address: {
4700 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4701 getContext().UnsignedIntTy);
4702 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4703 return RValue::get(Builder.CreateCall(F, Depth));
4704 }
4705 case Builtin::BI_ReturnAddress: {
4706 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4707 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4708 }
4709 case Builtin::BI__builtin_frame_address: {
4710 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4711 getContext().UnsignedIntTy);
4712 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4713 return RValue::get(Builder.CreateCall(F, Depth));
4714 }
4715 case Builtin::BI__builtin_extract_return_addr: {
4716 Value *Address = EmitScalarExpr(E->getArg(0));
4717 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
4718 return RValue::get(Result);
4719 }
4720 case Builtin::BI__builtin_frob_return_addr: {
4721 Value *Address = EmitScalarExpr(E->getArg(0));
4722 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
4723 return RValue::get(Result);
4724 }
4725 case Builtin::BI__builtin_dwarf_sp_column: {
4726 llvm::IntegerType *Ty
4727 = cast<llvm::IntegerType>(ConvertType(E->getType()));
4728 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
4729 if (Column == -1) {
4730 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4731 return RValue::get(llvm::UndefValue::get(Ty));
4732 }
4733 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4734 }
4735 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4736 Value *Address = EmitScalarExpr(E->getArg(0));
4737 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4738 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4739 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4740 }
4741 case Builtin::BI__builtin_eh_return: {
4742 Value *Int = EmitScalarExpr(E->getArg(0));
4743 Value *Ptr = EmitScalarExpr(E->getArg(1));
4744
4745 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4746 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4747 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4748 Function *F =
4749 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4750 : Intrinsic::eh_return_i64);
4751 Builder.CreateCall(F, {Int, Ptr});
4752 Builder.CreateUnreachable();
4753
4754 // We do need to preserve an insertion point.
4755 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4756
4757 return RValue::get(nullptr);
4758 }
4759 case Builtin::BI__builtin_unwind_init: {
4760 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4761 Builder.CreateCall(F);
4762 return RValue::get(nullptr);
4763 }
4764 case Builtin::BI__builtin_extend_pointer: {
4765 // Extends a pointer to the size of an _Unwind_Word, which is
4766 // uint64_t on all platforms. Generally this gets poked into a
4767 // register and eventually used as an address, so if the
4768 // addressing registers are wider than pointers and the platform
4769 // doesn't implicitly ignore high-order bits when doing
4770 // addressing, we need to make sure we zext / sext based on
4771 // the platform's expectations.
4772 //
4773 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4774
4775 // Cast the pointer to intptr_t.
4776 Value *Ptr = EmitScalarExpr(E->getArg(0));
4777 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4778
4779 // If that's 64 bits, we're done.
4780 if (IntPtrTy->getBitWidth() == 64)
4781 return RValue::get(Result);
4782
4783 // Otherwise, ask the codegen data what to do.
4784 if (getTargetHooks().extendPointerWithSExt())
4785 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4786 else
4787 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4788 }
4789 case Builtin::BI__builtin_setjmp: {
4790 // Buffer is a void**.
4791 Address Buf = EmitPointerWithAlignment(E->getArg(0));
4792
4793 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4794 // On this target, the back end fills in the context buffer completely.
4795 // It doesn't really matter if the frontend stores to the buffer before
4796 // calling setjmp; the back end is going to overwrite it anyway.
4797 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4798 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4799 }
4800
4801 // Store the frame pointer to the setjmp buffer.
4802 Value *FrameAddr = Builder.CreateCall(
4803 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4804 ConstantInt::get(Int32Ty, 0));
4805 Builder.CreateStore(FrameAddr, Buf);
4806
4807 // Store the stack pointer to the setjmp buffer.
4808 Value *StackAddr = Builder.CreateStackSave();
4809 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4810
4811 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4812 Builder.CreateStore(StackAddr, StackSaveSlot);
4813
4814 // Call LLVM's EH setjmp, which is lightweight.
4815 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4816 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4817 }
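// Editor's note: a sketch of the buffer layout assumed above, following
// LLVM's @llvm.eh.sjlj.setjmp convention:
//   buf[0] = frame pointer  (stored via @llvm.frameaddress)
//   buf[1] = resume address (filled in by the intrinsic/back end)
//   buf[2] = stack pointer  (stored via stacksave)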
4818 case Builtin::BI__builtin_longjmp: {
4819 Value *Buf = EmitScalarExpr(E->getArg(0));
4820
4821 // Call LLVM's EH longjmp, which is lightweight.
4822 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4823
4824 // longjmp doesn't return; mark this as unreachable.
4825 Builder.CreateUnreachable();
4826
4827 // We do need to preserve an insertion point.
4828 EmitBlock(createBasicBlock("longjmp.cont"));
4829
4830 return RValue::get(nullptr);
4831 }
4832 case Builtin::BI__builtin_launder: {
4833 const Expr *Arg = E->getArg(0);
4834 QualType ArgTy = Arg->getType()->getPointeeType();
4835 Value *Ptr = EmitScalarExpr(Arg);
4836 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4837 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4838
4839 return RValue::get(Ptr);
4840 }
4841 case Builtin::BI__sync_fetch_and_add:
4842 case Builtin::BI__sync_fetch_and_sub:
4843 case Builtin::BI__sync_fetch_and_or:
4844 case Builtin::BI__sync_fetch_and_and:
4845 case Builtin::BI__sync_fetch_and_xor:
4846 case Builtin::BI__sync_fetch_and_nand:
4847 case Builtin::BI__sync_add_and_fetch:
4848 case Builtin::BI__sync_sub_and_fetch:
4849 case Builtin::BI__sync_and_and_fetch:
4850 case Builtin::BI__sync_or_and_fetch:
4851 case Builtin::BI__sync_xor_and_fetch:
4852 case Builtin::BI__sync_nand_and_fetch:
4853 case Builtin::BI__sync_val_compare_and_swap:
4854 case Builtin::BI__sync_bool_compare_and_swap:
4855 case Builtin::BI__sync_lock_test_and_set:
4856 case Builtin::BI__sync_lock_release:
4857 case Builtin::BI__sync_swap:
4858 llvm_unreachable("Shouldn't make it through sema");
4859 case Builtin::BI__sync_fetch_and_add_1:
4860 case Builtin::BI__sync_fetch_and_add_2:
4861 case Builtin::BI__sync_fetch_and_add_4:
4862 case Builtin::BI__sync_fetch_and_add_8:
4863 case Builtin::BI__sync_fetch_and_add_16:
4864 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4865 case Builtin::BI__sync_fetch_and_sub_1:
4866 case Builtin::BI__sync_fetch_and_sub_2:
4867 case Builtin::BI__sync_fetch_and_sub_4:
4868 case Builtin::BI__sync_fetch_and_sub_8:
4869 case Builtin::BI__sync_fetch_and_sub_16:
4870 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4871 case Builtin::BI__sync_fetch_and_or_1:
4872 case Builtin::BI__sync_fetch_and_or_2:
4873 case Builtin::BI__sync_fetch_and_or_4:
4874 case Builtin::BI__sync_fetch_and_or_8:
4875 case Builtin::BI__sync_fetch_and_or_16:
4876 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4877 case Builtin::BI__sync_fetch_and_and_1:
4878 case Builtin::BI__sync_fetch_and_and_2:
4879 case Builtin::BI__sync_fetch_and_and_4:
4880 case Builtin::BI__sync_fetch_and_and_8:
4881 case Builtin::BI__sync_fetch_and_and_16:
4882 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4883 case Builtin::BI__sync_fetch_and_xor_1:
4884 case Builtin::BI__sync_fetch_and_xor_2:
4885 case Builtin::BI__sync_fetch_and_xor_4:
4886 case Builtin::BI__sync_fetch_and_xor_8:
4887 case Builtin::BI__sync_fetch_and_xor_16:
4888 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4889 case Builtin::BI__sync_fetch_and_nand_1:
4890 case Builtin::BI__sync_fetch_and_nand_2:
4891 case Builtin::BI__sync_fetch_and_nand_4:
4892 case Builtin::BI__sync_fetch_and_nand_8:
4893 case Builtin::BI__sync_fetch_and_nand_16:
4894 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
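// Editor's note, illustrative lowering: the fetch-first forms return the
// old value, so __sync_fetch_and_nand(p, v) on an int becomes roughly
//   %old = atomicrmw nand ptr %p, i32 %v seq_cst
// where nand stores ~(%old & %v) and yields %old.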
4895
4896 // Clang extensions: not overloaded yet.
4897 case Builtin::BI__sync_fetch_and_min:
4898 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4899 case Builtin::BI__sync_fetch_and_max:
4900 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4901 case Builtin::BI__sync_fetch_and_umin:
4902 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4903 case Builtin::BI__sync_fetch_and_umax:
4904 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4905
4906 case Builtin::BI__sync_add_and_fetch_1:
4907 case Builtin::BI__sync_add_and_fetch_2:
4908 case Builtin::BI__sync_add_and_fetch_4:
4909 case Builtin::BI__sync_add_and_fetch_8:
4910 case Builtin::BI__sync_add_and_fetch_16:
4911 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4912 llvm::Instruction::Add);
4913 case Builtin::BI__sync_sub_and_fetch_1:
4914 case Builtin::BI__sync_sub_and_fetch_2:
4915 case Builtin::BI__sync_sub_and_fetch_4:
4916 case Builtin::BI__sync_sub_and_fetch_8:
4917 case Builtin::BI__sync_sub_and_fetch_16:
4918 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4919 llvm::Instruction::Sub);
4920 case Builtin::BI__sync_and_and_fetch_1:
4921 case Builtin::BI__sync_and_and_fetch_2:
4922 case Builtin::BI__sync_and_and_fetch_4:
4923 case Builtin::BI__sync_and_and_fetch_8:
4924 case Builtin::BI__sync_and_and_fetch_16:
4925 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4926 llvm::Instruction::And);
4927 case Builtin::BI__sync_or_and_fetch_1:
4928 case Builtin::BI__sync_or_and_fetch_2:
4929 case Builtin::BI__sync_or_and_fetch_4:
4930 case Builtin::BI__sync_or_and_fetch_8:
4931 case Builtin::BI__sync_or_and_fetch_16:
4932 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
4933 llvm::Instruction::Or);
4934 case Builtin::BI__sync_xor_and_fetch_1:
4935 case Builtin::BI__sync_xor_and_fetch_2:
4936 case Builtin::BI__sync_xor_and_fetch_4:
4937 case Builtin::BI__sync_xor_and_fetch_8:
4938 case Builtin::BI__sync_xor_and_fetch_16:
4939 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
4940 llvm::Instruction::Xor);
4941 case Builtin::BI__sync_nand_and_fetch_1:
4942 case Builtin::BI__sync_nand_and_fetch_2:
4943 case Builtin::BI__sync_nand_and_fetch_4:
4944 case Builtin::BI__sync_nand_and_fetch_8:
4945 case Builtin::BI__sync_nand_and_fetch_16:
4946 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
4947 llvm::Instruction::And, true);
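// Editor's note: the *_and_fetch forms emit the same atomicrmw but then
// recompute the new value from the returned old one (e.g. %old + %v for
// add). For nand, the trailing 'true' asks EmitBinaryAtomicPost to invert
// the recomputed (%old & %v), giving ~(%old & %v) per GCC 4.4+ semantics.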
4948
4949 case Builtin::BI__sync_val_compare_and_swap_1:
4950 case Builtin::BI__sync_val_compare_and_swap_2:
4951 case Builtin::BI__sync_val_compare_and_swap_4:
4952 case Builtin::BI__sync_val_compare_and_swap_8:
4953 case Builtin::BI__sync_val_compare_and_swap_16:
4954 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
4955
4956 case Builtin::BI__sync_bool_compare_and_swap_1:
4957 case Builtin::BI__sync_bool_compare_and_swap_2:
4958 case Builtin::BI__sync_bool_compare_and_swap_4:
4959 case Builtin::BI__sync_bool_compare_and_swap_8:
4960 case Builtin::BI__sync_bool_compare_and_swap_16:
4961 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
4962
4963 case Builtin::BI__sync_swap_1:
4964 case Builtin::BI__sync_swap_2:
4965 case Builtin::BI__sync_swap_4:
4966 case Builtin::BI__sync_swap_8:
4967 case Builtin::BI__sync_swap_16:
4968 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4969
4970 case Builtin::BI__sync_lock_test_and_set_1:
4971 case Builtin::BI__sync_lock_test_and_set_2:
4972 case Builtin::BI__sync_lock_test_and_set_4:
4973 case Builtin::BI__sync_lock_test_and_set_8:
4974 case Builtin::BI__sync_lock_test_and_set_16:
4975 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4976
4977 case Builtin::BI__sync_lock_release_1:
4978 case Builtin::BI__sync_lock_release_2:
4979 case Builtin::BI__sync_lock_release_4:
4980 case Builtin::BI__sync_lock_release_8:
4981 case Builtin::BI__sync_lock_release_16: {
4982 Address Ptr = CheckAtomicAlignment(*this, E);
4983 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
4984
4985 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
4986 getContext().getTypeSize(ElTy));
4987 llvm::StoreInst *Store =
4988 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
4989 Store->setAtomic(llvm::AtomicOrdering::Release);
4990 return RValue::get(nullptr);
4991 }
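// Editor's note: e.g. __sync_lock_release on an int lowers roughly to
//   store atomic i32 0, ptr %p release
// i.e. a release-ordered store of zero, not an atomicrmw.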
4992
4993 case Builtin::BI__sync_synchronize: {
4994 // We assume this is supposed to correspond to a C++0x-style
4995 // sequentially-consistent fence (i.e. this is only usable for
4996 // synchronization, not device I/O or anything like that). This intrinsic
4997 // is really badly designed in the sense that in theory, there isn't
4998 // any way to safely use it... but in practice, it mostly works
4999 // to use it with non-atomic loads and stores to get acquire/release
5000 // semantics.
5001 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5002 return RValue::get(nullptr);
5003 }
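// Editor's note: i.e. __sync_synchronize() is emitted as a single
//   fence seq_cst
// instruction.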
5004
5005 case Builtin::BI__builtin_nontemporal_load:
5006 return RValue::get(EmitNontemporalLoad(*this, E));
5007 case Builtin::BI__builtin_nontemporal_store:
5008 return RValue::get(EmitNontemporalStore(*this, E));
5009 case Builtin::BI__c11_atomic_is_lock_free:
5010 case Builtin::BI__atomic_is_lock_free: {
5011 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5012 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5013 // _Atomic(T) is always properly-aligned.
5014 const char *LibCallName = "__atomic_is_lock_free";
5015 CallArgList Args;
5016 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5017 getContext().getSizeType());
5018 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5019 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5020 getContext().VoidPtrTy);
5021 else
5022 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5023 getContext().VoidPtrTy);
5024 const CGFunctionInfo &FuncInfo =
5025 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5026 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5027 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5028 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5029 ReturnValueSlot(), Args);
5030 }
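// Editor's note, illustrative: __c11_atomic_is_lock_free(sizeof(T))
// becomes roughly
//   call i1 @__atomic_is_lock_free(i64 sizeof(T), ptr null)
// with null standing in for "suitably aligned", while the __atomic_
// variant passes the user's pointer through as the second argument.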
5031
5032 case Builtin::BI__atomic_thread_fence:
5033 case Builtin::BI__atomic_signal_fence:
5034 case Builtin::BI__c11_atomic_thread_fence:
5035 case Builtin::BI__c11_atomic_signal_fence: {
5036 llvm::SyncScope::ID SSID;
5037 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5038 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5039 SSID = llvm::SyncScope::SingleThread;
5040 else
5041 SSID = llvm::SyncScope::System;
5042 Value *Order = EmitScalarExpr(E->getArg(0));
5043 if (isa<llvm::ConstantInt>(Order)) {
5044 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5045 switch (ord) {
5046 case 0: // memory_order_relaxed
5047 default: // invalid order
5048 break;
5049 case 1: // memory_order_consume
5050 case 2: // memory_order_acquire
5051 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5052 break;
5053 case 3: // memory_order_release
5054 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5055 break;
5056 case 4: // memory_order_acq_rel
5057 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5058 break;
5059 case 5: // memory_order_seq_cst
5060 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5061 break;
5062 }
5063 return RValue::get(nullptr);
5064 }
5065
5066 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5067 AcquireBB = createBasicBlock("acquire", CurFn);
5068 ReleaseBB = createBasicBlock("release", CurFn);
5069 AcqRelBB = createBasicBlock("acqrel", CurFn);
5070 SeqCstBB = createBasicBlock("seqcst", CurFn);
5071 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5072
5073 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5074 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5075
5076 Builder.SetInsertPoint(AcquireBB);
5077 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5078 Builder.CreateBr(ContBB);
5079 SI->addCase(Builder.getInt32(1), AcquireBB);
5080 SI->addCase(Builder.getInt32(2), AcquireBB);
5081
5082 Builder.SetInsertPoint(ReleaseBB);
5083 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5084 Builder.CreateBr(ContBB);
5085 SI->addCase(Builder.getInt32(3), ReleaseBB);
5086
5087 Builder.SetInsertPoint(AcqRelBB);
5088 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5089 Builder.CreateBr(ContBB);
5090 SI->addCase(Builder.getInt32(4), AcqRelBB);
5091
5092 Builder.SetInsertPoint(SeqCstBB);
5093 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5094 Builder.CreateBr(ContBB);
5095 SI->addCase(Builder.getInt32(5), SeqCstBB);
5096
5097 Builder.SetInsertPoint(ContBB);
5098 return RValue::get(nullptr);
5099 }
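// Editor's note: when the order is not a compile-time constant, the
// switch emitted above behaves roughly like
//   switch (order) {
//   case 1: case 2: fence acquire; break;   // consume, acquire
//   case 3:         fence release; break;
//   case 4:         fence acq_rel; break;
//   case 5:         fence seq_cst; break;
//   default:        break;                  // relaxed/invalid: no fence
//   }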
5100 case Builtin::BI__scoped_atomic_thread_fence: {
5101 auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
5102
5103 Value *Order = EmitScalarExpr(E->getArg(0));
5104 Value *Scope = EmitScalarExpr(E->getArg(1));
5105 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5106 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5107 if (Ord && Scp) {
5108 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5109 ? ScopeModel->map(Scp->getZExtValue())
5110 : ScopeModel->map(ScopeModel->getFallBackValue());
5111 switch (Ord->getZExtValue()) {
5112 case 0: // memory_order_relaxed
5113 default: // invalid order
5114 break;
5115 case 1: // memory_order_consume
5116 case 2: // memory_order_acquire
5117 Builder.CreateFence(
5118 llvm::AtomicOrdering::Acquire,
5119 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5120 llvm::AtomicOrdering::Acquire,
5121 getLLVMContext()));
5122 break;
5123 case 3: // memory_order_release
5124 Builder.CreateFence(
5125 llvm::AtomicOrdering::Release,
5126 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5127 llvm::AtomicOrdering::Release,
5128 getLLVMContext()));
5129 break;
5130 case 4: // memory_order_acq_rel
5131 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5132 getTargetHooks().getLLVMSyncScopeID(
5133 getLangOpts(), SS,
5134 llvm::AtomicOrdering::AcquireRelease,
5135 getLLVMContext()));
5136 break;
5137 case 5: // memory_order_seq_cst
5138 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5139 getTargetHooks().getLLVMSyncScopeID(
5140 getLangOpts(), SS,
5141 llvm::AtomicOrdering::SequentiallyConsistent,
5142 getLLVMContext()));
5143 break;
5144 }
5145 return RValue::get(nullptr);
5146 }
5147
5148 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5149
5150 llvm::SmallVector<std::pair<llvm::BasicBlock *, llvm::AtomicOrdering>>
5151 OrderBBs;
5152 if (Ord) {
5153 switch (Ord->getZExtValue()) {
5154 case 0: // memory_order_relaxed
5155 default: // invalid order
5156 ContBB->eraseFromParent();
5157 return RValue::get(nullptr);
5158 case 1: // memory_order_consume
5159 case 2: // memory_order_acquire
5160 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5161 llvm::AtomicOrdering::Acquire);
5162 break;
5163 case 3: // memory_order_release
5164 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5165 llvm::AtomicOrdering::Release);
5166 break;
5167 case 4: // memory_order_acq_rel
5168 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5169 llvm::AtomicOrdering::AcquireRelease);
5170 break;
5171 case 5: // memory_order_seq_cst
5172 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5173 llvm::AtomicOrdering::SequentiallyConsistent);
5174 break;
5175 }
5176 } else {
5177 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5178 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5179 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5180 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5181
5182 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5183 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5184 SI->addCase(Builder.getInt32(1), AcquireBB);
5185 SI->addCase(Builder.getInt32(2), AcquireBB);
5186 SI->addCase(Builder.getInt32(3), ReleaseBB);
5187 SI->addCase(Builder.getInt32(4), AcqRelBB);
5188 SI->addCase(Builder.getInt32(5), SeqCstBB);
5189
5190 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5191 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5192 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5193 OrderBBs.emplace_back(SeqCstBB,
5194 llvm::AtomicOrdering::SequentiallyConsistent);
5195 }
5196
5197 for (auto &[OrderBB, Ordering] : OrderBBs) {
5198 Builder.SetInsertPoint(OrderBB);
5199 if (Scp) {
5200 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5201 ? ScopeModel->map(Scp->getZExtValue())
5202 : ScopeModel->map(ScopeModel->getFallBackValue());
5203 Builder.CreateFence(Ordering,
5204 getTargetHooks().getLLVMSyncScopeID(
5205 getLangOpts(), SS, Ordering, getLLVMContext()));
5206 Builder.CreateBr(ContBB);
5207 } else {
5208 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5209 for (unsigned Scp : ScopeModel->getRuntimeValues())
5210 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5211
5212 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5213 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5214 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5215 auto *B = BBs[Scp];
5216 SI->addCase(Builder.getInt32(Scp), B);
5217
5218 Builder.SetInsertPoint(B);
5219 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5220 getLangOpts(), ScopeModel->map(Scp),
5221 Ordering, getLLVMContext()));
5222 Builder.CreateBr(ContBB);
5223 }
5224 }
5225 }
5226
5227 Builder.SetInsertPoint(ContBB);
5228 return RValue::get(nullptr);
5229 }
5230
5231 case Builtin::BI__builtin_signbit:
5232 case Builtin::BI__builtin_signbitf:
5233 case Builtin::BI__builtin_signbitl: {
5234 return RValue::get(
5235 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5236 ConvertType(E->getType())));
5237 }
5238 case Builtin::BI__warn_memset_zero_len:
5239 return RValue::getIgnored();
5240 case Builtin::BI__annotation: {
5241 // Re-encode each wide string to UTF8 and make an MDString.
5242 SmallVector<llvm::Metadata *, 1> Strings;
5243 for (const Expr *Arg : E->arguments()) {
5244 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5245 assert(Str->getCharByteWidth() == 2);
5246 StringRef WideBytes = Str->getBytes();
5247 std::string StrUtf8;
5248 if (!convertUTF16ToUTF8String(
5249 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5250 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5251 continue;
5252 }
5253 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5254 }
5255
5256 // Build an MDTuple of MDStrings and emit the intrinsic call.
5257 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5258 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5259 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5260 return RValue::getIgnored();
5261 }
5262 case Builtin::BI__builtin_annotation: {
5263 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5264 llvm::Function *F = CGM.getIntrinsic(
5265 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5266
5267 // Get the annotation string, go through casts. Sema requires this to be a
5268 // non-wide string literal, potentially cast, so the cast<> is safe.
5269 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5270 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5271 return RValue::get(
5272 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5273 }
5274 case Builtin::BI__builtin_addcb:
5275 case Builtin::BI__builtin_addcs:
5276 case Builtin::BI__builtin_addc:
5277 case Builtin::BI__builtin_addcl:
5278 case Builtin::BI__builtin_addcll:
5279 case Builtin::BI__builtin_subcb:
5280 case Builtin::BI__builtin_subcs:
5281 case Builtin::BI__builtin_subc:
5282 case Builtin::BI__builtin_subcl:
5283 case Builtin::BI__builtin_subcll: {
5284
5285 // We translate all of these builtins from expressions of the form:
5286 // int x = ..., y = ..., carryin = ..., carryout, result;
5287 // result = __builtin_addc(x, y, carryin, &carryout);
5288 //
5289 // to LLVM IR of the form:
5290 //
5291 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5292 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5293 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5294 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5295 // i32 %carryin)
5296 // %result = extractvalue {i32, i1} %tmp2, 0
5297 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5298 // %tmp3 = or i1 %carry1, %carry2
5299 // %tmp4 = zext i1 %tmp3 to i32
5300 // store i32 %tmp4, i32* %carryout
5301
5302 // Scalarize our inputs.
5303 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5304 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5305 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5306 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5307
5308 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5309 Intrinsic::ID IntrinsicId;
5310 switch (BuiltinID) {
5311 default: llvm_unreachable("Unknown multiprecision builtin id.");
5312 case Builtin::BI__builtin_addcb:
5313 case Builtin::BI__builtin_addcs:
5314 case Builtin::BI__builtin_addc:
5315 case Builtin::BI__builtin_addcl:
5316 case Builtin::BI__builtin_addcll:
5317 IntrinsicId = Intrinsic::uadd_with_overflow;
5318 break;
5319 case Builtin::BI__builtin_subcb:
5320 case Builtin::BI__builtin_subcs:
5321 case Builtin::BI__builtin_subc:
5322 case Builtin::BI__builtin_subcl:
5323 case Builtin::BI__builtin_subcll:
5324 IntrinsicId = Intrinsic::usub_with_overflow;
5325 break;
5326 }
5327
5328 // Construct our resulting LLVM IR expression.
5329 llvm::Value *Carry1;
5330 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5331 X, Y, Carry1);
5332 llvm::Value *Carry2;
5333 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5334 Sum1, Carryin, Carry2);
5335 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5336 X->getType());
5337 Builder.CreateStore(CarryOut, CarryOutPtr);
5338 return RValue::get(Sum2);
5339 }
5340
5341 case Builtin::BI__builtin_add_overflow:
5342 case Builtin::BI__builtin_sub_overflow:
5343 case Builtin::BI__builtin_mul_overflow: {
5344 const clang::Expr *LeftArg = E->getArg(0);
5345 const clang::Expr *RightArg = E->getArg(1);
5346 const clang::Expr *ResultArg = E->getArg(2);
5347
5348 clang::QualType ResultQTy =
5349 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5350
5351 WidthAndSignedness LeftInfo =
5352 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5353 WidthAndSignedness RightInfo =
5354 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5355 WidthAndSignedness ResultInfo =
5356 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5357
5358 // Handle mixed-sign multiplication as a special case, because adding
5359 // runtime or backend support for our generic irgen would be too expensive.
5360 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5361 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5362 RightInfo, ResultArg, ResultQTy,
5363 ResultInfo);
5364
5365 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5366 ResultInfo))
5367 return EmitCheckedUnsignedMultiplySignedResult(
5368 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5369 ResultInfo);
5370
5371 WidthAndSignedness EncompassingInfo =
5372 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5373
5374 llvm::Type *EncompassingLLVMTy =
5375 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5376
5377 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5378
5379 Intrinsic::ID IntrinsicId;
5380 switch (BuiltinID) {
5381 default:
5382 llvm_unreachable("Unknown overflow builtin id.");
5383 case Builtin::BI__builtin_add_overflow:
5384 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5385 : Intrinsic::uadd_with_overflow;
5386 break;
5387 case Builtin::BI__builtin_sub_overflow:
5388 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5389 : Intrinsic::usub_with_overflow;
5390 break;
5391 case Builtin::BI__builtin_mul_overflow:
5392 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5393 : Intrinsic::umul_with_overflow;
5394 break;
5395 }
5396
5397 llvm::Value *Left = EmitScalarExpr(LeftArg);
5398 llvm::Value *Right = EmitScalarExpr(RightArg);
5399 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5400
5401 // Extend each operand to the encompassing type.
5402 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5403 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5404
5405 // Perform the operation on the extended values.
5406 llvm::Value *Overflow, *Result;
5407 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5408
5409 if (EncompassingInfo.Width > ResultInfo.Width) {
5410 // The encompassing type is wider than the result type, so we need to
5411 // truncate it.
5412 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5413
5414 // To see if the truncation caused an overflow, we will extend
5415 // the result and then compare it to the original result.
5416 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5417 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5418 llvm::Value *TruncationOverflow =
5419 Builder.CreateICmpNE(Result, ResultTruncExt);
5420
5421 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5422 Result = ResultTrunc;
5423 }
5424
5425 // Finally, store the result using the pointer.
5426 bool isVolatile =
5427 ResultArg->getType()->getPointeeType().isVolatileQualified();
5428 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5429
5430 return RValue::get(Overflow);
5431 }
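// Editor's note, an illustrative case: with
//   int a, b; unsigned r;
//   __builtin_add_overflow(a, b, &r);
// the encompassing type must hold both int and unsigned as a signed type,
// i.e. 33 bits, so this emits roughly
//   %s = call {i33, i1} @llvm.sadd.with.overflow.i33(i33 %a, i33 %b)
// followed by a trunc to i32 and a check that re-extending the truncated
// value reproduces %s; either overflow bit is OR'd into the returned flag.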
5432
5433 case Builtin::BI__builtin_uadd_overflow:
5434 case Builtin::BI__builtin_uaddl_overflow:
5435 case Builtin::BI__builtin_uaddll_overflow:
5436 case Builtin::BI__builtin_usub_overflow:
5437 case Builtin::BI__builtin_usubl_overflow:
5438 case Builtin::BI__builtin_usubll_overflow:
5439 case Builtin::BI__builtin_umul_overflow:
5440 case Builtin::BI__builtin_umull_overflow:
5441 case Builtin::BI__builtin_umulll_overflow:
5442 case Builtin::BI__builtin_sadd_overflow:
5443 case Builtin::BI__builtin_saddl_overflow:
5444 case Builtin::BI__builtin_saddll_overflow:
5445 case Builtin::BI__builtin_ssub_overflow:
5446 case Builtin::BI__builtin_ssubl_overflow:
5447 case Builtin::BI__builtin_ssubll_overflow:
5448 case Builtin::BI__builtin_smul_overflow:
5449 case Builtin::BI__builtin_smull_overflow:
5450 case Builtin::BI__builtin_smulll_overflow: {
5451
5452 // We translate all of these builtins directly to the relevant LLVM IR node.
5453
5454 // Scalarize our inputs.
5455 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5456 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5457 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5458
5459 // Decide which of the overflow intrinsics we are lowering to:
5460 Intrinsic::ID IntrinsicId;
5461 switch (BuiltinID) {
5462 default: llvm_unreachable("Unknown overflow builtin id.");
5463 case Builtin::BI__builtin_uadd_overflow:
5464 case Builtin::BI__builtin_uaddl_overflow:
5465 case Builtin::BI__builtin_uaddll_overflow:
5466 IntrinsicId = Intrinsic::uadd_with_overflow;
5467 break;
5468 case Builtin::BI__builtin_usub_overflow:
5469 case Builtin::BI__builtin_usubl_overflow:
5470 case Builtin::BI__builtin_usubll_overflow:
5471 IntrinsicId = Intrinsic::usub_with_overflow;
5472 break;
5473 case Builtin::BI__builtin_umul_overflow:
5474 case Builtin::BI__builtin_umull_overflow:
5475 case Builtin::BI__builtin_umulll_overflow:
5476 IntrinsicId = Intrinsic::umul_with_overflow;
5477 break;
5478 case Builtin::BI__builtin_sadd_overflow:
5479 case Builtin::BI__builtin_saddl_overflow:
5480 case Builtin::BI__builtin_saddll_overflow:
5481 IntrinsicId = Intrinsic::sadd_with_overflow;
5482 break;
5483 case Builtin::BI__builtin_ssub_overflow:
5484 case Builtin::BI__builtin_ssubl_overflow:
5485 case Builtin::BI__builtin_ssubll_overflow:
5486 IntrinsicId = Intrinsic::ssub_with_overflow;
5487 break;
5488 case Builtin::BI__builtin_smul_overflow:
5489 case Builtin::BI__builtin_smull_overflow:
5490 case Builtin::BI__builtin_smulll_overflow:
5491 IntrinsicId = Intrinsic::smul_with_overflow;
5492 break;
5493 }
5494
5495
5496 llvm::Value *Carry;
5497 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5498 Builder.CreateStore(Sum, SumOutPtr);
5499
5500 return RValue::get(Carry);
5501 }
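// Editor's note: e.g. __builtin_sadd_overflow(x, y, &sum) lowers to
//   %pair  = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %sum   = extractvalue {i32, i1} %pair, 0   ; stored through arg 2
//   %carry = extractvalue {i32, i1} %pair, 1   ; returned to the caller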
5502 case Builtin::BIaddressof:
5503 case Builtin::BI__addressof:
5504 case Builtin::BI__builtin_addressof:
5505 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5506 case Builtin::BI__builtin_function_start:
5507 return RValue::get(CGM.GetFunctionStart(
5508 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5509 case Builtin::BI__builtin_operator_new:
5510 return EmitBuiltinNewDeleteCall(
5511 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5512 case Builtin::BI__builtin_operator_delete:
5513 EmitBuiltinNewDeleteCall(
5514 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5515 return RValue::get(nullptr);
5516
5517 case Builtin::BI__builtin_is_aligned:
5518 return EmitBuiltinIsAligned(E);
5519 case Builtin::BI__builtin_align_up:
5520 return EmitBuiltinAlignTo(E, true);
5521 case Builtin::BI__builtin_align_down:
5522 return EmitBuiltinAlignTo(E, false);
5523
5524 case Builtin::BI__noop:
5525 // __noop always evaluates to an integer literal zero.
5526 return RValue::get(ConstantInt::get(IntTy, 0));
5527 case Builtin::BI__builtin_call_with_static_chain: {
5528 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5529 const Expr *Chain = E->getArg(1);
5530 return EmitCall(Call->getCallee()->getType(),
5531 EmitCallee(Call->getCallee()), Call, ReturnValue,
5532 EmitScalarExpr(Chain));
5533 }
5534 case Builtin::BI_InterlockedExchange8:
5535 case Builtin::BI_InterlockedExchange16:
5536 case Builtin::BI_InterlockedExchange:
5537 case Builtin::BI_InterlockedExchangePointer:
5538 return RValue::get(
5539 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
5540 case Builtin::BI_InterlockedCompareExchangePointer:
5541 return RValue::get(
5542 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange, E));
5543 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5544 return RValue::get(
5545 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E));
5546 case Builtin::BI_InterlockedCompareExchange8:
5547 case Builtin::BI_InterlockedCompareExchange16:
5548 case Builtin::BI_InterlockedCompareExchange:
5549 case Builtin::BI_InterlockedCompareExchange64:
5550 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5551 case Builtin::BI_InterlockedIncrement16:
5552 case Builtin::BI_InterlockedIncrement:
5553 return RValue::get(
5554 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
5555 case Builtin::BI_InterlockedDecrement16:
5556 case Builtin::BI_InterlockedDecrement:
5557 return RValue::get(
5558 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
5559 case Builtin::BI_InterlockedAnd8:
5560 case Builtin::BI_InterlockedAnd16:
5561 case Builtin::BI_InterlockedAnd:
5562 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
5563 case Builtin::BI_InterlockedExchangeAdd8:
5564 case Builtin::BI_InterlockedExchangeAdd16:
5565 case Builtin::BI_InterlockedExchangeAdd:
5566 return RValue::get(
5567 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
5568 case Builtin::BI_InterlockedExchangeSub8:
5569 case Builtin::BI_InterlockedExchangeSub16:
5570 case Builtin::BI_InterlockedExchangeSub:
5571 return RValue::get(
5572 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
5573 case Builtin::BI_InterlockedOr8:
5574 case Builtin::BI_InterlockedOr16:
5575 case Builtin::BI_InterlockedOr:
5576 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
5577 case Builtin::BI_InterlockedXor8:
5578 case Builtin::BI_InterlockedXor16:
5579 case Builtin::BI_InterlockedXor:
5580 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
5581
5582 case Builtin::BI_bittest64:
5583 case Builtin::BI_bittest:
5584 case Builtin::BI_bittestandcomplement64:
5585 case Builtin::BI_bittestandcomplement:
5586 case Builtin::BI_bittestandreset64:
5587 case Builtin::BI_bittestandreset:
5588 case Builtin::BI_bittestandset64:
5589 case Builtin::BI_bittestandset:
5590 case Builtin::BI_interlockedbittestandreset:
5591 case Builtin::BI_interlockedbittestandreset64:
5592 case Builtin::BI_interlockedbittestandreset64_acq:
5593 case Builtin::BI_interlockedbittestandreset64_rel:
5594 case Builtin::BI_interlockedbittestandreset64_nf:
5595 case Builtin::BI_interlockedbittestandset64:
5596 case Builtin::BI_interlockedbittestandset64_acq:
5597 case Builtin::BI_interlockedbittestandset64_rel:
5598 case Builtin::BI_interlockedbittestandset64_nf:
5599 case Builtin::BI_interlockedbittestandset:
5600 case Builtin::BI_interlockedbittestandset_acq:
5601 case Builtin::BI_interlockedbittestandset_rel:
5602 case Builtin::BI_interlockedbittestandset_nf:
5603 case Builtin::BI_interlockedbittestandreset_acq:
5604 case Builtin::BI_interlockedbittestandreset_rel:
5605 case Builtin::BI_interlockedbittestandreset_nf:
5606 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5607
5608 // These builtins exist to emit regular volatile loads and stores not
5609 // affected by the -fms-volatile setting.
5610 case Builtin::BI__iso_volatile_load8:
5611 case Builtin::BI__iso_volatile_load16:
5612 case Builtin::BI__iso_volatile_load32:
5613 case Builtin::BI__iso_volatile_load64:
5614 return RValue::get(EmitISOVolatileLoad(*this, E));
5615 case Builtin::BI__iso_volatile_store8:
5616 case Builtin::BI__iso_volatile_store16:
5617 case Builtin::BI__iso_volatile_store32:
5618 case Builtin::BI__iso_volatile_store64:
5619 return RValue::get(EmitISOVolatileStore(*this, E));
5620
5621 case Builtin::BI__builtin_ptrauth_sign_constant:
5622 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5623
5624 case Builtin::BI__builtin_ptrauth_auth:
5625 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5626 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5627 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5628 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5629 case Builtin::BI__builtin_ptrauth_strip: {
5630 // Emit the arguments.
5631 SmallVector<llvm::Value *, 5> Args;
5632 for (auto argExpr : E->arguments())
5633 Args.push_back(EmitScalarExpr(argExpr));
5634
5635 // Cast the value to intptr_t, saving its original type.
5636 llvm::Type *OrigValueType = Args[0]->getType();
5637 if (OrigValueType->isPointerTy())
5638 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5639
5640 switch (BuiltinID) {
5641 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5642 if (Args[4]->getType()->isPointerTy())
5643 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5644 [[fallthrough]];
5645
5646 case Builtin::BI__builtin_ptrauth_auth:
5647 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5648 if (Args[2]->getType()->isPointerTy())
5649 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5650 break;
5651
5652 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5653 if (Args[1]->getType()->isPointerTy())
5654 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5655 break;
5656
5657 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5658 case Builtin::BI__builtin_ptrauth_strip:
5659 break;
5660 }
5661
5662 // Call the intrinsic.
5663 auto IntrinsicID = [&]() -> unsigned {
5664 switch (BuiltinID) {
5665 case Builtin::BI__builtin_ptrauth_auth:
5666 return Intrinsic::ptrauth_auth;
5667 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5668 return Intrinsic::ptrauth_resign;
5669 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5670 return Intrinsic::ptrauth_blend;
5671 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5672 return Intrinsic::ptrauth_sign_generic;
5673 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5674 return Intrinsic::ptrauth_sign;
5675 case Builtin::BI__builtin_ptrauth_strip:
5676 return Intrinsic::ptrauth_strip;
5677 }
5678 llvm_unreachable("bad ptrauth intrinsic");
5679 }();
5680 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5681 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5682
5683 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5684 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5685 OrigValueType->isPointerTy()) {
5686 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5687 }
5688 return RValue::get(Result);
5689 }
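// Editor's note, illustrative: for a pointer argument p,
//   void *q = __builtin_ptrauth_strip(p, key);
// is emitted roughly as
//   %i = ptrtoint ptr %p to i64
//   %s = call i64 @llvm.ptrauth.strip(i64 %i, i32 %key)
//   %q = inttoptr i64 %s to ptr
// the ptrtoint/inttoptr round-trip being the intptr_t cast noted above.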
5690
5691 case Builtin::BI__builtin_get_vtable_pointer: {
5692 const Expr *Target = E->getArg(0);
5693 QualType TargetType = Target->getType();
5694 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5695 assert(Decl);
5696 auto ThisAddress = EmitPointerWithAlignment(Target);
5697 assert(ThisAddress.isValid());
5698 llvm::Value *VTablePointer =
5699 GetVTablePtr(ThisAddress, Int8PtrTy, Decl, VTableAuthMode::MustTrap);
5700 return RValue::get(VTablePointer);
5701 }
5702
5703 case Builtin::BI__exception_code:
5704 case Builtin::BI_exception_code:
5705 return RValue::get(EmitSEHExceptionCode());
5706 case Builtin::BI__exception_info:
5707 case Builtin::BI_exception_info:
5708 return RValue::get(EmitSEHExceptionInfo());
5709 case Builtin::BI__abnormal_termination:
5710 case Builtin::BI_abnormal_termination:
5711 return RValue::get(EmitSEHAbnormalTermination());
5712 case Builtin::BI_setjmpex:
5713 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5714 E->getArg(0)->getType()->isPointerType())
5715 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5716 break;
5717 case Builtin::BI_setjmp:
5718 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5719 E->getArg(0)->getType()->isPointerType()) {
5720 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5721 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5722 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5723 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5724 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5725 }
5726 break;
5727
5728 // C++ std:: builtins.
5729 case Builtin::BImove:
5730 case Builtin::BImove_if_noexcept:
5731 case Builtin::BIforward:
5732 case Builtin::BIforward_like:
5733 case Builtin::BIas_const:
5734 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5735 case Builtin::BI__GetExceptionInfo: {
5736 if (llvm::GlobalVariable *GV =
5737 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5738 return RValue::get(GV);
5739 break;
5740 }
5741
5742 case Builtin::BI__fastfail:
5743 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
5744
5745 case Builtin::BI__builtin_coro_id:
5746 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5747 case Builtin::BI__builtin_coro_promise:
5748 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5749 case Builtin::BI__builtin_coro_resume:
5750 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5751 return RValue::get(nullptr);
5752 case Builtin::BI__builtin_coro_frame:
5753 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5754 case Builtin::BI__builtin_coro_noop:
5755 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5756 case Builtin::BI__builtin_coro_free:
5757 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5758 case Builtin::BI__builtin_coro_destroy:
5759 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5760 return RValue::get(nullptr);
5761 case Builtin::BI__builtin_coro_done:
5762 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5763 case Builtin::BI__builtin_coro_alloc:
5764 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5765 case Builtin::BI__builtin_coro_begin:
5766 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5767 case Builtin::BI__builtin_coro_end:
5768 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5769 case Builtin::BI__builtin_coro_suspend:
5770 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5771 case Builtin::BI__builtin_coro_size:
5772 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5773 case Builtin::BI__builtin_coro_align:
5774 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5775
5776 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5777 case Builtin::BIread_pipe:
5778 case Builtin::BIwrite_pipe: {
5779 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5780 *Arg1 = EmitScalarExpr(E->getArg(1));
5781 CGOpenCLRuntime OpenCLRT(CGM);
5782 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5783 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5784
5785 // Type of the generic packet parameter.
5786 unsigned GenericAS =
5787 getContext().getTargetAddressSpace(LangAS::opencl_generic);
5788 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5789
5790 // Determine which overloaded version we should generate the call for.
5791 if (2U == E->getNumArgs()) {
5792 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5793 : "__write_pipe_2";
5794 // Create a generic function type so the call works with any builtin or
5795 // user-defined type.
5796 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5797 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5798 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5799 return RValue::get(
5800 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5801 {Arg0, ACast, PacketSize, PacketAlign}));
5802 } else {
5803 assert(4 == E->getNumArgs() &&
5804 "Illegal number of parameters to pipe function");
5805 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5806 : "__write_pipe_4";
5807
5808 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5809 Int32Ty, Int32Ty};
5810 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5811 *Arg3 = EmitScalarExpr(E->getArg(3));
5812 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5813 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5814 // We know the third argument is an integer type, but we may need to cast
5815 // it to i32.
5816 if (Arg2->getType() != Int32Ty)
5817 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5818 return RValue::get(
5819 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5820 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5821 }
5822 }
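// Editor's note: e.g. the two-argument form
//   int ok = read_pipe(p, &val);
// lowers roughly to
//   call i32 @__read_pipe_2(ptr %p, ptr addrspace(4) %valcast,
//                           i32 <packet size>, i32 <packet align>)
// where %valcast is &val addrspacecast to the generic AS and the packet
// size/alignment come from the pipe's element type.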
5823 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
5824 // functions
5825 case Builtin::BIreserve_read_pipe:
5826 case Builtin::BIreserve_write_pipe:
5827 case Builtin::BIwork_group_reserve_read_pipe:
5828 case Builtin::BIwork_group_reserve_write_pipe:
5829 case Builtin::BIsub_group_reserve_read_pipe:
5830 case Builtin::BIsub_group_reserve_write_pipe: {
5831 // Composing the mangled name for the function.
5832 const char *Name;
5833 if (BuiltinID == Builtin::BIreserve_read_pipe)
5834 Name = "__reserve_read_pipe";
5835 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5836 Name = "__reserve_write_pipe";
5837 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5838 Name = "__work_group_reserve_read_pipe";
5839 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5840 Name = "__work_group_reserve_write_pipe";
5841 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5842 Name = "__sub_group_reserve_read_pipe";
5843 else
5844 Name = "__sub_group_reserve_write_pipe";
5845
5846 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5847 *Arg1 = EmitScalarExpr(E->getArg(1));
5848 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5849 CGOpenCLRuntime OpenCLRT(CGM);
5850 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5851 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5852
5853 // Building the generic function prototype.
5854 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5855 llvm::FunctionType *FTy =
5856 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5857 // We know the second argument is an integer type, but we may need to cast
5858 // it to i32.
5859 if (Arg1->getType() != Int32Ty)
5860 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5861 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5862 {Arg0, Arg1, PacketSize, PacketAlign}));
5863 }
5864 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5865 // functions
5866 case Builtin::BIcommit_read_pipe:
5867 case Builtin::BIcommit_write_pipe:
5868 case Builtin::BIwork_group_commit_read_pipe:
5869 case Builtin::BIwork_group_commit_write_pipe:
5870 case Builtin::BIsub_group_commit_read_pipe:
5871 case Builtin::BIsub_group_commit_write_pipe: {
5872 const char *Name;
5873 if (BuiltinID == Builtin::BIcommit_read_pipe)
5874 Name = "__commit_read_pipe";
5875 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5876 Name = "__commit_write_pipe";
5877 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5878 Name = "__work_group_commit_read_pipe";
5879 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5880 Name = "__work_group_commit_write_pipe";
5881 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5882 Name = "__sub_group_commit_read_pipe";
5883 else
5884 Name = "__sub_group_commit_write_pipe";
5885
5886 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5887 *Arg1 = EmitScalarExpr(E->getArg(1));
5888 CGOpenCLRuntime OpenCLRT(CGM);
5889 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5890 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5891
5892 // Building the generic function prototype.
5893 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5894 llvm::FunctionType *FTy = llvm::FunctionType::get(
5895 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
5896
5897 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5898 {Arg0, Arg1, PacketSize, PacketAlign}));
5899 }
5900 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5901 case Builtin::BIget_pipe_num_packets:
5902 case Builtin::BIget_pipe_max_packets: {
5903 const char *BaseName;
5904 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5905 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5906 BaseName = "__get_pipe_num_packets";
5907 else
5908 BaseName = "__get_pipe_max_packets";
5909 std::string Name = std::string(BaseName) +
5910 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5911
5912 // Building the generic function prototype.
5913 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5914 CGOpenCLRuntime OpenCLRT(CGM);
5915 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5916 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5917 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5918 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5919
5920 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5921 {Arg0, PacketSize, PacketAlign}));
5922 }
5923
5924 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
5925 case Builtin::BIto_global:
5926 case Builtin::BIto_local:
5927 case Builtin::BIto_private: {
5928 auto Arg0 = EmitScalarExpr(E->getArg(0));
5929 auto NewArgT = llvm::PointerType::get(
5930 getLLVMContext(),
5931 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5932 auto NewRetT = llvm::PointerType::get(
5933 getLLVMContext(),
5934 CGM.getContext().getTargetAddressSpace(
5935 E->getType()->getPointeeType().getAddressSpace()));
5936 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
5937 llvm::Value *NewArg;
5938 if (Arg0->getType()->getPointerAddressSpace() !=
5939 NewArgT->getPointerAddressSpace())
5940 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
5941 else
5942 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
5943 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
5944 auto NewCall =
5945 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
5946 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
5947 ConvertType(E->getType())));
5948 }
5949
5950 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
5951 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
5952 // The code below expands the builtin call to a call to one of the following
5953 // functions that an OpenCL runtime library will have to provide:
5954 // __enqueue_kernel_basic
5955 // __enqueue_kernel_varargs
5956 // __enqueue_kernel_basic_events
5957 // __enqueue_kernel_events_varargs
5958 case Builtin::BIenqueue_kernel: {
5959 StringRef Name; // Generated function call name
5960 unsigned NumArgs = E->getNumArgs();
5961
5962 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
5963 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5964 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5965
5966 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
5967 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
5968 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
5969 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
5970
5971 // FIXME: Look through the addrspacecast which may exist to the stack
5972 // temporary as a hack.
5973 //
5974 // This is hardcoding the assumed ABI of the target function. This assumes
5975 // direct passing for every argument except NDRange, which is assumed to be
5976 // byval or byref indirect passed.
5977 //
5978 // This should be fixed to query a signature from CGOpenCLRuntime, and go
5979 // through EmitCallArgs to get the correct target ABI.
5980 Range = Range->stripPointerCasts();
5981
5982 llvm::Type *RangePtrTy = Range->getType();
5983
5984 if (NumArgs == 4) {
5985 // The most basic form of the call with parameters:
5986 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
5987 Name = "__enqueue_kernel_basic";
5988 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
5989 GenericVoidPtrTy};
5990 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5991
5992 auto Info =
5993 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5994 llvm::Value *Kernel =
5995 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5996 llvm::Value *Block =
5997 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5998
5999 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6000 {Queue, Flags, Range, Kernel, Block});
6001 return RValue::get(RTCall);
6002 }
6003 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6004
6005 // Create a temporary array to hold the sizes of local pointer arguments
6006 // for the block. \p First is the position of the first size argument.
6007 auto CreateArrayForSizeVar =
6008 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6009      llvm::APInt ArraySize(32, NumArgs - First);
6010      QualType SizeArrayTy = getContext().getConstantArrayType(
6011          getContext().getSizeType(), ArraySize, nullptr,
6012          ArraySizeModifier::Normal,
6013          /*IndexTypeQuals=*/0);
6014 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6015 llvm::Value *TmpPtr = Tmp.getPointer();
6016      // The EmitLifetime* pair expects a naked Alloca as its last argument;
6017      // however, for cases where the default AS is not the Alloca AS, Tmp is
6018      // actually the Alloca ascasted to the default AS, hence the
6019      // stripPointerCasts().
6020 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6021 llvm::Value *ElemPtr;
6022 EmitLifetimeStart(Alloca);
6023 // Each of the following arguments specifies the size of the corresponding
6024 // argument passed to the enqueued block.
6025 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6026 for (unsigned I = First; I < NumArgs; ++I) {
6027 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6028 auto *GEP =
6029 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6030 if (I == First)
6031 ElemPtr = GEP;
6032 auto *V =
6033 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6034 Builder.CreateAlignedStore(
6035 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6036 }
6037 // Return the Alloca itself rather than a potential ascast as this is only
6038 // used by the paired EmitLifetimeEnd.
6039 return {ElemPtr, Alloca};
6040 };
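// Sketch of what CreateArrayForSizeVar builds, assuming two trailing size
// arguments on a 64-bit target (hypothetical values): a stack temporary
//   %block_sizes = alloca [2 x i64]
// whose elements hold the byte sizes of the block's local-pointer
// parameters. ElemPtr addresses element 0 and is handed to the
// __enqueue_kernel_*varargs runtime entry points below, while the raw
// alloca is returned for the matching EmitLifetimeEnd.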
6041
6042 // Could have events and/or varargs.
6043 if (E->getArg(3)->getType()->isBlockPointerType()) {
6044 // No events passed, but has variadic arguments.
6045 Name = "__enqueue_kernel_varargs";
6046 auto Info =
6047 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6048 llvm::Value *Kernel =
6049 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6050 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6051 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6052
6053 // Create a vector of the arguments, as well as a constant value to
6054 // express to the runtime the number of variadic arguments.
6055 llvm::Value *const Args[] = {Queue, Flags,
6056 Range, Kernel,
6057 Block, ConstantInt::get(IntTy, NumArgs - 4),
6058 ElemPtr};
6059 llvm::Type *const ArgTys[] = {
6060 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6061 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6062
6063 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6064 auto Call = RValue::get(
6065 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6066 EmitLifetimeEnd(TmpPtr);
6067 return Call;
6068 }
6069 // Any calls now have event arguments passed.
6070 if (NumArgs >= 7) {
6071 llvm::PointerType *PtrTy = llvm::PointerType::get(
6072 CGM.getLLVMContext(),
6073 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6074
6075 llvm::Value *NumEvents =
6076 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6077
6078      // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
6079      // arguments to be a null pointer constant (including a `0` literal),
6080      // we can take that into account and emit a null pointer directly.
6081 llvm::Value *EventWaitList = nullptr;
6082 if (E->getArg(4)->isNullPointerConstant(
6083            getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6084        EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6085 } else {
6086 EventWaitList =
6087 E->getArg(4)->getType()->isArrayType()
6088                ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
6089                : EmitScalarExpr(E->getArg(4));
6090 // Convert to generic address space.
6091 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6092 }
6093 llvm::Value *EventRet = nullptr;
6094 if (E->getArg(5)->isNullPointerConstant(
6095            getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6096        EventRet = llvm::ConstantPointerNull::get(PtrTy);
6097 } else {
6098 EventRet =
6099 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6100 }
6101
6102 auto Info =
6103 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6104 llvm::Value *Kernel =
6105 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6106 llvm::Value *Block =
6107 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6108
6109 std::vector<llvm::Type *> ArgTys = {
6110 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6111 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6112
6113 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6114 NumEvents, EventWaitList, EventRet,
6115 Kernel, Block};
6116
6117 if (NumArgs == 7) {
6118 // Has events but no variadics.
6119 Name = "__enqueue_kernel_basic_events";
6120 llvm::FunctionType *FTy =
6121 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6122 return RValue::get(
6123 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6124 }
6125 // Has event info and variadics
6126 // Pass the number of variadics to the runtime function too.
6127 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6128 ArgTys.push_back(Int32Ty);
6129 Name = "__enqueue_kernel_events_varargs";
6130
6131 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6132 Args.push_back(ElemPtr);
6133 ArgTys.push_back(ElemPtr->getType());
6134
6135 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6136 auto Call = RValue::get(
6137 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6138 EmitLifetimeEnd(TmpPtr);
6139 return Call;
6140 }
6141 llvm_unreachable("Unexpected enqueue_kernel signature");
6142 }
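// Summary sketch of the dispatch implemented above (q = queue, f = flags,
// r = ndrange, n = num_events, wl/ev = event lists, s = local size args):
//   enqueue_kernel(q, f, r, block)                  -> __enqueue_kernel_basic
//   enqueue_kernel(q, f, r, block, s...)            -> __enqueue_kernel_varargs
//   enqueue_kernel(q, f, r, n, wl, ev, block)       -> __enqueue_kernel_basic_events
//   enqueue_kernel(q, f, r, n, wl, ev, block, s...) -> __enqueue_kernel_events_varargs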
6143 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6144 // parameter.
6145 case Builtin::BIget_kernel_work_group_size: {
6146 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6147 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6148 auto Info =
6149 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6150 Value *Kernel =
6151 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6152 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6153    return RValue::get(EmitRuntimeCall(
6154        CGM.CreateRuntimeFunction(
6155 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6156 false),
6157 "__get_kernel_work_group_size_impl"),
6158 {Kernel, Arg}));
6159 }
6160 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6161 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6162 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6163 auto Info =
6164 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6165 Value *Kernel =
6166 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6167 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6168    return RValue::get(EmitRuntimeCall(
6169        CGM.CreateRuntimeFunction(
6170 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6171 false),
6172 "__get_kernel_preferred_work_group_size_multiple_impl"),
6173 {Kernel, Arg}));
6174 }
6175 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6176 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6177 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6178 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6179 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6180 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6181 auto Info =
6182 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6183 Value *Kernel =
6184 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6185 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6186 const char *Name =
6187 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6188 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6189 : "__get_kernel_sub_group_count_for_ndrange_impl";
6190    return RValue::get(EmitRuntimeCall(
6191        CGM.CreateRuntimeFunction(
6192 llvm::FunctionType::get(
6193 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6194 false),
6195 Name),
6196 {NDRange, Kernel, Block}));
6197 }
6198 case Builtin::BI__builtin_store_half:
6199 case Builtin::BI__builtin_store_halff: {
6200 Value *Val = EmitScalarExpr(E->getArg(0));
6201    Address Address = EmitPointerWithAlignment(E->getArg(1));
6202    Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6203 Builder.CreateStore(HalfVal, Address);
6204 return RValue::get(nullptr);
6205 }
6206 case Builtin::BI__builtin_load_half: {
6207    Address Address = EmitPointerWithAlignment(E->getArg(0));
6208    Value *HalfVal = Builder.CreateLoad(Address);
6209 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6210 }
6211 case Builtin::BI__builtin_load_halff: {
6212    Address Address = EmitPointerWithAlignment(E->getArg(0));
6213    Value *HalfVal = Builder.CreateLoad(Address);
6214 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6215 }
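// Minimal usage sketch for the three cases above ('p' is a hypothetical
// pointer to half-precision storage):
//   __builtin_store_halff(3.5f, p);     // fptrunc float -> half, then store
//   float  f = __builtin_load_halff(p); // load half, fpext to float
//   double d = __builtin_load_half(p);  // load half, fpext to double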
6216 case Builtin::BI__builtin_printf:
6217 case Builtin::BIprintf:
6218 if (getTarget().getTriple().isNVPTX() ||
6219 getTarget().getTriple().isAMDGCN() ||
6220 (getTarget().getTriple().isSPIRV() &&
6221 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6222 if (getTarget().getTriple().isNVPTX())
6223        return EmitNVPTXDevicePrintfCallExpr(E);
6224      if ((getTarget().getTriple().isAMDGCN() ||
6225           getTarget().getTriple().isSPIRV()) &&
6226          getLangOpts().HIP)
6227        return EmitAMDGPUDevicePrintfCallExpr(E);
6228    }
6229
6230 break;
6231 case Builtin::BI__builtin_canonicalize:
6232 case Builtin::BI__builtin_canonicalizef:
6233 case Builtin::BI__builtin_canonicalizef16:
6234 case Builtin::BI__builtin_canonicalizel:
6235 return RValue::get(
6236 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6237
6238 case Builtin::BI__builtin_thread_pointer: {
6239 if (!getContext().getTargetInfo().isTLSSupported())
6240 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6241
6242 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6243 {GlobalsInt8PtrTy}, {}));
6244 }
6245 case Builtin::BI__builtin_os_log_format:
6246 return emitBuiltinOSLogFormat(*E);
6247
6248 case Builtin::BI__xray_customevent: {
6249    if (!ShouldXRayInstrumentFunction())
6250      return RValue::getIgnored();
6251
6252    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6253            XRayInstrKind::Custom))
6254      return RValue::getIgnored();
6255
6256 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6257 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6258 return RValue::getIgnored();
6259
6260 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6261 auto FTy = F->getFunctionType();
6262 auto Arg0 = E->getArg(0);
6263 auto Arg0Val = EmitScalarExpr(Arg0);
6264 auto Arg0Ty = Arg0->getType();
6265 auto PTy0 = FTy->getParamType(0);
6266 if (PTy0 != Arg0Val->getType()) {
6267 if (Arg0Ty->isArrayType())
6268 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6269 else
6270 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6271 }
6272 auto Arg1 = EmitScalarExpr(E->getArg(1));
6273 auto PTy1 = FTy->getParamType(1);
6274 if (PTy1 != Arg1->getType())
6275 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6276 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6277 }
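// Hedged sketch of the result for a call like __xray_customevent(buf, len):
// the buffer pointer is decayed/cast and the size truncated or bitcast to
// the intrinsic's declared parameter types, yielding roughly
//   call void @llvm.xray.customevent(ptr %buf, i64 %len)
// (the exact size operand width follows the intrinsic's definition).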
6278
6279 case Builtin::BI__xray_typedevent: {
6280 // TODO: There should be a way to always emit events even if the current
6281 // function is not instrumented. Losing events in a stream can cripple
6282 // a trace.
6283    if (!ShouldXRayInstrumentFunction())
6284      return RValue::getIgnored();
6285
6286    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6287            XRayInstrKind::Typed))
6288      return RValue::getIgnored();
6289
6290 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6291 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6292 return RValue::getIgnored();
6293
6294 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6295 auto FTy = F->getFunctionType();
6296 auto Arg0 = EmitScalarExpr(E->getArg(0));
6297 auto PTy0 = FTy->getParamType(0);
6298 if (PTy0 != Arg0->getType())
6299 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6300 auto Arg1 = E->getArg(1);
6301 auto Arg1Val = EmitScalarExpr(Arg1);
6302 auto Arg1Ty = Arg1->getType();
6303 auto PTy1 = FTy->getParamType(1);
6304 if (PTy1 != Arg1Val->getType()) {
6305 if (Arg1Ty->isArrayType())
6306 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6307 else
6308 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6309 }
6310 auto Arg2 = EmitScalarExpr(E->getArg(2));
6311 auto PTy2 = FTy->getParamType(2);
6312 if (PTy2 != Arg2->getType())
6313 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6314 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6315 }
6316
6317 case Builtin::BI__builtin_ms_va_start:
6318 case Builtin::BI__builtin_ms_va_end:
6319 return RValue::get(
6320        EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
6321                       BuiltinID == Builtin::BI__builtin_ms_va_start));
6322
6323 case Builtin::BI__builtin_ms_va_copy: {
6324 // Lower this manually. We can't reliably determine whether or not any
6325 // given va_copy() is for a Win64 va_list from the calling convention
6326 // alone, because it's legal to do this from a System V ABI function.
6327 // With opaque pointer types, we won't have enough information in LLVM
6328 // IR to determine this from the argument types, either. Best to do it
6329 // now, while we have enough information.
6330 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6331 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6332
6333 DestAddr = DestAddr.withElementType(Int8PtrTy);
6334 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6335
6336 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6337 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6338 }
6339
6340 case Builtin::BI__builtin_get_device_side_mangled_name: {
6341 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6342 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6343 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6344 return RValue::get(Str.getPointer());
6345 }
6346 }
6347
6348 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6349 // the call using the normal call path, but using the unmangled
6350 // version of the function name.
6351 const auto &BI = getContext().BuiltinInfo;
6352 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6353 BI.isLibFunction(BuiltinID))
6354 return emitLibraryCall(*this, FD, E,
6355 CGM.getBuiltinLibFunction(FD, BuiltinID));
6356
6357 // If this is a predefined lib function (e.g. malloc), emit the call
6358 // using exactly the normal call path.
6359 if (BI.isPredefinedLibFunction(BuiltinID))
6360 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6361
6362 // Check that a call to a target specific builtin has the correct target
6363 // features.
6364  // This is down here so the check is only performed for target-specific
6365  // builtins; however, if generic builtins start to require generic target
6366  // features, then we can move this up to the beginning of the function.
6367 checkTargetFeatures(E, FD);
6368
6369 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6370 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6371
6372 // See if we have a target specific intrinsic.
6373 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6374 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6375 StringRef Prefix =
6376 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6377 if (!Prefix.empty()) {
6378 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6379 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6380 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6381 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6382    // NOTE: we don't need to perform a compatibility flag check here since
6383    // the intrinsics are declared in Builtins*.def via LANGBUILTIN, which
6384    // filters the MS builtins via ALL_MS_LANGUAGES, so they are filtered earlier.
6385 if (IntrinsicID == Intrinsic::not_intrinsic)
6386 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6387 }
6388
6389 if (IntrinsicID != Intrinsic::not_intrinsic) {
6390    SmallVector<Value *, 16> Args;
6391
6392 // Find out if any arguments are required to be integer constant
6393 // expressions.
6394 unsigned ICEArguments = 0;
6395    ASTContext::GetBuiltinTypeError Error;
6396    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6397 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6398
6399 Function *F = CGM.getIntrinsic(IntrinsicID);
6400 llvm::FunctionType *FTy = F->getFunctionType();
6401
6402 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6403 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6404 // If the intrinsic arg type is different from the builtin arg type
6405 // we need to do a bit cast.
6406 llvm::Type *PTy = FTy->getParamType(i);
6407 if (PTy != ArgValue->getType()) {
6408 // XXX - vector of pointers?
6409 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6410 if (PtrTy->getAddressSpace() !=
6411 ArgValue->getType()->getPointerAddressSpace()) {
6412 ArgValue = Builder.CreateAddrSpaceCast(
6413 ArgValue, llvm::PointerType::get(getLLVMContext(),
6414 PtrTy->getAddressSpace()));
6415 }
6416 }
6417
6418      // Cast vector type (e.g., v256i32) to x86_amx; this only happens
6419      // in AMX intrinsics.
6420 if (PTy->isX86_AMXTy())
6421 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6422 {ArgValue->getType()}, {ArgValue});
6423 else
6424 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6425 }
6426
6427 Args.push_back(ArgValue);
6428 }
6429
6430 Value *V = Builder.CreateCall(F, Args);
6431 QualType BuiltinRetType = E->getType();
6432
6433 llvm::Type *RetTy = VoidTy;
6434 if (!BuiltinRetType->isVoidType())
6435 RetTy = ConvertType(BuiltinRetType);
6436
6437 if (RetTy != V->getType()) {
6438 // XXX - vector of pointers?
6439 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6440 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6441 V = Builder.CreateAddrSpaceCast(
6442 V, llvm::PointerType::get(getLLVMContext(),
6443 PtrTy->getAddressSpace()));
6444 }
6445 }
6446
6447    // Cast x86_amx to vector type (e.g., v256i32); this only happens
6448    // in AMX intrinsics.
6449 if (V->getType()->isX86_AMXTy())
6450 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6451 {V});
6452 else
6453 V = Builder.CreateBitCast(V, RetTy);
6454 }
6455
6456 if (RetTy->isVoidTy())
6457 return RValue::get(nullptr);
6458
6459 return RValue::get(V);
6460 }
6461
6462 // Some target-specific builtins can have aggregate return values, e.g.
6463 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6464 // ReturnValue to be non-null, so that the target-specific emission code can
6465 // always just emit into it.
6466  TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
6467  if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6468 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6469 ReturnValue = ReturnValueSlot(DestPtr, false);
6470 }
6471
6472 // Now see if we can emit a target-specific builtin.
6473 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6474 switch (EvalKind) {
6475 case TEK_Scalar:
6476 if (V->getType()->isVoidTy())
6477 return RValue::get(nullptr);
6478 return RValue::get(V);
6479 case TEK_Aggregate:
6480 return RValue::getAggregate(ReturnValue.getAddress(),
6481 ReturnValue.isVolatile());
6482 case TEK_Complex:
6483 llvm_unreachable("No current target builtin returns complex");
6484 }
6485 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6486 }
6487
6488 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6489 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6490 switch (EvalKind) {
6491 case TEK_Scalar:
6492 if (V->getType()->isVoidTy())
6493 return RValue::get(nullptr);
6494 return RValue::get(V);
6495 case TEK_Aggregate:
6496 return RValue::getAggregate(ReturnValue.getAddress(),
6497 ReturnValue.isVolatile());
6498 case TEK_Complex:
6499 llvm_unreachable("No current hlsl builtin returns complex");
6500 }
6501 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6502 }
6503
6504 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6505 return EmitHipStdParUnsupportedBuiltin(this, FD);
6506
6507 ErrorUnsupported(E, "builtin function");
6508
6509 // Unknown builtin, for now just dump it out and return undef.
6510 return GetUndefRValue(E->getType());
6511}
6512
6513namespace {
6514struct BuiltinAlignArgs {
6515 llvm::Value *Src = nullptr;
6516 llvm::Type *SrcType = nullptr;
6517 llvm::Value *Alignment = nullptr;
6518 llvm::Value *Mask = nullptr;
6519 llvm::IntegerType *IntType = nullptr;
6520
6521 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6522 QualType AstType = E->getArg(0)->getType();
6523 if (AstType->isArrayType())
6524 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6525 else
6526 Src = CGF.EmitScalarExpr(E->getArg(0));
6527 SrcType = Src->getType();
6528 if (SrcType->isPointerTy()) {
6529 IntType = IntegerType::get(
6530 CGF.getLLVMContext(),
6531 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6532 } else {
6533 assert(SrcType->isIntegerTy());
6534 IntType = cast<llvm::IntegerType>(SrcType);
6535 }
6536 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6537 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6538 auto *One = llvm::ConstantInt::get(IntType, 1);
6539 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6540 }
6541};
6542} // namespace
6543
6544/// Generate (x & (y-1)) == 0.
6545 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
6546   BuiltinAlignArgs Args(E, *this);
6547 llvm::Value *SrcAddress = Args.Src;
6548 if (Args.SrcType->isPointerTy())
6549 SrcAddress =
6550 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6551 return RValue::get(Builder.CreateICmpEQ(
6552 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6553 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6554}
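// Rough IR for __builtin_is_aligned(p, 16) on a 64-bit target (value names
// are illustrative, mirroring the code above):
//   %src_addr   = ptrtoint ptr %p to i64
//   %set_bits   = and i64 %src_addr, 15          ; mask = alignment - 1
//   %is_aligned = icmp eq i64 %set_bits, 0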
6555
6556/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6557/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6558/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6559 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
6560   BuiltinAlignArgs Args(E, *this);
6561 llvm::Value *SrcForMask = Args.Src;
6562 if (AlignUp) {
6563 // When aligning up we have to first add the mask to ensure we go over the
6564 // next alignment value and then align down to the next valid multiple.
6565 // By adding the mask, we ensure that align_up on an already aligned
6566 // value will not change the value.
6567 if (Args.Src->getType()->isPointerTy()) {
6568 if (getLangOpts().PointerOverflowDefined)
6569 SrcForMask =
6570 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6571 else
6572 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6573 /*SignedIndices=*/true,
6574 /*isSubtraction=*/false,
6575 E->getExprLoc(), "over_boundary");
6576 } else {
6577 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6578 }
6579 }
6580 // Invert the mask to only clear the lower bits.
6581 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6582 llvm::Value *Result = nullptr;
6583 if (Args.Src->getType()->isPointerTy()) {
6584 Result = Builder.CreateIntrinsic(
6585 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6586 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6587 } else {
6588 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6589 }
6590 assert(Result->getType() == Args.SrcType);
6591 return RValue::get(Result);
6592}
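// Rough IR for the pointer form of __builtin_align_up(p, 16) (illustrative;
// the integer form uses a plain add/and instead of GEP + llvm.ptrmask):
//   %over_boundary  = getelementptr inbounds i8, ptr %p, i64 15
//   %aligned_result = call ptr @llvm.ptrmask.p0.i64(ptr %over_boundary, i64 -16)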
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static Value * emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool)
Utility to insert an atomic cmpxchg instruction.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:776
Builtin::Context & BuiltinInfo
Definition ASTContext.h:778
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:895
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3734
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4075
Expr * getRHS() const
Definition Expr.h:4024
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
bool hasStoredFPFeatures() const
Definition Expr.h:3036
SourceLocation getBeginLoc() const
Definition Expr.h:3211
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
Expr * getCallee()
Definition Expr.h:3024
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3176
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
arg_range arguments()
Definition Expr.h:3129
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:147
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:184
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:173
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:132
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:350
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
virtual llvm::Value * getPipeElemAlign(const Expr *PipeArg)
virtual llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2752
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1187
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5092
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:411
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3745
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6773
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3635
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4591
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2710
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6167
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7825
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:225
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3893
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1277
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2190
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5248
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:5020
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4323
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2331
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1569
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:737
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1552
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:188
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4308
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4235
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2220
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1230
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:420
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4223
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1668
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1702
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:362
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
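Taken together, these constructors cover every shape a builtin emitter can hand back; a sketch with assumed values V, Re, Im and an assumed Address Addr:
  RValue Scalar = RValue::get(V);              // a single SSA value
  RValue Cplx   = RValue::getComplex(Re, Im);  // real/imaginary pair
  RValue Aggr   = RValue::getAggregate(Addr);  // result lives in memory
  RValue None   = RValue::getIgnored();        // caller discards the value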
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query whether the target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4373
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3436
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3466
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:459
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3113
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3091
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3086
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
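A hedged sketch of the usual folding pattern these evaluators enable, assuming an argument expression Arg and an ASTContext Ctx:
  // Try to fold the argument to a compile-time integer; if that fails,
  // the emitter falls back to evaluating it at run time.
  Expr::EvalResult Folded;
  if (Arg->EvaluateAsInt(Folded, Ctx)) {
    llvm::APSInt Val = Folded.Val.getInt();
    // ... emit a constant based on Val ...
  }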
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:835
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3666
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3066
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4043
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:222
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3160
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4851
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3758
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition TypeBase.h:5254
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5529
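A sketch of materializing such a parameter; the name "buffer", the null DeclContext, and the chosen type are illustrative only:
  ImplicitParamDecl *Param = ImplicitParamDecl::Create(
      Ctx, /*DC=*/nullptr, SourceLocation(), &Ctx.Idents.get("buffer"),
      Ctx.VoidPtrTy, ImplicitParamKind::Other);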
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2199
PipeType - OpenCL 2.0 pipe type.
Definition TypeBase.h:8096
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8362
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
Represents a struct/union/class.
Definition Decl.h:4321
field_range fields() const
Definition Decl.h:4524
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:747
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8535
bool isVoidType() const
Definition TypeBase.h:8871
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8614
bool isCountAttributedType() const
Definition Type.cpp:741
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9091
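The two member templates differ only in failure behavior; a minimal sketch with a hypothetical QualType QT:
  // getAs<> returns null when QT is not (sugar for) a pointer type;
  // castAs<> asserts instead and is for cases where the kind is known.
  if (const auto *PT = QT->getAs<PointerType>()) {
    QualType Pointee = PT->getPointeeType();
    (void)Pointee;
  }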
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4175
QualType getElementType() const
Definition TypeBase.h:4189
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
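A hedged usage sketch, assuming E is the CallExpr of a __builtin_os_log_format call and Ctx the ASTContext (these helpers live in the clang::analyze_os_log namespace):
  analyze_os_log::OSLogBufferLayout Layout;
  if (analyze_os_log::computeOSLogBufferLayout(Ctx, E, Layout)) {
    // The buffer's first two bytes encode a summary and the argument count.
    unsigned char Summary = Layout.getSummaryByte();
    unsigned char NumArgs = Layout.getNumArgsByte();
    (void)Summary; (void)NumArgs;
  }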
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:350
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Expr * Cond
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
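A minimal sketch of the mask API; SanitizerKind::ArrayBound is just an example bit:
  SanitizerSet Checks;
  Checks.set(SanitizerKind::ArrayBound, true); // enable one check
  Checks.clear(SanitizerKind::ArrayBound);     // disable just that check
  Checks.clear();                              // default mask: disable all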
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742
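These are clang's <tgmath.h> type-generic macros: each one dispatches to the float, double, or long double (or complex) libm function based on its argument types, which is why the builtin emitter's math-call handling cross-references them. A minimal illustration (in C++ the same selection happens through the <cmath> overloads the header pulls in):
  #include <tgmath.h>

  float  f = sqrt(2.0f); // selects sqrtf
  double d = sqrt(2.0);  // selects sqrt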