//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBuiltin.h"
#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/OSLog.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include <optional>
#include <utility>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Some builtins do not have a library implementation on some targets and
/// are instead emitted directly as LLVM IR by the target builtin emitters.
/// FIXME: Remove this when library support is added
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
                                  const Builtin::Context &BI,
                                  const CodeGenFunction &CGF) {
  if (!CGF.CGM.getLangOpts().MathErrno &&
      CGF.CurFPFeatures.getExceptionMode() ==
          LangOptions::FPExceptionModeKind::FPE_Ignore &&
      !CGF.CGM.getTargetCodeGenInfo().supportsLibCall()) {
    switch (BuiltinID) {
    default:
      return false;
    case Builtin::BIlogbf:
    case Builtin::BI__builtin_logbf:
    case Builtin::BIlogb:
    case Builtin::BI__builtin_logb:
    case Builtin::BIscalbnf:
    case Builtin::BI__builtin_scalbnf:
    case Builtin::BIscalbn:
    case Builtin::BI__builtin_scalbn:
      return true;
    }
  }
  return false;
}
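
// Example (illustrative, not part of the original source): with math errno
// disabled on such a target, a call like
//
//   float y = scalbnf(x, n);
//
// can be emitted directly as the LLVM intrinsic
//
//   %y = call float @llvm.ldexp.f32.i32(float %x, i32 %n)
//
// instead of a libcall, since scalbn and ldexp coincide for radix-2 floats.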

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue,
                                        llvm::Triple::ArchType Arch) {
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  // Returning nullptr here leads to the builtin being handled in
  // EmitStdParUnsupportedBuiltin.
  if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
      Arch != CGF->getTarget().getTriple().getArch())
    return nullptr;

  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
  case llvm::Triple::dxil:
    return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
    if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
      return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
    [[fallthrough]];
  case llvm::Triple::spirv:
    return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
                                   getTarget().getTriple().getArch());
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary). We may have
  // to build this up so provide a small stack buffer to handle the vast
  // majority of names.
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to targets other than PPC,
    // after the backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.contains(BuiltinID))
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.contains(BuiltinID))
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                 QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                   QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? DL.getPointerTypeSize(Ptr.getElementType())
                       : DL.getTypeStoreSize(Ptr.getElementType());
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
    // Force address to be at least naturally-aligned.
    return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
  }
  return Ptr;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0));

  LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Val);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
/// invoke the function EmitAtomicCmpXchgForMSIntrin.
Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                              bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Cmp->getType();
  Cmp = EmitToInt(CGF, Cmp, T, IntType);
  Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}
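
// Example (illustrative, not part of the original source): this helper backs
// the GCC-style __sync builtins, e.g.
//
//   __sync_val_compare_and_swap(p, old, new)   // ReturnBool = false
//   __sync_bool_compare_and_swap(p, old, new)  // ReturnBool = true
//
// both of which lower to a seq_cst 'cmpxchg' from which the old value or the
// success flag, respectively, is extracted.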

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
///   T _InterlockedCompareExchange(T volatile *Destination,
///                                 T Exchange,
///                                 T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
///   cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
  auto *RTy = Exchange->getType();

  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));

  if (RTy->isPointerTy()) {
    Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
    Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
  }

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  CmpXchg->setVolatile(true);

  auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
  if (RTy->isPointerTy()) {
    Result = CGF.Builder.CreateIntToPtr(Result, RTy);
  }

  return Result;
}
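
// Example (illustrative, not part of the original source): for
//
//   long r = _InterlockedCompareExchange(&x, Exchange, Comparand);
//
// the operands are swapped into LLVM's order, producing roughly
//
//   %pair = cmpxchg volatile ptr %x, i32 %Comparand, i32 %Exchange
//               seq_cst seq_cst
//   %r    = extractvalue { i32, i1 } %pair, 0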

// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
// prototyped like this:
//
//   unsigned char _InterlockedCompareExchange128...(
//       __int64 volatile * _Destination,
//       __int64 _ExchangeHigh,
//       __int64 _ExchangeLow,
//       __int64 * _ComparandResult);
//
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed int64.

static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));

  assert(DestPtr->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values. Alignment is also overridden for
  // destination pointer.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address DestAddr(DestPtr, Int128Ty,
                   CGF.getContext().toCharUnitsFromBits(128));
  ComparandAddr = ComparandAddr.withElementType(Int128Ty);

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandAddr);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setAtomic(llvm::AtomicOrdering::Monotonic);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setAtomic(llvm::AtomicOrdering::Monotonic);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Has second type mangled argument.
static Value *
emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                       Intrinsic::ID IntrinsicID,
                                       Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}

static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                              Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));

  llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Val);

  llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
  LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);

  llvm::StoreInst *StoreSin =
      CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
  llvm::StoreInst *StoreCos =
      CGF.Builder.CreateStore(CosResult, CosLV.getAddress());

  // Mark the two stores as non-aliasing with each other. The order of stores
  // emitted by this builtin is arbitrary, enforcing a particular order will
  // prevent optimizations later on.
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
  MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
  MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
  StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
  StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
}
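
// Example (illustrative, not part of the original source): for
// sincos(x, &s, &c) with double operands this emits roughly
//
//   %r   = call { double, double } @llvm.sincos.f64(double %x)
//   %sin = extractvalue { double, double } %r, 0
//   %cos = extractvalue { double, double } %r, 1
//
// followed by the two alias-scope-annotated stores to %s and %c.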

static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                    Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));

  llvm::Value *Call =
      CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);

  llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
  CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);

  return FractionalResult;
}
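
// Example (illustrative, not part of the original source): modf(x, &ip)
// becomes roughly
//
//   %r = call { double, double } @llvm.modf.f64(double %x)
//
// where field 0 is the returned fractional part and field 1 is the integral
// part stored through &ip.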

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
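
// Example (illustrative, not part of the original source): for a plain IEEE
// double this reduces to
//
//   %bits = bitcast double %v to i64
//   %sign = icmp slt i64 %bits, 0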

/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
/// hidden pointer). This is used to check annotating FP libcalls (that could
/// set `errno`) with "int" TBAA metadata is safe. If any floating-point
/// arguments are passed indirectly, setup for the call could be incorrectly
/// optimized out.
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) {
  auto IsIndirect = [&](ABIArgInfo const &info) {
    return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
  };
  return !IsIndirect(FnInfo.getReturnInfo()) &&
         llvm::none_of(FnInfo.arguments(),
                       [&](CGFunctionInfoArgInfo const &ArgInfo) {
                         return IsIndirect(ArgInfo.info);
                       });
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  llvm::CallBase *callOrInvoke = nullptr;
  CGFunctionInfo const *FnInfo = nullptr;
  RValue Call =
      CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
                   /*Chain=*/nullptr, &callOrInvoke, &FnInfo);

  if (unsigned BuiltinID = FD->getBuiltinID()) {
    // Check whether this is a FP math builtin function, such as
    // BI__builtin_expf.
    ASTContext &Context = CGF.getContext();
    bool ConstWithoutErrnoAndExceptions =
        Context.BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
    // Restrict to targets with errno; for example, MacOS doesn't set errno.
    // TODO: Support builtin functions with a complex type returned, e.g.:
    // cacosh
    if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
        !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
        HasNoIndirectArgumentsOrResults(*FnInfo)) {
      // Emit "int" TBAA metadata on FP math libcalls.
      clang::QualType IntTy = Context.IntTy;
      TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
      CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
    }
  }
  return Call;
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
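
// Example (illustrative, not part of the original source): with
// IntrinsicID = Intrinsic::sadd_with_overflow and i32 operands, this emits
//
//   %t     = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
//   %carry = extractvalue { i32, i1 } %t, 1
//
// and returns field 0 (the wrapped sum).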

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = context.getIntWidth(Type);
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
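
// Example (illustrative, not part of the original source): encompassing
// {32, signed} and {32, unsigned} yields {33, signed}; the result must be
// signed, and a signed type needs 33 bits to hold every 32-bit unsigned
// value.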

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
                            ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

namespace {

/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
class StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  bool AddrOfSeen = false;

public:
  const Expr *ArrayIndex = nullptr;
  QualType ArrayElementTy;

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (AddrOfSeen && E->getType()->isArrayType())
      // Avoid forms like '&ptr->array'.
      return nullptr;
    return E;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (ArrayIndex)
      // We don't support multiple subscripts.
      return nullptr;

    AddrOfSeen = false; // '&ptr->array[idx]' is okay.
    ArrayIndex = E->getIdx();
    ArrayElementTy = E->getBase()->getType();
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
    AddrOfSeen = true;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
    AddrOfSeen = false;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
    return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
  }
};

} // end anonymous namespace

/// Find a struct's flexible array member. It may be embedded inside multiple
/// sub-structs, but must still be the last field.
static const FieldDecl *FindFlexibleArrayMemberField(CodeGenFunction &CGF,
                                                     ASTContext &Ctx,
                                                     const RecordDecl *RD) {
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      CGF.getLangOpts().getStrictFlexArraysLevel();

  if (RD->isImplicit())
    return nullptr;

  for (const FieldDecl *FD : RD->fields()) {
    if (Decl::isFlexibleArrayMemberLike(
            Ctx, FD, FD->getType(), StrictFlexArraysLevel,
            /*IgnoreTemplateOrMacroSubstitution=*/true))
      return FD;

    if (const auto *RD = FD->getType()->getAsRecordDecl())
      if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
        return FD;
  }

  return nullptr;
}

/// Calculate the offset of a struct field. It may be embedded inside multiple
/// sub-structs.
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
                           const FieldDecl *FD, int64_t &Offset) {
  if (RD->isImplicit())
    return false;

  // Keep track of the field number ourselves, because the other methods
  // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
  // is laid out.
  uint32_t FieldNo = 0;
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);

  for (const FieldDecl *Field : RD->fields()) {
    if (Field == FD) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    if (const auto *RD = Field->getType()->getAsRecordDecl()) {
      if (GetFieldOffset(Ctx, RD, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }
    }

    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}

static std::optional<int64_t>
GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
  int64_t Offset = 0;

  if (GetFieldOffset(Ctx, RD, FD, Offset))
    return std::optional<int64_t>(Offset);

  return std::nullopt;
}

llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
                                                llvm::Value *EmittedE,
                                                unsigned Type,
                                                llvm::IntegerType *ResType) {
  // Note: If the whole struct is specified in the __bdos (i.e. Visitor
  // returns a DeclRefExpr), the calculation of the whole size of the
  // structure with a flexible array member can be done in two ways:
  //
  //   1) sizeof(struct S) + count * sizeof(typeof(fam))
  //   2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
  //
  // The first will add additional padding after the end of the array
  // allocation while the second method is more precise, but not quite
  // expected from programmers. See
  // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
  // of the topic.
  //
  // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
  // structure. Therefore, because of the above issue, we choose to match what
  // GCC does for consistency's sake.

  StructFieldAccess Visitor;
  E = Visitor.Visit(E);
  if (!E)
    return nullptr;

  const Expr *Idx = Visitor.ArrayIndex;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
      int64_t Val = IL->getValue().getSExtValue();
      if (Val < 0)
        return getDefaultBuiltinObjectSizeResult(Type, ResType);

      // The index is 0, so we don't need to take it into account.
      if (Val == 0)
        Idx = nullptr;
    }
  }

  // __counted_by on either a flexible array member or a pointer into a struct
  // with a flexible array member.
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
                                   Type, ResType);

  // __counted_by on a pointer in a struct.
  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
      ICE && ICE->getCastKind() == CK_LValueToRValue)
    return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
                                    Type, ResType);

  return nullptr;
}

static Value *EmitPositiveResultOrZero(CodeGenFunction &CGF,
                                       llvm::Value *Res,
                                       llvm::Value *Index,
                                       llvm::IntegerType *ResType,
                                       bool IsSigned) {
  // cmp = (array_size >= 0)
  Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
  if (Index)
    // cmp = (cmp && index >= 0)
    Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);

  // return cmp ? result : 0
  return CGF.Builder.CreateSelect(Cmp, Res,
                                  ConstantInt::get(ResType, 0, IsSigned));
}
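
// Example (illustrative, not part of the original source): for a 64-bit
// result with no index, this emits roughly
//
//   %nonneg = icmp sgt i64 %res, -1
//   %size   = select i1 %nonneg, i64 %res, i64 0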

static std::pair<llvm::Value *, llvm::Value *>
GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME,
                      const FieldDecl *ArrayFD, const FieldDecl *CountFD,
                      const Expr *Idx, llvm::IntegerType *ResType,
                      bool IsSigned) {
  // count = ptr->count;
  Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
  if (!Count)
    return std::make_pair<Value *>(nullptr, nullptr);
  Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");

  // index = ptr->index;
  Value *Index = nullptr;
  if (Idx) {
    bool IdxSigned = Idx->getType()->isSignedIntegerType();
    Index = CGF.EmitScalarExpr(Idx);
    Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
  }

  return std::make_pair(Count, Index);
}

llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  //      struct p;
  //      struct s {
  //          /* ... */
  //          struct p **array __attribute__((counted_by(count)));
  //          int count;
  //      };
  //
  // 1) 'ptr->array':
  //
  //      count = ptr->count;
  //
  //      array_element_size = sizeof (*ptr->array);
  //      array_size = count * array_element_size;
  //
  //      result = array_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //
  //      array_element_size = sizeof (*ptr->array);
  //      array_size = count * array_element_size;
  //
  //      casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //      index_size = index * casted_array_element_size;
  //      result = array_size - index_size;
  //
  //      cmp = (result >= 0)
  //      if (index)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;

  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have a CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}

llvm::Value *CodeGenFunction::emitCountedByMemberSize(
    const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD)
    return nullptr;

  // Find the flexible array member and check that it has the __counted_by
  // attribute.
  ASTContext &Ctx = getContext();
  const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
  const FieldDecl *FlexibleArrayMemberFD = nullptr;

  if (Decl::isFlexibleArrayMemberLike(
          Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
          /*IgnoreTemplateOrMacroSubstitution=*/true))
    FlexibleArrayMemberFD = FD;
  else
    FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);

  if (!FlexibleArrayMemberFD ||
      !FlexibleArrayMemberFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the flexible array member's object size using these formulae.
  // (Note: if the calculation is negative, we return 0.):
  //
  //      struct p;
  //      struct s {
  //          /* ... */
  //          int count;
  //          struct p *array[] __attribute__((counted_by(count)));
  //      };
  //
  // 1) 'ptr->array':
  //
  //      count = ptr->count;
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      result = flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      casted_flexible_array_member_element_size =
  //          sizeof (*((cast) ptr->array));
  //      index_size = index * casted_flexible_array_member_element_size;
  //
  //      result = flexible_array_member_size - index_size;
  //
  //      cmp = (result >= 0)
  //      if (index != 0)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;
  //
  // 3) '&ptr->field':
  //
  //      count = ptr->count;
  //      sizeof_struct = sizeof (struct s);
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      field_offset = offsetof (struct s, field);
  //      offset_diff = sizeof_struct - field_offset;
  //
  //      result = offset_diff + flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 4) '&((cast) ptr->field_array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //      sizeof_struct = sizeof (struct s);
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      casted_field_element_size = sizeof (*((cast) ptr->field_array));
  //      field_offset = offsetof (struct s, field)
  //      field_offset += index * casted_field_element_size;
  //
  //      offset_diff = sizeof_struct - field_offset;
  //
  //      result = offset_diff + flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      if (index != 0)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();

  // Explicit cast because otherwise the CharWidth will promote an i32's into
  // u64's leading to overflows.
  int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());

  // field_offset = offsetof (struct s, field);
  Value *FieldOffset = nullptr;
  if (FlexibleArrayMemberFD != FD) {
    std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
    if (!Offset)
      return nullptr;
    FieldOffset =
        llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
  }

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // flexible_array_member_element_size = sizeof (*ptr->array);
  const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
  CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
  auto *FlexibleArrayMemberElementSize =
      llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

  // flexible_array_member_size = count * flexible_array_member_element_size;
  Value *FlexibleArrayMemberSize =
      Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
                        "flexible_array_member_size", !IsSigned, IsSigned);

  Value *Result = nullptr;
  if (FlexibleArrayMemberFD == FD) {
    if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
      // casted_flexible_array_member_element_size =
      //     sizeof (*((cast) ptr->array));
      llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
          FlexibleArrayMemberElementSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        CharUnits BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
        CastedFlexibleArrayMemberElementSize =
            llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
      }

      // index_size = index * casted_flexible_array_member_element_size;
      Value *IndexSize =
          Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
                            "index_size", !IsSigned, IsSigned);

      // result = flexible_array_member_size - index_size;
      Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
                                 !IsSigned, IsSigned);
    } else { // Option (1) 'ptr->array'
      // result = flexible_array_member_size;
      Result = FlexibleArrayMemberSize;
    }
  } else {
    // sizeof_struct = sizeof (struct s);
    llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
    const llvm::DataLayout &Layout = CGM.getDataLayout();
    TypeSize Size = Layout.getTypeSizeInBits(StructTy);
    Value *SizeofStruct =
        llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);

    if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
      // casted_field_element_size = sizeof (*((cast) ptr->field_array));
      CharUnits BaseSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
      } else {
        const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
        BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
      }

      llvm::ConstantInt *CastedFieldElementSize =
          llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

      // field_offset += index * casted_field_element_size;
      Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
                                     "field_offset", !IsSigned, IsSigned);
      FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
    }
    // Option (3) '&ptr->field', and Option (4) continuation.
    // offset_diff = sizeof_struct - field_offset;
    Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
                                          "offset_diff", !IsSigned, IsSigned);

    // result = offset_diff + flexible_array_member_size;
    Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1496
1497/// Returns a Value corresponding to the size of the given expression.
1498/// This Value may be either of the following:
1499/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1500/// it)
1501/// - A call to the @llvm.objectsize intrinsic
1502///
1503/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1504/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1505/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1506llvm::Value *
1507CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1508 llvm::IntegerType *ResType,
1509 llvm::Value *EmittedE, bool IsDynamic) {
1510 // We need to reference an argument if the pointer is a parameter with the
1511 // pass_object_size attribute.
1512 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1513 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1514 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1515 if (Param != nullptr && PS != nullptr &&
1516 areBOSTypesCompatible(PS->getType(), Type)) {
1517 auto Iter = SizeArguments.find(Param);
1518 assert(Iter != SizeArguments.end());
1519
1520 const ImplicitParamDecl *D = Iter->second;
1521 auto DIter = LocalDeclMap.find(D);
1522 assert(DIter != LocalDeclMap.end());
1523
1524 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1525 getContext().getSizeType(), E->getBeginLoc());
1526 }
1527 }
1528
1529 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1530 // evaluate E for side-effects. In either case, we shouldn't lower to
1531 // @llvm.objectsize.
1532 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1533 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1534
1535 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1536 assert(Ptr->getType()->isPointerTy() &&
1537 "Non-pointer passed to __builtin_object_size?");
1538
1539 if (IsDynamic)
1540 // Emit special code for a flexible array member with the "counted_by"
1541 // attribute.
1542 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1543 return V;
1544
1545 Function *F =
1546 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1547
1548 // LLVM only supports 0 and 2; make sure we pass that along as a boolean.
1549 Value *Min = Builder.getInt1((Type & 2) != 0);
1550 // For GCC compatibility, __builtin_object_size treats NULL as an unknown size.
1551 Value *NullIsUnknown = Builder.getTrue();
1552 Value *Dynamic = Builder.getInt1(IsDynamic);
1553 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1554}
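// Illustration (not part of CGBuiltin.cpp): a user-level sketch of the
// lowering above; the helper name is hypothetical and the IR in the comment
// is what the code above is expected to emit for Type == 0.
static inline unsigned long example_object_size(void *p) {
  // Emits: call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false /*min*/,
  //                                         i1 true /*null unknown*/,
  //                                         i1 false /*dynamic*/)
  return __builtin_object_size(p, 0);
}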
1555
1556namespace {
1557/// A struct to generically describe a bit test intrinsic.
1558struct BitTest {
1559 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
1560 enum InterlockingKind : uint8_t {
1561 Unlocked,
1562 Sequential,
1563 Acquire,
1564 Release,
1565 NoFence
1566 };
1567
1568 ActionKind Action;
1569 InterlockingKind Interlocking;
1570 bool Is64Bit;
1571
1572 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1573};
1574
1575} // namespace
1576
1577BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1578 switch (BuiltinID) {
1579 // Main portable variants.
1580 case Builtin::BI_bittest:
1581 return {TestOnly, Unlocked, false};
1582 case Builtin::BI_bittestandcomplement:
1583 return {Complement, Unlocked, false};
1584 case Builtin::BI_bittestandreset:
1585 return {Reset, Unlocked, false};
1586 case Builtin::BI_bittestandset:
1587 return {Set, Unlocked, false};
1588 case Builtin::BI_interlockedbittestandreset:
1589 return {Reset, Sequential, false};
1590 case Builtin::BI_interlockedbittestandset:
1591 return {Set, Sequential, false};
1592
1593 // 64-bit variants.
1594 case Builtin::BI_bittest64:
1595 return {TestOnly, Unlocked, true};
1596 case Builtin::BI_bittestandcomplement64:
1597 return {Complement, Unlocked, true};
1598 case Builtin::BI_bittestandreset64:
1599 return {Reset, Unlocked, true};
1600 case Builtin::BI_bittestandset64:
1601 return {Set, Unlocked, true};
1602 case Builtin::BI_interlockedbittestandreset64:
1603 return {Reset, Sequential, true};
1604 case Builtin::BI_interlockedbittestandset64:
1605 return {Set, Sequential, true};
1606
1607 // ARM/AArch64-specific ordering variants.
1608 case Builtin::BI_interlockedbittestandset_acq:
1609 return {Set, Acquire, false};
1610 case Builtin::BI_interlockedbittestandset_rel:
1611 return {Set, Release, false};
1612 case Builtin::BI_interlockedbittestandset_nf:
1613 return {Set, NoFence, false};
1614 case Builtin::BI_interlockedbittestandreset_acq:
1615 return {Reset, Acquire, false};
1616 case Builtin::BI_interlockedbittestandreset_rel:
1617 return {Reset, Release, false};
1618 case Builtin::BI_interlockedbittestandreset_nf:
1619 return {Reset, NoFence, false};
1620 case Builtin::BI_interlockedbittestandreset64_acq:
1621 return {Reset, Acquire, false};
1622 case Builtin::BI_interlockedbittestandreset64_rel:
1623 return {Reset, Release, false};
1624 case Builtin::BI_interlockedbittestandreset64_nf:
1625 return {Reset, NoFence, false};
1626 case Builtin::BI_interlockedbittestandset64_acq:
1627 return {Set, Acquire, false};
1628 case Builtin::BI_interlockedbittestandset64_rel:
1629 return {Set, Release, false};
1630 case Builtin::BI_interlockedbittestandset64_nf:
1631 return {Set, NoFence, false};
1632 }
1633 llvm_unreachable("expected only bittest intrinsics");
1634}
1635
1636static char bitActionToX86BTCode(BitTest::ActionKind A) {
1637 switch (A) {
1638 case BitTest::TestOnly: return '\0';
1639 case BitTest::Complement: return 'c';
1640 case BitTest::Reset: return 'r';
1641 case BitTest::Set: return 's';
1642 }
1643 llvm_unreachable("invalid action");
1644}
1645
1646 static Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
1647 BitTest BT,
1648 const CallExpr *E, Value *BitBase,
1649 Value *BitPos) {
1650 char Action = bitActionToX86BTCode(BT.Action);
1651 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1652
1653 // Build the assembly.
1654 SmallString<64> Asm;
1655 raw_svector_ostream AsmOS(Asm);
1656 if (BT.Interlocking != BitTest::Unlocked)
1657 AsmOS << "lock ";
1658 AsmOS << "bt";
1659 if (Action)
1660 AsmOS << Action;
1661 AsmOS << SizeSuffix << " $2, ($1)";
1662
1663 // Build the constraints. FIXME: We should support immediates when possible.
1664 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1665 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1666 if (!MachineClobbers.empty()) {
1667 Constraints += ',';
1668 Constraints += MachineClobbers;
1669 }
1670 llvm::IntegerType *IntType = llvm::IntegerType::get(
1671 CGF.getLLVMContext(),
1672 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1673 llvm::FunctionType *FTy =
1674 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1675
1676 llvm::InlineAsm *IA =
1677 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1678 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1679}
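// Illustration (not part of CGBuiltin.cpp): for _interlockedbittestandset the
// code above assembles "lock btsl $2, ($1)" with constraints
// "={@ccc},r,r,~{cc},~{memory}", so the carry flag set by BTS comes back as
// the i8 result. A user-level sketch, assuming an MSVC-compatible x86 target
// (the declaration normally comes from <intrin.h>):
//
//   unsigned char example_set_bit(long volatile *Flags, long Pos) {
//     // BTS scales an arbitrarily large bit index into memory itself, so no
//     // explicit byte addressing is emitted on x86.
//     return _interlockedbittestandset(Flags, Pos);
//   }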
1680
1681static llvm::AtomicOrdering
1682getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1683 switch (I) {
1684 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1685 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1686 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1687 case BitTest::Release: return llvm::AtomicOrdering::Release;
1688 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1689 }
1690 llvm_unreachable("invalid interlocking");
1691}
1692
1693static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1694 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1695 llvm::Type *ArgType = ArgValue->getType();
1696
1697 // Boolean vectors can be cast directly to their bitfield representation. We
1698 // intentionally do not round up to the next power-of-two size and let LLVM
1699 // handle the trailing bits.
1700 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1701 VT && VT->getElementType()->isIntegerTy(1)) {
1702 llvm::Type *StorageType =
1703 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1704 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1705 }
1706
1707 return ArgValue;
1708}
1709
1710/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1711/// bits and a bit position and read and optionally modify the bit at that
1712/// position. The position index can be arbitrarily large, i.e. it can be larger
1713/// than 31 or 63, so we need an indexed load in the general case.
1714static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1715 unsigned BuiltinID,
1716 const CallExpr *E) {
1717 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1718 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1719
1720 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1721
1722 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1723 // indexing operation internally. Use them if possible.
1724 if (CGF.getTarget().getTriple().isX86())
1725 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1726
1727 // Otherwise, use generic code to load one byte and test the bit. Use all but
1728 // the bottom three bits as the array index, and the bottom three bits to form
1729 // a mask.
1730 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1731 Value *ByteIndex = CGF.Builder.CreateAShr(
1732 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1733 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1734 "bittest.byteaddr"),
1735 CGF.Int8Ty, CharUnits::One());
1736 Value *PosLow =
1737 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1738 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1739
1740 // The updating instructions will need a mask.
1741 Value *Mask = nullptr;
1742 if (BT.Action != BitTest::TestOnly) {
1743 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1744 "bittest.mask");
1745 }
1746
1747 // Check the action and ordering of the interlocked intrinsics.
1748 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1749
1750 Value *OldByte = nullptr;
1751 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1752 // Emit a combined atomicrmw load/store operation for the interlocked
1753 // intrinsics.
1754 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1755 if (BT.Action == BitTest::Reset) {
1756 Mask = CGF.Builder.CreateNot(Mask);
1757 RMWOp = llvm::AtomicRMWInst::And;
1758 }
1759 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1760 } else {
1761 // Emit a plain load for the non-interlocked intrinsics.
1762 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1763 Value *NewByte = nullptr;
1764 switch (BT.Action) {
1765 case BitTest::TestOnly:
1766 // Don't store anything.
1767 break;
1768 case BitTest::Complement:
1769 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1770 break;
1771 case BitTest::Reset:
1772 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1773 break;
1774 case BitTest::Set:
1775 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1776 break;
1777 }
1778 if (NewByte)
1779 CGF.Builder.CreateStore(NewByte, ByteAddr);
1780 }
1781
1782 // However we loaded the old byte, either by plain load or atomicrmw, shift
1783 // the bit into the low position and mask it to 0 or 1.
1784 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1785 return CGF.Builder.CreateAnd(
1786 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1787}
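// Illustration (not part of CGBuiltin.cpp): the generic (non-x86) lowering
// above is equivalent to this portable sketch of a non-interlocked
// _bittestandset; the names are hypothetical.
static inline unsigned char example_bittestandset(unsigned char *Base,
                                                  long Pos) {
  unsigned char *Byte = Base + (Pos >> 3);                 // bittest.byteaddr
  unsigned char Mask = (unsigned char)(1 << (Pos & 0x7));  // bittest.mask
  unsigned char Old = *Byte;                               // bittest.byte
  *Byte = Old | Mask;                                      // BitTest::Set
  return (Old >> (Pos & 0x7)) & 1;                         // bittest.res
}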
1788
1789namespace {
1790enum class MSVCSetJmpKind {
1791 _setjmpex,
1792 _setjmp3,
1793 _setjmp
1794};
1795}
1796
1797/// MSVC handles setjmp a bit differently on different platforms. On every
1798/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1799/// parameters can be passed as variadic arguments, but we always pass none.
1800static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1801 const CallExpr *E) {
1802 llvm::Value *Arg1 = nullptr;
1803 llvm::Type *Arg1Ty = nullptr;
1804 StringRef Name;
1805 bool IsVarArg = false;
1806 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1807 Name = "_setjmp3";
1808 Arg1Ty = CGF.Int32Ty;
1809 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1810 IsVarArg = true;
1811 } else {
1812 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1813 Arg1Ty = CGF.Int8PtrTy;
1814 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1815 Arg1 = CGF.Builder.CreateCall(
1816 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1817 } else
1818 Arg1 = CGF.Builder.CreateCall(
1819 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1820 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1821 }
1822
1823 // Mark the call site and declaration with ReturnsTwice.
1824 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1825 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1826 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1827 llvm::Attribute::ReturnsTwice);
1828 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1829 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1830 ReturnsTwiceAttr, /*Local=*/true);
1831
1832 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1833 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1834 llvm::Value *Args[] = {Buf, Arg1};
1835 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1836 CB->setAttributes(ReturnsTwiceAttr);
1837 return RValue::get(CB);
1838}
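// Illustration (not part of CGBuiltin.cpp): on 32-bit x86, `_setjmp(Buf)`
// becomes a variadic `call i32 (ptr, i32, ...) @_setjmp3(ptr %Buf, i32 0)`;
// on AArch64, `_setjmpex(Buf)` receives `@llvm.sponentry()` and other targets
// receive `@llvm.frameaddress(i32 0)` as the second argument. All variants
// are marked returns_twice at both the call site and the declaration.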
1839
1840// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1841 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1842 const CallExpr *E) {
1843 switch (BuiltinID) {
1844 case MSVCIntrin::_BitScanForward:
1845 case MSVCIntrin::_BitScanReverse: {
1846 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1847 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1848
1849 llvm::Type *ArgType = ArgValue->getType();
1850 llvm::Type *IndexType = IndexAddress.getElementType();
1851 llvm::Type *ResultType = ConvertType(E->getType());
1852
1853 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1854 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1855 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1856
1857 BasicBlock *Begin = Builder.GetInsertBlock();
1858 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1859 Builder.SetInsertPoint(End);
1860 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1861
1862 Builder.SetInsertPoint(Begin);
1863 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1864 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1865 Builder.CreateCondBr(IsZero, End, NotZero);
1866 Result->addIncoming(ResZero, Begin);
1867
1868 Builder.SetInsertPoint(NotZero);
1869
1870 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1871 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1872 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1873 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1874 Builder.CreateStore(ZeroCount, IndexAddress, false);
1875 } else {
1876 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1877 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1878
1879 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1880 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1881 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1882 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1883 Builder.CreateStore(Index, IndexAddress, false);
1884 }
1885 Builder.CreateBr(End);
1886 Result->addIncoming(ResOne, NotZero);
1887
1888 Builder.SetInsertPoint(End);
1889 return Result;
1890 }
1891 case MSVCIntrin::_InterlockedAnd:
1892 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1893 case MSVCIntrin::_InterlockedExchange:
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1895 case MSVCIntrin::_InterlockedExchangeAdd:
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1897 case MSVCIntrin::_InterlockedExchangeSub:
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1899 case MSVCIntrin::_InterlockedOr:
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1901 case MSVCIntrin::_InterlockedXor:
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1903 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1904 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1905 AtomicOrdering::Acquire);
1906 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1907 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1908 AtomicOrdering::Release);
1909 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1910 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1911 AtomicOrdering::Monotonic);
1912 case MSVCIntrin::_InterlockedExchange_acq:
1913 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1914 AtomicOrdering::Acquire);
1915 case MSVCIntrin::_InterlockedExchange_rel:
1916 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1917 AtomicOrdering::Release);
1918 case MSVCIntrin::_InterlockedExchange_nf:
1919 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1920 AtomicOrdering::Monotonic);
1921 case MSVCIntrin::_InterlockedCompareExchange:
1922 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1923 case MSVCIntrin::_InterlockedCompareExchange_acq:
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1925 case MSVCIntrin::_InterlockedCompareExchange_rel:
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1927 case MSVCIntrin::_InterlockedCompareExchange_nf:
1928 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1929 case MSVCIntrin::_InterlockedCompareExchange128:
1930 return EmitAtomicCmpXchg128ForMSIntrin(
1931 *this, E, AtomicOrdering::SequentiallyConsistent);
1932 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1933 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1934 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1936 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1937 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1938 case MSVCIntrin::_InterlockedOr_acq:
1939 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1940 AtomicOrdering::Acquire);
1941 case MSVCIntrin::_InterlockedOr_rel:
1942 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1943 AtomicOrdering::Release);
1944 case MSVCIntrin::_InterlockedOr_nf:
1945 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1946 AtomicOrdering::Monotonic);
1947 case MSVCIntrin::_InterlockedXor_acq:
1948 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1949 AtomicOrdering::Acquire);
1950 case MSVCIntrin::_InterlockedXor_rel:
1951 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1952 AtomicOrdering::Release);
1953 case MSVCIntrin::_InterlockedXor_nf:
1954 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1955 AtomicOrdering::Monotonic);
1956 case MSVCIntrin::_InterlockedAnd_acq:
1957 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1958 AtomicOrdering::Acquire);
1959 case MSVCIntrin::_InterlockedAnd_rel:
1960 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1961 AtomicOrdering::Release);
1962 case MSVCIntrin::_InterlockedAnd_nf:
1963 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1964 AtomicOrdering::Monotonic);
1965 case MSVCIntrin::_InterlockedIncrement_acq:
1966 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1967 case MSVCIntrin::_InterlockedIncrement_rel:
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1969 case MSVCIntrin::_InterlockedIncrement_nf:
1970 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1971 case MSVCIntrin::_InterlockedDecrement_acq:
1972 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1973 case MSVCIntrin::_InterlockedDecrement_rel:
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1975 case MSVCIntrin::_InterlockedDecrement_nf:
1976 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1977
1978 case MSVCIntrin::_InterlockedDecrement:
1979 return EmitAtomicDecrementValue(*this, E);
1980 case MSVCIntrin::_InterlockedIncrement:
1981 return EmitAtomicIncrementValue(*this, E);
1982
1983 case MSVCIntrin::__fastfail: {
1984 // Request immediate process termination from the kernel. The instruction
1985 // sequences to do this are documented on MSDN:
1986 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1987 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1988 StringRef Asm, Constraints;
1989 switch (ISA) {
1990 default:
1991 ErrorUnsupported(E, "__fastfail call for this architecture");
1992 break;
1993 case llvm::Triple::x86:
1994 case llvm::Triple::x86_64:
1995 Asm = "int $$0x29";
1996 Constraints = "{cx}";
1997 break;
1998 case llvm::Triple::thumb:
1999 Asm = "udf #251";
2000 Constraints = "{r0}";
2001 break;
2002 case llvm::Triple::aarch64:
2003 Asm = "brk #0xF003";
2004 Constraints = "{w0}";
2005 }
2006 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2007 llvm::InlineAsm *IA =
2008 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2009 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2010 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2011 llvm::Attribute::NoReturn);
2012 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2013 CI->setAttributes(NoReturnAttr);
2014 return CI;
2015 }
2016 }
2017 llvm_unreachable("Incorrect MSVC intrinsic!");
2018}
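// Illustration (not part of CGBuiltin.cpp): the _BitScanForward block above
// produces control flow equivalent to this portable sketch (names are
// hypothetical). Passing "is zero poison" = true to cttz is safe because the
// zero case is branched around first.
static inline unsigned char example_bitscan_forward(unsigned long *Index,
                                                    unsigned long Mask) {
  if (Mask == 0)
    return 0;                                   // bitscan_result = 0
  *Index = (unsigned long)__builtin_ctzl(Mask); // cttz(Mask, /*poison*/true)
  return 1;                                     // bitscan_result = 1
}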
2019
2020namespace {
2021// ARC cleanup for __builtin_os_log_format
2022struct CallObjCArcUse final : EHScopeStack::Cleanup {
2023 CallObjCArcUse(llvm::Value *object) : object(object) {}
2024 llvm::Value *object;
2025
2026 void Emit(CodeGenFunction &CGF, Flags flags) override {
2027 CGF.EmitARCIntrinsicUse(object);
2028 }
2029};
2030}
2031
2032 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
2033 BuiltinCheckKind Kind) {
2034 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2035 "Unsupported builtin check kind");
2036
2037 Value *ArgValue = EmitBitCountExpr(*this, E);
2038 if (!SanOpts.has(SanitizerKind::Builtin))
2039 return ArgValue;
2040
2041 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2042 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2043 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2044 Value *Cond = Builder.CreateICmpNE(
2045 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2046 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2047 {EmitCheckSourceLocation(E->getExprLoc()),
2048 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2049 {});
2050 return ArgValue;
2051}
2052
2053 Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) {
2054 Value *ArgValue = EvaluateExprAsBool(E);
2055 if (!SanOpts.has(SanitizerKind::Builtin))
2056 return ArgValue;
2057
2058 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2059 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2060 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2061 EmitCheck(
2062 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2063 {EmitCheckSourceLocation(E->getExprLoc()),
2064 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2065 {});
2066 return ArgValue;
2067}
2068
2069static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2070 return CGF.Builder.CreateBinaryIntrinsic(
2071 Intrinsic::abs, ArgValue,
2072 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2073}
2074
2075 static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
2076 bool SanitizeOverflow) {
2077 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2078
2079 // Try to eliminate overflow check.
2080 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2081 if (!VCI->isMinSignedValue())
2082 return EmitAbs(CGF, ArgValue, true);
2083 }
2084
2085 SmallVector<SanitizerKind::SanitizerOrdinal, 1> Ordinals;
2086 SanitizerHandler CheckHandler;
2087 if (SanitizeOverflow) {
2088 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2089 CheckHandler = SanitizerHandler::NegateOverflow;
2090 } else
2091 CheckHandler = SanitizerHandler::SubOverflow;
2092
2093 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2094
2095 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2096 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2097 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2098 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2099 Value *NotOverflow = CGF.Builder.CreateNot(
2100 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2101
2102 // TODO: support -ftrapv-handler.
2103 if (SanitizeOverflow) {
2104 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2105 CheckHandler,
2106 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
2107 CGF.EmitCheckTypeDescriptor(E->getType())},
2108 {ArgValue});
2109 } else
2110 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2111
2112 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2113 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2114}
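// Illustration (not part of CGBuiltin.cpp): the ssub.with.overflow path above
// exists because negating INT_MIN overflows. A user-level sketch:
static inline int example_checked_abs(int X) {
  // With -fsanitize=signed-integer-overflow this diagnoses X == INT_MIN;
  // otherwise it emits select(X < 0, 0 - X, X) guarded by the check above.
  return __builtin_abs(X);
}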
2115
2116/// Get the argument type for arguments to os_log_helper.
2117 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
2118 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2119 return C.getCanonicalType(UnsignedTy);
2120}
2121
2122 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
2123 const analyze_os_log::OSLogBufferLayout &Layout,
2124 CharUnits BufferAlignment) {
2125 ASTContext &Ctx = getContext();
2126
2127 SmallString<64> Name;
2128 {
2129 raw_svector_ostream OS(Name);
2130 OS << "__os_log_helper";
2131 OS << "_" << BufferAlignment.getQuantity();
2132 OS << "_" << int(Layout.getSummaryByte());
2133 OS << "_" << int(Layout.getNumArgsByte());
2134 for (const auto &Item : Layout.Items)
2135 OS << "_" << int(Item.getSizeByte()) << "_"
2136 << int(Item.getDescriptorByte());
2137 }
2138
2139 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2140 return F;
2141
2142 llvm::SmallVector<QualType, 4> ArgTys;
2143 FunctionArgList Args;
2144 Args.push_back(ImplicitParamDecl::Create(
2145 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2146 ImplicitParamKind::Other));
2147 ArgTys.emplace_back(Ctx.VoidPtrTy);
2148
2149 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2150 char Size = Layout.Items[I].getSizeByte();
2151 if (!Size)
2152 continue;
2153
2154 QualType ArgTy = getOSLogArgType(Ctx, Size);
2155 Args.push_back(ImplicitParamDecl::Create(
2156 Ctx, nullptr, SourceLocation(),
2157 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2158 ImplicitParamKind::Other));
2159 ArgTys.emplace_back(ArgTy);
2160 }
2161
2162 QualType ReturnTy = Ctx.VoidTy;
2163
2164 // The helper function has linkonce_odr linkage to enable the linker to merge
2165 // identical functions. To ensure the merging always happens, 'noinline' is
2166 // attached to the function when compiling with -Oz.
2167 const CGFunctionInfo &FI =
2168 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2169 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2170 llvm::Function *Fn = llvm::Function::Create(
2171 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2172 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2173 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2174 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2175 Fn->setDoesNotThrow();
2176
2177 // Attach 'noinline' at -Oz.
2178 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2179 Fn->addFnAttr(llvm::Attribute::NoInline);
2180
2181 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2182 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2183
2184 // Create a scope with an artificial location for the body of this function.
2185 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2186
2187 CharUnits Offset;
2188 Address BufAddr = makeNaturalAddressForPointer(
2189 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2190 BufferAlignment);
2191 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2192 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2193 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2194 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2195
2196 unsigned I = 1;
2197 for (const auto &Item : Layout.Items) {
2198 Builder.CreateStore(
2199 Builder.getInt8(Item.getDescriptorByte()),
2200 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2201 Builder.CreateStore(
2202 Builder.getInt8(Item.getSizeByte()),
2203 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2204
2205 CharUnits Size = Item.size();
2206 if (!Size.getQuantity())
2207 continue;
2208
2209 Address Arg = GetAddrOfLocalVar(Args[I]);
2210 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2211 Addr = Addr.withElementType(Arg.getElementType());
2212 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2213 Offset += Size;
2214 ++I;
2215 }
2216
2217 FinishFunction();
2218
2219 return Fn;
2220}
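// Illustration (not part of CGBuiltin.cpp): the helper emitted above writes a
// buffer laid out as
//   [ summary : 1 byte | numArgs : 1 byte |
//     { argDescriptor : 1 byte | argSize : 1 byte | argData : argSize } ... ]
// which is why Offset advances by two bytes per item before each payload.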
2221
2222 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
2223 assert(E.getNumArgs() >= 2 &&
2224 "__builtin_os_log_format takes at least 2 arguments");
2225 ASTContext &Ctx = getContext();
2226 analyze_os_log::OSLogBufferLayout Layout;
2227 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
2228 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2229
2230 // Ignore argument 1, the format string. It is not currently used.
2231 CallArgList Args;
2232 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2233
2234 for (const auto &Item : Layout.Items) {
2235 int Size = Item.getSizeByte();
2236 if (!Size)
2237 continue;
2238
2239 llvm::Value *ArgVal;
2240
2241 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2242 uint64_t Val = 0;
2243 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2244 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2245 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2246 } else if (const Expr *TheExpr = Item.getExpr()) {
2247 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2248
2249 // If a temporary object that requires destruction after the full
2250 // expression is passed, push a lifetime-extended cleanup to extend its
2251 // lifetime to the end of the enclosing block scope.
2252 auto LifetimeExtendObject = [&](const Expr *E) {
2253 E = E->IgnoreParenCasts();
2254 // Extend lifetimes of objects returned by function calls and message
2255 // sends.
2256
2257 // FIXME: We should do this in other cases in which temporaries are
2258 // created including arguments of non-ARC types (e.g., C++
2259 // temporaries).
2260 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
2261 return true;
2262 return false;
2263 };
2264
2265 if (TheExpr->getType()->isObjCRetainableType() &&
2266 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2267 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2268 "Only scalar can be a ObjC retainable type");
2269 if (!isa<Constant>(ArgVal)) {
2270 CleanupKind Cleanup = getARCCleanupKind();
2271 QualType Ty = TheExpr->getType();
2272 RawAddress Alloca = RawAddress::invalid();
2273 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2274 ArgVal = EmitARCRetain(Ty, ArgVal);
2275 Builder.CreateStore(ArgVal, Addr);
2276 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2277 CodeGenFunction::destroyARCStrongPrecise,
2278 Cleanup & EHCleanup);
2279
2280 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2281 // argument has to be alive.
2282 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2283 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
2284 }
2285 }
2286 } else {
2287 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2288 }
2289
2290 unsigned ArgValSize =
2291 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2292 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2293 ArgValSize);
2294 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2295 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2296 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2297 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2298 Args.add(RValue::get(ArgVal), ArgTy);
2299 }
2300
2301 const CGFunctionInfo &FI =
2302 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2303 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2304 Layout, BufAddr.getAlignment());
2305 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2306 return RValue::get(BufAddr, *this);
2307}
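// Illustration (not part of CGBuiltin.cpp): a user-level sketch of the
// builtins this code lowers. The format string only fixes the layout at
// compile time; the emitted call passes the buffer pointer plus the promoted
// argument values to the __os_log_helper_* function generated above.
static inline void example_os_log(void) {
  char buf[__builtin_os_log_format_buffer_size("%d", 42)];
  __builtin_os_log_format(buf, "%d", 42);
}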
2308
2309 static bool isSpecialUnsignedMultiplySignedResult(
2310 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2311 WidthAndSignedness ResultInfo) {
2312 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2313 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2314 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2315}
2316
2317 static RValue EmitCheckedUnsignedMultiplySignedResult(
2318 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2319 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2320 const clang::Expr *ResultArg, QualType ResultQTy,
2321 WidthAndSignedness ResultInfo) {
2322 assert(isSpecialUnsignedMultiplySignedResult(
2323 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2324 "Cannot specialize this multiply");
2325
2326 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2327 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2328
2329 llvm::Value *HasOverflow;
2330 llvm::Value *Result = EmitOverflowIntrinsic(
2331 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2332
2333 // The intrinsic call will detect overflow when the value is > UINT_MAX;
2334 // however, since the original builtin had a signed result, we need to report
2335 // an overflow when the result is greater than INT_MAX.
2336 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2337 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2338
2339 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2340 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2341
2342 bool isVolatile =
2343 ResultArg->getType()->getPointeeType().isVolatileQualified();
2344 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2345 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2346 isVolatile);
2347 return RValue::get(HasOverflow);
2348}
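// Illustration (not part of CGBuiltin.cpp): the specialization above fires
// for same-width unsigned * unsigned -> signed checked multiplies, e.g.:
static inline bool example_umul_to_int(unsigned A, unsigned B, int *R) {
  // One umul.with.overflow plus a compare against INT_MAX, instead of the
  // generic widening multiply.
  return __builtin_mul_overflow(A, B, R);
}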
2349
2350/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2351static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2352 WidthAndSignedness Op1Info,
2353 WidthAndSignedness Op2Info,
2354 WidthAndSignedness ResultInfo) {
2355 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2356 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2357 Op1Info.Signed != Op2Info.Signed;
2358}
2359
2360/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2361/// the generic checked-binop irgen.
2362static RValue
2363 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2364 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2365 WidthAndSignedness Op2Info,
2366 const clang::Expr *ResultArg, QualType ResultQTy,
2367 WidthAndSignedness ResultInfo) {
2368 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2369 Op2Info, ResultInfo) &&
2370 "Not a mixed-sign multipliction we can specialize");
2371
2372 // Emit the signed and unsigned operands.
2373 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2374 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2375 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2376 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2377 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2378 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2379
2380 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2381 if (SignedOpWidth < UnsignedOpWidth)
2382 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2383 if (UnsignedOpWidth < SignedOpWidth)
2384 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2385
2386 llvm::Type *OpTy = Signed->getType();
2387 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2388 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2389 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2390 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2391
2392 // Take the absolute value of the signed operand.
2393 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2394 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2395 llvm::Value *AbsSigned =
2396 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2397
2398 // Perform a checked unsigned multiplication.
2399 llvm::Value *UnsignedOverflow;
2400 llvm::Value *UnsignedResult =
2401 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2402 Unsigned, UnsignedOverflow);
2403
2404 llvm::Value *Overflow, *Result;
2405 if (ResultInfo.Signed) {
2406 // Signed overflow occurs if the result is greater than INT_MAX or less
2407 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2408 auto IntMax =
2409 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2410 llvm::Value *MaxResult =
2411 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2412 CGF.Builder.CreateZExt(IsNegative, OpTy));
2413 llvm::Value *SignedOverflow =
2414 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2415 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2416
2417 // Prepare the signed result (possibly by negating it).
2418 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2419 llvm::Value *SignedResult =
2420 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2421 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2422 } else {
2423 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2424 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2425 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2426 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2427 if (ResultInfo.Width < OpWidth) {
2428 auto IntMax =
2429 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2430 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2431 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2432 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2433 }
2434
2435 // Negate the product if it would be negative in infinite precision.
2436 Result = CGF.Builder.CreateSelect(
2437 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2438
2439 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2440 }
2441 assert(Overflow && Result && "Missing overflow or result");
2442
2443 bool isVolatile =
2444 ResultArg->getType()->getPointeeType().isVolatileQualified();
2445 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2446 isVolatile);
2447 return RValue::get(Overflow);
2448}
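// Illustration (not part of CGBuiltin.cpp): the mixed-sign path above handles
// calls like this one, taking |Signed| first so a single umul.with.overflow
// suffices:
static inline bool example_mixed_mul(int S, unsigned U, unsigned *Out) {
  return __builtin_mul_overflow(S, U, Out);
}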
2449
2450static bool
2451 TypeRequiresBuiltinLaunderImp(ASTContext &Ctx, QualType Ty,
2452 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2453 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2454 Ty = Ctx.getBaseElementType(Arr);
2455
2456 const auto *Record = Ty->getAsCXXRecordDecl();
2457 if (!Record)
2458 return false;
2459
2460 // We've already checked this type, or are in the process of checking it.
2461 if (!Seen.insert(Record).second)
2462 return false;
2463
2464 assert(Record->hasDefinition() &&
2465 "Incomplete types should already be diagnosed");
2466
2467 if (Record->isDynamicClass())
2468 return true;
2469
2470 for (FieldDecl *F : Record->fields()) {
2471 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2472 return true;
2473 }
2474 return false;
2475}
2476
2477/// Determine if the specified type requires laundering by checking if it is a
2478/// dynamic class type or contains a subobject which is a dynamic class type.
2479 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2480 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2481 return false;
2482 llvm::SmallPtrSet<const Decl *, 16> Seen;
2483 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2484}
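// Illustration (not part of CGBuiltin.cpp): under -fstrict-vtable-pointers a
// dynamic class type requires laundering, so the call below is emitted as
// @llvm.launder.invariant.group rather than as a no-op.
struct ExamplePoly { virtual ~ExamplePoly(); };
static inline ExamplePoly *example_relaunder(ExamplePoly *P) {
  return __builtin_launder(P);
}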
2485
2486RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2487 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2488 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2489
2490 // The builtin's shift arg may have a different type than the source arg and
2491 // result, but the LLVM intrinsic uses the same type for all values.
2492 llvm::Type *Ty = Src->getType();
2493 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2494
2495 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2496 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2497 Function *F = CGM.getIntrinsic(IID, Ty);
2498 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2499}
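// Illustration (not part of CGBuiltin.cpp): a rotate is a funnel shift of a
// value with itself, so this call lowers to @llvm.fshl.i8(%X, %X, %N):
static inline unsigned char example_rol8(unsigned char X, unsigned char N) {
  return __builtin_rotateleft8(X, N);
}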
2500
2501// Map math builtins for long-double to f128 version.
2502static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2503 switch (BuiltinID) {
2504#define MUTATE_LDBL(func) \
2505 case Builtin::BI__builtin_##func##l: \
2506 return Builtin::BI__builtin_##func##f128;
2537 MUTATE_LDBL(nans)
2538 MUTATE_LDBL(inf)
2557 MUTATE_LDBL(huge_val)
2567#undef MUTATE_LDBL
2568 default:
2569 return BuiltinID;
2570 }
2571}
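// Illustration (not part of CGBuiltin.cpp): on powerpc64le with IEEE-quad
// long double (-mabi=ieeelongdouble), MUTATE_LDBL(inf) above remaps a call to
// __builtin_infl so that it is emitted as __builtin_inff128 before any of the
// specialized handling below runs.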
2572
2573static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2574 Value *V) {
2575 if (CGF.Builder.getIsFPConstrained() &&
2576 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2577 if (Value *Result =
2578 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2579 return Result;
2580 }
2581 return nullptr;
2582}
2583
2584 static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
2585 const FunctionDecl *FD) {
2586 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2587 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2588 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2589
2590 SmallVector<llvm::Value *, 16> Args;
2591 for (auto &&FormalTy : FnTy->params())
2592 Args.push_back(llvm::PoisonValue::get(FormalTy));
2593
2594 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2595}
2596
2597 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2598 const CallExpr *E,
2599 ReturnValueSlot ReturnValue) {
2600 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2601 "Should not codegen for consteval builtins");
2602
2603 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2604 // See if we can constant fold this builtin. If so, don't emit it at all.
2605 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2606 Expr::EvalResult Result;
2607 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2608 !Result.hasSideEffects()) {
2609 if (Result.Val.isInt())
2610 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2611 Result.Val.getInt()));
2612 if (Result.Val.isFloat())
2613 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2614 Result.Val.getFloat()));
2615 }
2616
2617 // If the current long-double semantics are IEEE 128-bit, replace math
2618 // builtins of long-double with their f128 equivalents.
2619 // TODO: This mutation should also be applied to targets other than PPC,
2620 // once the backend supports IEEE 128-bit style libcalls.
2621 if (getTarget().getTriple().isPPC64() &&
2622 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2623 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2624
2625 // If the builtin has been declared explicitly with an assembler label,
2626 // disable the specialized emitting below. Ideally we should communicate the
2627 // rename in IR, or at least avoid generating the intrinsic calls that are
2628 // likely to get lowered to the renamed library functions.
2629 const unsigned BuiltinIDIfNoAsmLabel =
2630 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2631
2632 std::optional<bool> ErrnoOverriden;
2633 // ErrnoOverriden is true if math-errno is overridden via the
2634 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2635 // which implies math-errno.
2636 if (E->hasStoredFPFeatures()) {
2638 if (OP.hasMathErrnoOverride())
2639 ErrnoOverriden = OP.getMathErrnoOverride();
2640 }
2641 // True if '__attribute__((optnone))' is used. This attribute overrides
2642 // fast-math, which implies math-errno.
2643 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2644
2645 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2646
2647 bool GenerateFPMathIntrinsics =
2649 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2650 OptNone, IsOptimizationEnabled);
2651
2652 if (GenerateFPMathIntrinsics) {
2653 switch (BuiltinIDIfNoAsmLabel) {
2654 case Builtin::BIacos:
2655 case Builtin::BIacosf:
2656 case Builtin::BIacosl:
2657 case Builtin::BI__builtin_acos:
2658 case Builtin::BI__builtin_acosf:
2659 case Builtin::BI__builtin_acosf16:
2660 case Builtin::BI__builtin_acosl:
2661 case Builtin::BI__builtin_acosf128:
2662 case Builtin::BI__builtin_elementwise_acos:
2663 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2664 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2665
2666 case Builtin::BIasin:
2667 case Builtin::BIasinf:
2668 case Builtin::BIasinl:
2669 case Builtin::BI__builtin_asin:
2670 case Builtin::BI__builtin_asinf:
2671 case Builtin::BI__builtin_asinf16:
2672 case Builtin::BI__builtin_asinl:
2673 case Builtin::BI__builtin_asinf128:
2674 case Builtin::BI__builtin_elementwise_asin:
2675 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2676 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2677
2678 case Builtin::BIatan:
2679 case Builtin::BIatanf:
2680 case Builtin::BIatanl:
2681 case Builtin::BI__builtin_atan:
2682 case Builtin::BI__builtin_atanf:
2683 case Builtin::BI__builtin_atanf16:
2684 case Builtin::BI__builtin_atanl:
2685 case Builtin::BI__builtin_atanf128:
2686 case Builtin::BI__builtin_elementwise_atan:
2687 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2688 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2689
2690 case Builtin::BIatan2:
2691 case Builtin::BIatan2f:
2692 case Builtin::BIatan2l:
2693 case Builtin::BI__builtin_atan2:
2694 case Builtin::BI__builtin_atan2f:
2695 case Builtin::BI__builtin_atan2f16:
2696 case Builtin::BI__builtin_atan2l:
2697 case Builtin::BI__builtin_atan2f128:
2698 case Builtin::BI__builtin_elementwise_atan2:
2699 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2700 *this, E, Intrinsic::atan2,
2701 Intrinsic::experimental_constrained_atan2));
2702
2703 case Builtin::BIceil:
2704 case Builtin::BIceilf:
2705 case Builtin::BIceill:
2706 case Builtin::BI__builtin_ceil:
2707 case Builtin::BI__builtin_ceilf:
2708 case Builtin::BI__builtin_ceilf16:
2709 case Builtin::BI__builtin_ceill:
2710 case Builtin::BI__builtin_ceilf128:
2711 case Builtin::BI__builtin_elementwise_ceil:
2712 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2713 Intrinsic::ceil,
2714 Intrinsic::experimental_constrained_ceil));
2715
2716 case Builtin::BIcopysign:
2717 case Builtin::BIcopysignf:
2718 case Builtin::BIcopysignl:
2719 case Builtin::BI__builtin_copysign:
2720 case Builtin::BI__builtin_copysignf:
2721 case Builtin::BI__builtin_copysignf16:
2722 case Builtin::BI__builtin_copysignl:
2723 case Builtin::BI__builtin_copysignf128:
2724 return RValue::get(
2725 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2726
2727 case Builtin::BIcos:
2728 case Builtin::BIcosf:
2729 case Builtin::BIcosl:
2730 case Builtin::BI__builtin_cos:
2731 case Builtin::BI__builtin_cosf:
2732 case Builtin::BI__builtin_cosf16:
2733 case Builtin::BI__builtin_cosl:
2734 case Builtin::BI__builtin_cosf128:
2735 case Builtin::BI__builtin_elementwise_cos:
2736 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2737 Intrinsic::cos,
2738 Intrinsic::experimental_constrained_cos));
2739
2740 case Builtin::BIcosh:
2741 case Builtin::BIcoshf:
2742 case Builtin::BIcoshl:
2743 case Builtin::BI__builtin_cosh:
2744 case Builtin::BI__builtin_coshf:
2745 case Builtin::BI__builtin_coshf16:
2746 case Builtin::BI__builtin_coshl:
2747 case Builtin::BI__builtin_coshf128:
2748 case Builtin::BI__builtin_elementwise_cosh:
2749 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2750 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2751
2752 case Builtin::BIexp:
2753 case Builtin::BIexpf:
2754 case Builtin::BIexpl:
2755 case Builtin::BI__builtin_exp:
2756 case Builtin::BI__builtin_expf:
2757 case Builtin::BI__builtin_expf16:
2758 case Builtin::BI__builtin_expl:
2759 case Builtin::BI__builtin_expf128:
2760 case Builtin::BI__builtin_elementwise_exp:
2761 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2762 Intrinsic::exp,
2763 Intrinsic::experimental_constrained_exp));
2764
2765 case Builtin::BIexp2:
2766 case Builtin::BIexp2f:
2767 case Builtin::BIexp2l:
2768 case Builtin::BI__builtin_exp2:
2769 case Builtin::BI__builtin_exp2f:
2770 case Builtin::BI__builtin_exp2f16:
2771 case Builtin::BI__builtin_exp2l:
2772 case Builtin::BI__builtin_exp2f128:
2773 case Builtin::BI__builtin_elementwise_exp2:
2774 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2775 Intrinsic::exp2,
2776 Intrinsic::experimental_constrained_exp2));
2777 case Builtin::BI__builtin_exp10:
2778 case Builtin::BI__builtin_exp10f:
2779 case Builtin::BI__builtin_exp10f16:
2780 case Builtin::BI__builtin_exp10l:
2781 case Builtin::BI__builtin_exp10f128:
2782 case Builtin::BI__builtin_elementwise_exp10: {
2783 // TODO: strictfp support
2784 if (Builder.getIsFPConstrained())
2785 break;
2786 return RValue::get(
2787 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2788 }
2789 case Builtin::BIfabs:
2790 case Builtin::BIfabsf:
2791 case Builtin::BIfabsl:
2792 case Builtin::BI__builtin_fabs:
2793 case Builtin::BI__builtin_fabsf:
2794 case Builtin::BI__builtin_fabsf16:
2795 case Builtin::BI__builtin_fabsl:
2796 case Builtin::BI__builtin_fabsf128:
2797 return RValue::get(
2798 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2799
2800 case Builtin::BIfloor:
2801 case Builtin::BIfloorf:
2802 case Builtin::BIfloorl:
2803 case Builtin::BI__builtin_floor:
2804 case Builtin::BI__builtin_floorf:
2805 case Builtin::BI__builtin_floorf16:
2806 case Builtin::BI__builtin_floorl:
2807 case Builtin::BI__builtin_floorf128:
2808 case Builtin::BI__builtin_elementwise_floor:
2809 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2810 Intrinsic::floor,
2811 Intrinsic::experimental_constrained_floor));
2812
2813 case Builtin::BIfma:
2814 case Builtin::BIfmaf:
2815 case Builtin::BIfmal:
2816 case Builtin::BI__builtin_fma:
2817 case Builtin::BI__builtin_fmaf:
2818 case Builtin::BI__builtin_fmaf16:
2819 case Builtin::BI__builtin_fmal:
2820 case Builtin::BI__builtin_fmaf128:
2821 case Builtin::BI__builtin_elementwise_fma:
2822 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2823 Intrinsic::fma,
2824 Intrinsic::experimental_constrained_fma));
2825
2826 case Builtin::BIfmax:
2827 case Builtin::BIfmaxf:
2828 case Builtin::BIfmaxl:
2829 case Builtin::BI__builtin_fmax:
2830 case Builtin::BI__builtin_fmaxf:
2831 case Builtin::BI__builtin_fmaxf16:
2832 case Builtin::BI__builtin_fmaxl:
2833 case Builtin::BI__builtin_fmaxf128:
2834 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2835 Intrinsic::maxnum,
2836 Intrinsic::experimental_constrained_maxnum));
2837
2838 case Builtin::BIfmin:
2839 case Builtin::BIfminf:
2840 case Builtin::BIfminl:
2841 case Builtin::BI__builtin_fmin:
2842 case Builtin::BI__builtin_fminf:
2843 case Builtin::BI__builtin_fminf16:
2844 case Builtin::BI__builtin_fminl:
2845 case Builtin::BI__builtin_fminf128:
2846 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2847 Intrinsic::minnum,
2848 Intrinsic::experimental_constrained_minnum));
2849
2850 case Builtin::BIfmaximum_num:
2851 case Builtin::BIfmaximum_numf:
2852 case Builtin::BIfmaximum_numl:
2853 case Builtin::BI__builtin_fmaximum_num:
2854 case Builtin::BI__builtin_fmaximum_numf:
2855 case Builtin::BI__builtin_fmaximum_numf16:
2856 case Builtin::BI__builtin_fmaximum_numl:
2857 case Builtin::BI__builtin_fmaximum_numf128:
2858 return RValue::get(
2859 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2860
2861 case Builtin::BIfminimum_num:
2862 case Builtin::BIfminimum_numf:
2863 case Builtin::BIfminimum_numl:
2864 case Builtin::BI__builtin_fminimum_num:
2865 case Builtin::BI__builtin_fminimum_numf:
2866 case Builtin::BI__builtin_fminimum_numf16:
2867 case Builtin::BI__builtin_fminimum_numl:
2868 case Builtin::BI__builtin_fminimum_numf128:
2869 return RValue::get(
2870 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2871
2872 // fmod() is a special-case. It maps to the frem instruction rather than an
2873 // LLVM intrinsic.
2874 case Builtin::BIfmod:
2875 case Builtin::BIfmodf:
2876 case Builtin::BIfmodl:
2877 case Builtin::BI__builtin_fmod:
2878 case Builtin::BI__builtin_fmodf:
2879 case Builtin::BI__builtin_fmodf16:
2880 case Builtin::BI__builtin_fmodl:
2881 case Builtin::BI__builtin_fmodf128:
2882 case Builtin::BI__builtin_elementwise_fmod: {
2883 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2884 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2885 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2886 if (Builder.getIsFPConstrained()) {
2887 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2888 Arg1->getType());
2889 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2890 } else {
2891 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2892 }
2893 }
2894
2895 case Builtin::BIlog:
2896 case Builtin::BIlogf:
2897 case Builtin::BIlogl:
2898 case Builtin::BI__builtin_log:
2899 case Builtin::BI__builtin_logf:
2900 case Builtin::BI__builtin_logf16:
2901 case Builtin::BI__builtin_logl:
2902 case Builtin::BI__builtin_logf128:
2903 case Builtin::BI__builtin_elementwise_log:
2904 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2905 Intrinsic::log,
2906 Intrinsic::experimental_constrained_log));
2907
2908 case Builtin::BIlog10:
2909 case Builtin::BIlog10f:
2910 case Builtin::BIlog10l:
2911 case Builtin::BI__builtin_log10:
2912 case Builtin::BI__builtin_log10f:
2913 case Builtin::BI__builtin_log10f16:
2914 case Builtin::BI__builtin_log10l:
2915 case Builtin::BI__builtin_log10f128:
2916 case Builtin::BI__builtin_elementwise_log10:
2917 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2918 Intrinsic::log10,
2919 Intrinsic::experimental_constrained_log10));
2920
2921 case Builtin::BIlog2:
2922 case Builtin::BIlog2f:
2923 case Builtin::BIlog2l:
2924 case Builtin::BI__builtin_log2:
2925 case Builtin::BI__builtin_log2f:
2926 case Builtin::BI__builtin_log2f16:
2927 case Builtin::BI__builtin_log2l:
2928 case Builtin::BI__builtin_log2f128:
2929 case Builtin::BI__builtin_elementwise_log2:
2930 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2931 Intrinsic::log2,
2932 Intrinsic::experimental_constrained_log2));
2933
2934 case Builtin::BInearbyint:
2935 case Builtin::BInearbyintf:
2936 case Builtin::BInearbyintl:
2937 case Builtin::BI__builtin_nearbyint:
2938 case Builtin::BI__builtin_nearbyintf:
2939 case Builtin::BI__builtin_nearbyintl:
2940 case Builtin::BI__builtin_nearbyintf128:
2941 case Builtin::BI__builtin_elementwise_nearbyint:
2942 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2943 Intrinsic::nearbyint,
2944 Intrinsic::experimental_constrained_nearbyint));
2945
2946 case Builtin::BIpow:
2947 case Builtin::BIpowf:
2948 case Builtin::BIpowl:
2949 case Builtin::BI__builtin_pow:
2950 case Builtin::BI__builtin_powf:
2951 case Builtin::BI__builtin_powf16:
2952 case Builtin::BI__builtin_powl:
2953 case Builtin::BI__builtin_powf128:
2954 case Builtin::BI__builtin_elementwise_pow:
2955 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2956 Intrinsic::pow,
2957 Intrinsic::experimental_constrained_pow));
2958
2959 case Builtin::BIrint:
2960 case Builtin::BIrintf:
2961 case Builtin::BIrintl:
2962 case Builtin::BI__builtin_rint:
2963 case Builtin::BI__builtin_rintf:
2964 case Builtin::BI__builtin_rintf16:
2965 case Builtin::BI__builtin_rintl:
2966 case Builtin::BI__builtin_rintf128:
2967 case Builtin::BI__builtin_elementwise_rint:
2968 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2969 Intrinsic::rint,
2970 Intrinsic::experimental_constrained_rint));
2971
2972 case Builtin::BIround:
2973 case Builtin::BIroundf:
2974 case Builtin::BIroundl:
2975 case Builtin::BI__builtin_round:
2976 case Builtin::BI__builtin_roundf:
2977 case Builtin::BI__builtin_roundf16:
2978 case Builtin::BI__builtin_roundl:
2979 case Builtin::BI__builtin_roundf128:
2980 case Builtin::BI__builtin_elementwise_round:
2981 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2982 Intrinsic::round,
2983 Intrinsic::experimental_constrained_round));
2984
2985 case Builtin::BIroundeven:
2986 case Builtin::BIroundevenf:
2987 case Builtin::BIroundevenl:
2988 case Builtin::BI__builtin_roundeven:
2989 case Builtin::BI__builtin_roundevenf:
2990 case Builtin::BI__builtin_roundevenf16:
2991 case Builtin::BI__builtin_roundevenl:
2992 case Builtin::BI__builtin_roundevenf128:
2993 case Builtin::BI__builtin_elementwise_roundeven:
2994 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2995 Intrinsic::roundeven,
2996 Intrinsic::experimental_constrained_roundeven));
2997
2998 case Builtin::BIsin:
2999 case Builtin::BIsinf:
3000 case Builtin::BIsinl:
3001 case Builtin::BI__builtin_sin:
3002 case Builtin::BI__builtin_sinf:
3003 case Builtin::BI__builtin_sinf16:
3004 case Builtin::BI__builtin_sinl:
3005 case Builtin::BI__builtin_sinf128:
3006 case Builtin::BI__builtin_elementwise_sin:
3007 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3008 Intrinsic::sin,
3009 Intrinsic::experimental_constrained_sin));
3010
3011 case Builtin::BIsinh:
3012 case Builtin::BIsinhf:
3013 case Builtin::BIsinhl:
3014 case Builtin::BI__builtin_sinh:
3015 case Builtin::BI__builtin_sinhf:
3016 case Builtin::BI__builtin_sinhf16:
3017 case Builtin::BI__builtin_sinhl:
3018 case Builtin::BI__builtin_sinhf128:
3019 case Builtin::BI__builtin_elementwise_sinh:
3020 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3021 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3022
3023 case Builtin::BI__builtin_sincospi:
3024 case Builtin::BI__builtin_sincospif:
3025 case Builtin::BI__builtin_sincospil:
3026 if (Builder.getIsFPConstrained())
3027 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3028 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3029 return RValue::get(nullptr);
3030
3031 case Builtin::BIsincos:
3032 case Builtin::BIsincosf:
3033 case Builtin::BIsincosl:
3034 case Builtin::BI__builtin_sincos:
3035 case Builtin::BI__builtin_sincosf:
3036 case Builtin::BI__builtin_sincosf16:
3037 case Builtin::BI__builtin_sincosl:
3038 case Builtin::BI__builtin_sincosf128:
3039 if (Builder.getIsFPConstrained())
3040 break; // TODO: Emit constrained sincos intrinsic once one exists.
3041 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3042 return RValue::get(nullptr);
3043
3044 case Builtin::BIsqrt:
3045 case Builtin::BIsqrtf:
3046 case Builtin::BIsqrtl:
3047 case Builtin::BI__builtin_sqrt:
3048 case Builtin::BI__builtin_sqrtf:
3049 case Builtin::BI__builtin_sqrtf16:
3050 case Builtin::BI__builtin_sqrtl:
3051 case Builtin::BI__builtin_sqrtf128:
3052 case Builtin::BI__builtin_elementwise_sqrt: {
3053 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
3054 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3055 SetSqrtFPAccuracy(Call);
3056 return RValue::get(Call);
3057 }
3058
3059 case Builtin::BItan:
3060 case Builtin::BItanf:
3061 case Builtin::BItanl:
3062 case Builtin::BI__builtin_tan:
3063 case Builtin::BI__builtin_tanf:
3064 case Builtin::BI__builtin_tanf16:
3065 case Builtin::BI__builtin_tanl:
3066 case Builtin::BI__builtin_tanf128:
3067 case Builtin::BI__builtin_elementwise_tan:
3068 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3069 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3070
3071 case Builtin::BItanh:
3072 case Builtin::BItanhf:
3073 case Builtin::BItanhl:
3074 case Builtin::BI__builtin_tanh:
3075 case Builtin::BI__builtin_tanhf:
3076 case Builtin::BI__builtin_tanhf16:
3077 case Builtin::BI__builtin_tanhl:
3078 case Builtin::BI__builtin_tanhf128:
3079 case Builtin::BI__builtin_elementwise_tanh:
3080 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3081 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3082
3083 case Builtin::BItrunc:
3084 case Builtin::BItruncf:
3085 case Builtin::BItruncl:
3086 case Builtin::BI__builtin_trunc:
3087 case Builtin::BI__builtin_truncf:
3088 case Builtin::BI__builtin_truncf16:
3089 case Builtin::BI__builtin_truncl:
3090 case Builtin::BI__builtin_truncf128:
3091 case Builtin::BI__builtin_elementwise_trunc:
3092 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3093 Intrinsic::trunc,
3094 Intrinsic::experimental_constrained_trunc));
3095
3096 case Builtin::BIlround:
3097 case Builtin::BIlroundf:
3098 case Builtin::BIlroundl:
3099 case Builtin::BI__builtin_lround:
3100 case Builtin::BI__builtin_lroundf:
3101 case Builtin::BI__builtin_lroundl:
3102 case Builtin::BI__builtin_lroundf128:
3103 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3104 *this, E, Intrinsic::lround,
3105 Intrinsic::experimental_constrained_lround));
3106
3107 case Builtin::BIllround:
3108 case Builtin::BIllroundf:
3109 case Builtin::BIllroundl:
3110 case Builtin::BI__builtin_llround:
3111 case Builtin::BI__builtin_llroundf:
3112 case Builtin::BI__builtin_llroundl:
3113 case Builtin::BI__builtin_llroundf128:
3114 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3115 *this, E, Intrinsic::llround,
3116 Intrinsic::experimental_constrained_llround));
3117
3118 case Builtin::BIlrint:
3119 case Builtin::BIlrintf:
3120 case Builtin::BIlrintl:
3121 case Builtin::BI__builtin_lrint:
3122 case Builtin::BI__builtin_lrintf:
3123 case Builtin::BI__builtin_lrintl:
3124 case Builtin::BI__builtin_lrintf128:
3125 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3126 *this, E, Intrinsic::lrint,
3127 Intrinsic::experimental_constrained_lrint));
3128
3129 case Builtin::BIllrint:
3130 case Builtin::BIllrintf:
3131 case Builtin::BIllrintl:
3132 case Builtin::BI__builtin_llrint:
3133 case Builtin::BI__builtin_llrintf:
3134 case Builtin::BI__builtin_llrintl:
3135 case Builtin::BI__builtin_llrintf128:
3136 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3137 *this, E, Intrinsic::llrint,
3138 Intrinsic::experimental_constrained_llrint));
3139 case Builtin::BI__builtin_ldexp:
3140 case Builtin::BI__builtin_ldexpf:
3141 case Builtin::BI__builtin_ldexpl:
3142 case Builtin::BI__builtin_ldexpf16:
3143 case Builtin::BI__builtin_ldexpf128:
3144 case Builtin::BI__builtin_elementwise_ldexp:
3145 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
3146 *this, E, Intrinsic::ldexp,
3147 Intrinsic::experimental_constrained_ldexp));
3148 default:
3149 break;
3150 }
3151 }
3152
3153 // Check NonnullAttribute/NullabilityArg and Alignment.
3154 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3155 unsigned ParmNum) {
3156 Value *Val = A.emitRawPointer(*this);
3157 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3158 ParmNum);
3159
3160 if (SanOpts.has(SanitizerKind::Alignment)) {
3161 SanitizerSet SkippedChecks;
3162 SkippedChecks.set(SanitizerKind::All);
3163 SkippedChecks.clear(SanitizerKind::Alignment);
3164 SourceLocation Loc = Arg->getExprLoc();
3165 // Strip an implicit cast.
3166 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3167 if (CE->getCastKind() == CK_BitCast)
3168 Arg = CE->getSubExpr();
3169 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3170 SkippedChecks);
3171 }
3172 };
3173
3174 switch (BuiltinIDIfNoAsmLabel) {
3175 default: break;
3176 case Builtin::BI__builtin___CFStringMakeConstantString:
3177 case Builtin::BI__builtin___NSStringMakeConstantString:
3178 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3179 case Builtin::BI__builtin_stdarg_start:
3180 case Builtin::BI__builtin_va_start:
3181 case Builtin::BI__va_start:
3182 case Builtin::BI__builtin_c23_va_start:
3183 case Builtin::BI__builtin_va_end:
3184 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3185 ? EmitScalarExpr(E->getArg(0))
3186 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3187 BuiltinID != Builtin::BI__builtin_va_end);
3188 return RValue::get(nullptr);
3189 case Builtin::BI__builtin_va_copy: {
3190 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3191 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3192 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3193 {DstPtr, SrcPtr});
3194 return RValue::get(nullptr);
3195 }
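// Editor's note (illustrative sketch, hypothetical caller): for
//   void copy_args(va_list dst, va_list src) { __builtin_va_copy(dst, src); }
// the code above emits a single intrinsic call on the two raw va_list
// pointers, roughly:
//   call void @llvm.va_copy.p0(ptr %dst, ptr %src)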
3196 case Builtin::BIabs:
3197 case Builtin::BIlabs:
3198 case Builtin::BIllabs:
3199 case Builtin::BI__builtin_abs:
3200 case Builtin::BI__builtin_labs:
3201 case Builtin::BI__builtin_llabs: {
3202 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3203
3204 Value *Result;
3205 switch (getLangOpts().getSignedOverflowBehavior()) {
3206 case LangOptions::SOB_Defined:
3207 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3208 break;
3209 case LangOptions::SOB_Undefined:
3210 if (!SanitizeOverflow) {
3211 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3212 break;
3213 }
3214 [[fallthrough]];
3215 case LangOptions::SOB_Trapping:
3216 // TODO: Somehow handle the corner case when the address of abs is taken.
3217 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3218 break;
3219 }
3220 return RValue::get(Result);
3221 }
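// Editor's note (illustrative, not upstream text): EmitAbs lowers to the
// llvm.abs intrinsic, whose i1 flag marks INT_MIN as poison. Under -fwrapv
// (SOB_Defined) the flag is false, so abs(INT_MIN) wraps back to INT_MIN,
// while the sanitizer/trapping path above emits an explicit overflow check
// and reports abs(INT_MIN) instead of producing poison.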
3222 case Builtin::BI__builtin_complex: {
3223 Value *Real = EmitScalarExpr(E->getArg(0));
3224 Value *Imag = EmitScalarExpr(E->getArg(1));
3225 return RValue::getComplex({Real, Imag});
3226 }
3227 case Builtin::BI__builtin_conj:
3228 case Builtin::BI__builtin_conjf:
3229 case Builtin::BI__builtin_conjl:
3230 case Builtin::BIconj:
3231 case Builtin::BIconjf:
3232 case Builtin::BIconjl: {
3233 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3234 Value *Real = ComplexVal.first;
3235 Value *Imag = ComplexVal.second;
3236 Imag = Builder.CreateFNeg(Imag, "neg");
3237 return RValue::getComplex(std::make_pair(Real, Imag));
3238 }
3239 case Builtin::BI__builtin_creal:
3240 case Builtin::BI__builtin_crealf:
3241 case Builtin::BI__builtin_creall:
3242 case Builtin::BIcreal:
3243 case Builtin::BIcrealf:
3244 case Builtin::BIcreall: {
3245 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3246 return RValue::get(ComplexVal.first);
3247 }
3248
3249 case Builtin::BI__builtin_preserve_access_index: {
3250 // Only enable the preserved access index region when debug info
3251 // is available, as debug info is needed to preserve the user-level
3252 // access pattern.
3253 if (!getDebugInfo()) {
3254 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3255 return RValue::get(EmitScalarExpr(E->getArg(0)));
3256 }
3257
3258 // Nested builtin_preserve_access_index() not supported
3259 if (IsInPreservedAIRegion) {
3260 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3261 return RValue::get(EmitScalarExpr(E->getArg(0)));
3262 }
3263
3264 IsInPreservedAIRegion = true;
3265 Value *Res = EmitScalarExpr(E->getArg(0));
3266 IsInPreservedAIRegion = false;
3267 return RValue::get(Res);
3268 }
3269
3270 case Builtin::BI__builtin_cimag:
3271 case Builtin::BI__builtin_cimagf:
3272 case Builtin::BI__builtin_cimagl:
3273 case Builtin::BIcimag:
3274 case Builtin::BIcimagf:
3275 case Builtin::BIcimagl: {
3276 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3277 return RValue::get(ComplexVal.second);
3278 }
3279
3280 case Builtin::BI__builtin_clrsb:
3281 case Builtin::BI__builtin_clrsbl:
3282 case Builtin::BI__builtin_clrsbll: {
3283 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
3284 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3285
3286 llvm::Type *ArgType = ArgValue->getType();
3287 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3288
3289 llvm::Type *ResultType = ConvertType(E->getType());
3290 Value *Zero = llvm::Constant::getNullValue(ArgType);
3291 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3292 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3293 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3294 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3295 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3296 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3297 "cast");
3298 return RValue::get(Result);
3299 }
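// Editor's note (worked example of the formula above, assuming 32-bit int):
// clrsb(-1) selects ~x == 0, ctlz(0) with zero-is-defined gives 32, minus 1
// yields 31; likewise clrsb(0) == 31. The result counts the redundant sign
// bits, which is why the i1 false (zero-is-defined) operand to ctlz matters.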
3300 case Builtin::BI__builtin_ctzs:
3301 case Builtin::BI__builtin_ctz:
3302 case Builtin::BI__builtin_ctzl:
3303 case Builtin::BI__builtin_ctzll:
3304 case Builtin::BI__builtin_ctzg:
3305 case Builtin::BI__builtin_elementwise_ctzg: {
3306 bool HasFallback =
3307 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3308 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3309 E->getNumArgs() > 1;
3310
3311 Value *ArgValue =
3312 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3313 : EmitScalarExpr(E->getArg(0));
3314
3315 llvm::Type *ArgType = ArgValue->getType();
3316 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3317
3318 llvm::Type *ResultType = ConvertType(E->getType());
3319 // The elementwise builtins always exhibit zero-is-undef behaviour
3320 Value *ZeroUndef = Builder.getInt1(
3321 HasFallback || getTarget().isCLZForZeroUndef() ||
3322 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3323 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3324 if (Result->getType() != ResultType)
3325 Result =
3326 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3327 if (!HasFallback)
3328 return RValue::get(Result);
3329
3330 Value *Zero = Constant::getNullValue(ArgType);
3331 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3332 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3333 Value *ResultOrFallback =
3334 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3335 return RValue::get(ResultOrFallback);
3336 }
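// Editor's note (illustrative, not upstream text): with a fallback argument,
// __builtin_ctzg(x, f) for a 32-bit x lowers roughly to
//   %c = call i32 @llvm.cttz.i32(i32 %x, i1 true)
//   %z = icmp eq i32 %x, 0
//   %r = select i1 %z, i32 %f, i32 %c
// i.e. the zero-is-undef cttz result is masked off by the final select.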
3337 case Builtin::BI__builtin_clzs:
3338 case Builtin::BI__builtin_clz:
3339 case Builtin::BI__builtin_clzl:
3340 case Builtin::BI__builtin_clzll:
3341 case Builtin::BI__builtin_clzg:
3342 case Builtin::BI__builtin_elementwise_clzg: {
3343 bool HasFallback =
3344 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3345 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3346 E->getNumArgs() > 1;
3347
3348 Value *ArgValue =
3349 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3350 : EmitScalarExpr(E->getArg(0));
3351
3352 llvm::Type *ArgType = ArgValue->getType();
3353 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3354
3355 llvm::Type *ResultType = ConvertType(E->getType());
3356 // The elementwise builtins always exhibit zero-is-undef behaviour
3357 Value *ZeroUndef = Builder.getInt1(
3358 HasFallback || getTarget().isCLZForZeroUndef() ||
3359 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3360 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3361 if (Result->getType() != ResultType)
3362 Result =
3363 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3364 if (!HasFallback)
3365 return RValue::get(Result);
3366
3367 Value *Zero = Constant::getNullValue(ArgType);
3368 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3369 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3370 Value *ResultOrFallback =
3371 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3372 return RValue::get(ResultOrFallback);
3373 }
3374 case Builtin::BI__builtin_ffs:
3375 case Builtin::BI__builtin_ffsl:
3376 case Builtin::BI__builtin_ffsll: {
3377 // ffs(x) -> x ? cttz(x) + 1 : 0
3378 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3379
3380 llvm::Type *ArgType = ArgValue->getType();
3381 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3382
3383 llvm::Type *ResultType = ConvertType(E->getType());
3384 Value *Tmp =
3385 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3386 llvm::ConstantInt::get(ArgType, 1));
3387 Value *Zero = llvm::Constant::getNullValue(ArgType);
3388 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3389 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3390 if (Result->getType() != ResultType)
3391 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3392 "cast");
3393 return RValue::get(Result);
3394 }
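// Editor's note (worked example of the mapping above): ffs(8) == 4, since
// cttz(8) == 3 and the +1 converts to 1-based bit positions; the final
// select pins ffs(0) to 0, a case that cttz with zero-is-undef would
// otherwise leave unspecified.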
3395 case Builtin::BI__builtin_parity:
3396 case Builtin::BI__builtin_parityl:
3397 case Builtin::BI__builtin_parityll: {
3398 // parity(x) -> ctpop(x) & 1
3399 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3400
3401 llvm::Type *ArgType = ArgValue->getType();
3402 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3403
3404 llvm::Type *ResultType = ConvertType(E->getType());
3405 Value *Tmp = Builder.CreateCall(F, ArgValue);
3406 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3407 if (Result->getType() != ResultType)
3408 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3409 "cast");
3410 return RValue::get(Result);
3411 }
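// Editor's note (worked example): parity(0b1011) == (ctpop(0b1011) & 1)
// == (3 & 1) == 1; the low bit of the population count is exactly the XOR
// of all the bits.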
3412 case Builtin::BI__lzcnt16:
3413 case Builtin::BI__lzcnt:
3414 case Builtin::BI__lzcnt64: {
3415 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3416
3417 llvm::Type *ArgType = ArgValue->getType();
3418 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3419
3420 llvm::Type *ResultType = ConvertType(E->getType());
3421 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3422 if (Result->getType() != ResultType)
3423 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3424 "cast");
3425 return RValue::get(Result);
3426 }
3427 case Builtin::BI__popcnt16:
3428 case Builtin::BI__popcnt:
3429 case Builtin::BI__popcnt64:
3430 case Builtin::BI__builtin_popcount:
3431 case Builtin::BI__builtin_popcountl:
3432 case Builtin::BI__builtin_popcountll:
3433 case Builtin::BI__builtin_popcountg: {
3434 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3435
3436 llvm::Type *ArgType = ArgValue->getType();
3437 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3438
3439 llvm::Type *ResultType = ConvertType(E->getType());
3440 Value *Result = Builder.CreateCall(F, ArgValue);
3441 if (Result->getType() != ResultType)
3442 Result =
3443 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3444 return RValue::get(Result);
3445 }
3446 case Builtin::BI__builtin_unpredictable: {
3447 // Always return the argument of __builtin_unpredictable. LLVM does not
3448 // handle this builtin. Metadata for this builtin should be added directly
3449 // to instructions such as branches or switches that use it.
3450 return RValue::get(EmitScalarExpr(E->getArg(0)));
3451 }
3452 case Builtin::BI__builtin_expect: {
3453 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3454 llvm::Type *ArgType = ArgValue->getType();
3455
3456 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3457 // Don't generate llvm.expect on -O0 as the backend won't use it for
3458 // anything.
3459 // Note, we still IRGen ExpectedValue because it could have side-effects.
3460 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3461 return RValue::get(ArgValue);
3462
3463 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3464 Value *Result =
3465 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3466 return RValue::get(Result);
3467 }
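// Editor's note (illustrative, assuming -O1 or higher): for
//   if (__builtin_expect(err, 0)) handle(err);
// with int err, the condition becomes
//   %e = call i32 @llvm.expect.i32(i32 %err, i32 0)
// and later passes convert the hint into branch weights; at -O0 the code
// above simply returns the evaluated argument unchanged.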
3468 case Builtin::BI__builtin_expect_with_probability: {
3469 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3470 llvm::Type *ArgType = ArgValue->getType();
3471
3472 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3473 llvm::APFloat Probability(0.0);
3474 const Expr *ProbArg = E->getArg(2);
3475 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3476 assert(EvalSucceed && "probability should be able to evaluate as float");
3477 (void)EvalSucceed;
3478 bool LoseInfo = false;
3479 Probability.convert(llvm::APFloat::IEEEdouble(),
3480 llvm::RoundingMode::Dynamic, &LoseInfo);
3481 llvm::Type *Ty = ConvertType(ProbArg->getType());
3482 Constant *Confidence = ConstantFP::get(Ty, Probability);
3483 // Don't generate llvm.expect.with.probability on -O0 as the backend
3484 // won't use it for anything.
3485 // Note, we still IRGen ExpectedValue because it could have side-effects.
3486 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3487 return RValue::get(ArgValue);
3488
3489 Function *FnExpect =
3490 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3491 Value *Result = Builder.CreateCall(
3492 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3493 return RValue::get(Result);
3494 }
3495 case Builtin::BI__builtin_assume_aligned: {
3496 const Expr *Ptr = E->getArg(0);
3497 Value *PtrValue = EmitScalarExpr(Ptr);
3498 Value *OffsetValue =
3499 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3500
3501 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3502 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3503 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3504 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3505 llvm::Value::MaximumAlignment);
3506
3507 emitAlignmentAssumption(PtrValue, Ptr,
3508 /*The expr loc is sufficient.*/ SourceLocation(),
3509 AlignmentCI, OffsetValue);
3510 return RValue::get(PtrValue);
3511 }
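// Editor's note (illustrative usage, hypothetical names): a call such as
//   p = __builtin_assume_aligned(buf, 64);
// emits an llvm.assume carrying an "align" operand bundle on buf. Note the
// clamp above: an oversized constant alignment is capped at
// llvm::Value::MaximumAlignment rather than rejected.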
3512 case Builtin::BI__builtin_assume_dereferenceable: {
3513 const Expr *Ptr = E->getArg(0);
3514 const Expr *Size = E->getArg(1);
3515 Value *PtrValue = EmitScalarExpr(Ptr);
3516 Value *SizeValue = EmitScalarExpr(Size);
3517 if (SizeValue->getType() != IntPtrTy)
3518 SizeValue =
3519 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3520 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3521 return RValue::get(nullptr);
3522 }
3523 case Builtin::BI__assume:
3524 case Builtin::BI__builtin_assume: {
3525 if (E->getArg(0)->HasSideEffects(getContext()))
3526 return RValue::get(nullptr);
3527
3528 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3529 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3530 Builder.CreateCall(FnAssume, ArgValue);
3531 return RValue::get(nullptr);
3532 }
3533 case Builtin::BI__builtin_assume_separate_storage: {
3534 const Expr *Arg0 = E->getArg(0);
3535 const Expr *Arg1 = E->getArg(1);
3536
3537 Value *Value0 = EmitScalarExpr(Arg0);
3538 Value *Value1 = EmitScalarExpr(Arg1);
3539
3540 Value *Values[] = {Value0, Value1};
3541 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3542 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3543 return RValue::get(nullptr);
3544 }
3545 case Builtin::BI__builtin_allow_runtime_check: {
3546 StringRef Kind =
3547 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3548 LLVMContext &Ctx = CGM.getLLVMContext();
3549 llvm::Value *Allow = Builder.CreateCall(
3550 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3551 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3552 return RValue::get(Allow);
3553 }
3554 case Builtin::BI__arithmetic_fence: {
3555 // Create the builtin call if FastMath is selected, and the target
3556 // supports the builtin, otherwise just return the argument.
3557 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3558 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3559 bool isArithmeticFenceEnabled =
3560 FMF.allowReassoc() &&
3561 getContext().getTargetInfo().checkArithmeticFenceSupported();
3562 QualType ArgType = E->getArg(0)->getType();
3563 if (ArgType->isComplexType()) {
3564 if (isArithmeticFenceEnabled) {
3565 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3566 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3567 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3568 ConvertType(ElementType));
3569 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3570 ConvertType(ElementType));
3571 return RValue::getComplex(std::make_pair(Real, Imag));
3572 }
3573 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3574 Value *Real = ComplexVal.first;
3575 Value *Imag = ComplexVal.second;
3576 return RValue::getComplex(std::make_pair(Real, Imag));
3577 }
3578 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3579 if (isArithmeticFenceEnabled)
3580 return RValue::get(
3581 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3582 return RValue::get(ArgValue);
3583 }
3584 case Builtin::BI__builtin_bswapg: {
3585 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3586 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3587 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3588 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3589 return RValue::get(ArgValue);
3590 assert((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0) &&
3591 "__builtin_bswapg only supports integer variants whose width is a "
3592 "multiple of 16 bits, as well as single-byte values");
3593 return RValue::get(
3594 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3595 }
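// Editor's note (worked example): __builtin_bswapg on a uint32_t holding
// 0x11223344 emits @llvm.bswap.i32 and yields 0x44332211; 1- and 8-bit
// inputs are returned unchanged above because a single byte has no byte
// order to swap.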
3596 case Builtin::BI__builtin_bswap16:
3597 case Builtin::BI__builtin_bswap32:
3598 case Builtin::BI__builtin_bswap64:
3599 case Builtin::BI_byteswap_ushort:
3600 case Builtin::BI_byteswap_ulong:
3601 case Builtin::BI_byteswap_uint64: {
3602 return RValue::get(
3603 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3604 }
3605 case Builtin::BI__builtin_bitreverse8:
3606 case Builtin::BI__builtin_bitreverse16:
3607 case Builtin::BI__builtin_bitreverse32:
3608 case Builtin::BI__builtin_bitreverse64: {
3609 return RValue::get(
3610 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3611 }
3612 case Builtin::BI__builtin_rotateleft8:
3613 case Builtin::BI__builtin_rotateleft16:
3614 case Builtin::BI__builtin_rotateleft32:
3615 case Builtin::BI__builtin_rotateleft64:
3616 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3617 case Builtin::BI_rotl16:
3618 case Builtin::BI_rotl:
3619 case Builtin::BI_lrotl:
3620 case Builtin::BI_rotl64:
3621 return emitRotate(E, false);
3622
3623 case Builtin::BI__builtin_rotateright8:
3624 case Builtin::BI__builtin_rotateright16:
3625 case Builtin::BI__builtin_rotateright32:
3626 case Builtin::BI__builtin_rotateright64:
3627 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3628 case Builtin::BI_rotr16:
3629 case Builtin::BI_rotr:
3630 case Builtin::BI_lrotr:
3631 case Builtin::BI_rotr64:
3632 return emitRotate(E, true);
3633
3634 case Builtin::BI__builtin_constant_p: {
3635 llvm::Type *ResultType = ConvertType(E->getType());
3636
3637 const Expr *Arg = E->getArg(0);
3638 QualType ArgType = Arg->getType();
3639 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3640 // and likely a mistake.
3641 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3642 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3643 // Per the GCC documentation, only numeric constants are recognized after
3644 // inlining.
3645 return RValue::get(ConstantInt::get(ResultType, 0));
3646
3647 if (Arg->HasSideEffects(getContext()))
3648 // The argument is unevaluated, so be conservative if it might have
3649 // side-effects.
3650 return RValue::get(ConstantInt::get(ResultType, 0));
3651
3652 Value *ArgValue = EmitScalarExpr(Arg);
3653 if (ArgType->isObjCObjectPointerType()) {
3654 // Convert Objective-C objects to id because we cannot distinguish between
3655 // LLVM types for Obj-C classes as they are opaque.
3656 ArgType = CGM.getContext().getObjCIdType();
3657 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3658 }
3659 Function *F =
3660 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3661 Value *Result = Builder.CreateCall(F, ArgValue);
3662 if (Result->getType() != ResultType)
3663 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3664 return RValue::get(Result);
3665 }
3666 case Builtin::BI__builtin_dynamic_object_size:
3667 case Builtin::BI__builtin_object_size: {
3668 unsigned Type =
3669 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3670 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3671
3672 // We pass this builtin onto the optimizer so that it can figure out the
3673 // object size in more complex cases.
3674 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3675 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3676 /*EmittedE=*/nullptr, IsDynamic));
3677 }
3678 case Builtin::BI__builtin_counted_by_ref: {
3679 // Default to returning '(void *) 0'.
3680 llvm::Value *Result = llvm::ConstantPointerNull::get(
3681 llvm::PointerType::getUnqual(getLLVMContext()));
3682
3683 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3684
3685 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3686 UO && UO->getOpcode() == UO_AddrOf) {
3687 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3688
3689 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3690 Arg = ASE->getBase()->IgnoreParenImpCasts();
3691 }
3692
3693 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3694 if (auto *CATy =
3695 ME->getMemberDecl()->getType()->getAs<CountAttributedType>();
3696 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3697 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
3698 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
3699 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
3700 else
3701 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3702 }
3703 }
3704
3705 return RValue::get(Result);
3706 }
3707 case Builtin::BI__builtin_prefetch: {
3708 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3709 // FIXME: Technically these constants should be of type 'int', yes?
3710 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3711 llvm::ConstantInt::get(Int32Ty, 0);
3712 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3713 llvm::ConstantInt::get(Int32Ty, 3);
3714 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3715 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3716 Builder.CreateCall(F, {Address, RW, Locality, Data});
3717 return RValue::get(nullptr);
3718 }
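// Editor's note (illustrative defaults): __builtin_prefetch(p) with the
// optional arguments omitted therefore becomes roughly
//   call void @llvm.prefetch.p0(ptr %p, i32 0, i32 3, i32 1)
// i.e. a read prefetch (rw == 0) with maximal temporal locality (3); the
// trailing 1 marks a data rather than an instruction prefetch.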
3719 case Builtin::BI__builtin_readcyclecounter: {
3720 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3721 return RValue::get(Builder.CreateCall(F));
3722 }
3723 case Builtin::BI__builtin_readsteadycounter: {
3724 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3725 return RValue::get(Builder.CreateCall(F));
3726 }
3727 case Builtin::BI__builtin___clear_cache: {
3728 Value *Begin = EmitScalarExpr(E->getArg(0));
3729 Value *End = EmitScalarExpr(E->getArg(1));
3730 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3731 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3732 }
3733 case Builtin::BI__builtin_trap:
3734 EmitTrapCall(Intrinsic::trap);
3735 return RValue::get(nullptr);
3736 case Builtin::BI__builtin_verbose_trap: {
3737 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3738 if (getDebugInfo()) {
3739 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3740 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3741 *E->getArg(1)->tryEvaluateString(getContext()));
3742 }
3743 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3744 // Currently no attempt is made to prevent traps from being merged.
3745 EmitTrapCall(Intrinsic::trap);
3746 return RValue::get(nullptr);
3747 }
3748 case Builtin::BI__debugbreak:
3749 EmitTrapCall(Intrinsic::debugtrap);
3750 return RValue::get(nullptr);
3751 case Builtin::BI__builtin_unreachable: {
3752 EmitUnreachable(E->getExprLoc());
3753
3754 // We do need to preserve an insertion point.
3755 EmitBlock(createBasicBlock("unreachable.cont"));
3756
3757 return RValue::get(nullptr);
3758 }
3759
3760 case Builtin::BI__builtin_powi:
3761 case Builtin::BI__builtin_powif:
3762 case Builtin::BI__builtin_powil: {
3763 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3764 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3765
3766 if (Builder.getIsFPConstrained()) {
3767 // FIXME: llvm.powi has 2 mangling types,
3768 // llvm.experimental.constrained.powi has one.
3769 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3770 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3771 Src0->getType());
3772 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3773 }
3774
3775 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3776 { Src0->getType(), Src1->getType() });
3777 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3778 }
3779 case Builtin::BI__builtin_frexpl: {
3780 // Linux PPC will not be adding additional PPCDoubleDouble support.
3781 // Work is in progress to switch the default to IEEE long double; until
3782 // then, emit a libcall for frexpl instead of legalizing this type in the BE.
3783 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3784 break;
3785 [[fallthrough]];
3786 }
3787 case Builtin::BI__builtin_frexp:
3788 case Builtin::BI__builtin_frexpf:
3789 case Builtin::BI__builtin_frexpf128:
3790 case Builtin::BI__builtin_frexpf16:
3791 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3792 case Builtin::BImodf:
3793 case Builtin::BImodff:
3794 case Builtin::BImodfl:
3795 case Builtin::BI__builtin_modf:
3796 case Builtin::BI__builtin_modff:
3797 case Builtin::BI__builtin_modfl:
3798 if (Builder.getIsFPConstrained())
3799 break; // TODO: Emit constrained modf intrinsic once one exists.
3800 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3801 case Builtin::BI__builtin_isgreater:
3802 case Builtin::BI__builtin_isgreaterequal:
3803 case Builtin::BI__builtin_isless:
3804 case Builtin::BI__builtin_islessequal:
3805 case Builtin::BI__builtin_islessgreater:
3806 case Builtin::BI__builtin_isunordered: {
3807 // Ordered comparisons: we know the arguments to these are matching scalar
3808 // floating point values.
3809 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3810 Value *LHS = EmitScalarExpr(E->getArg(0));
3811 Value *RHS = EmitScalarExpr(E->getArg(1));
3812
3813 switch (BuiltinID) {
3814 default: llvm_unreachable("Unknown ordered comparison");
3815 case Builtin::BI__builtin_isgreater:
3816 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3817 break;
3818 case Builtin::BI__builtin_isgreaterequal:
3819 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3820 break;
3821 case Builtin::BI__builtin_isless:
3822 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3823 break;
3824 case Builtin::BI__builtin_islessequal:
3825 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3826 break;
3827 case Builtin::BI__builtin_islessgreater:
3828 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3829 break;
3830 case Builtin::BI__builtin_isunordered:
3831 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3832 break;
3833 }
3834 // ZExt bool to int type.
3835 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3836 }
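// Editor's note (illustrative): __builtin_isless(a, b) maps to the ordered
// fcmp olt, which is false whenever either operand is NaN, whereas
// __builtin_isunordered maps to fcmp uno and is true exactly when a NaN is
// present; the zext above then widens the i1 to the builtin's int result.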
3837
3838 case Builtin::BI__builtin_isnan: {
3839 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3840 Value *V = EmitScalarExpr(E->getArg(0));
3841 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3842 return RValue::get(Result);
3843 return RValue::get(
3844 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3845 ConvertType(E->getType())));
3846 }
3847
3848 case Builtin::BI__builtin_issignaling: {
3849 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3850 Value *V = EmitScalarExpr(E->getArg(0));
3851 return RValue::get(
3852 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3853 ConvertType(E->getType())));
3854 }
3855
3856 case Builtin::BI__builtin_isinf: {
3857 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3858 Value *V = EmitScalarExpr(E->getArg(0));
3859 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3860 return RValue::get(Result);
3861 return RValue::get(
3862 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3863 ConvertType(E->getType())));
3864 }
3865
3866 case Builtin::BIfinite:
3867 case Builtin::BI__finite:
3868 case Builtin::BIfinitef:
3869 case Builtin::BI__finitef:
3870 case Builtin::BIfinitel:
3871 case Builtin::BI__finitel:
3872 case Builtin::BI__builtin_isfinite: {
3873 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3874 Value *V = EmitScalarExpr(E->getArg(0));
3875 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3876 return RValue::get(Result);
3877 return RValue::get(
3878 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3879 ConvertType(E->getType())));
3880 }
3881
3882 case Builtin::BI__builtin_isnormal: {
3883 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3884 Value *V = EmitScalarExpr(E->getArg(0));
3885 return RValue::get(
3886 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3887 ConvertType(E->getType())));
3888 }
3889
3890 case Builtin::BI__builtin_issubnormal: {
3891 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3892 Value *V = EmitScalarExpr(E->getArg(0));
3893 return RValue::get(
3894 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3895 ConvertType(E->getType())));
3896 }
3897
3898 case Builtin::BI__builtin_iszero: {
3899 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3900 Value *V = EmitScalarExpr(E->getArg(0));
3901 return RValue::get(
3902 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3903 ConvertType(E->getType())));
3904 }
3905
3906 case Builtin::BI__builtin_isfpclass: {
3907 Expr::EvalResult Result;
3908 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3909 break;
3910 uint64_t Test = Result.Val.getInt().getLimitedValue();
3911 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3912 Value *V = EmitScalarExpr(E->getArg(0));
3913 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3914 ConvertType(E->getType())));
3915 }
3916
3917 case Builtin::BI__builtin_nondeterministic_value: {
3918 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3919
3920 Value *Result = PoisonValue::get(Ty);
3921 Result = Builder.CreateFreeze(Result);
3922
3923 return RValue::get(Result);
3924 }
3925
3926 case Builtin::BI__builtin_elementwise_abs: {
3927 Value *Result;
3928 QualType QT = E->getArg(0)->getType();
3929
3930 if (auto *VecTy = QT->getAs<VectorType>())
3931 QT = VecTy->getElementType();
3932 if (QT->isIntegerType())
3933 Result = Builder.CreateBinaryIntrinsic(
3934 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
3935 nullptr, "elt.abs");
3936 else
3937 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
3938 "elt.abs");
3939
3940 return RValue::get(Result);
3941 }
3942 case Builtin::BI__builtin_elementwise_bitreverse:
3943 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3944 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
3945 case Builtin::BI__builtin_elementwise_popcount:
3946 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3947 *this, E, Intrinsic::ctpop, "elt.ctpop"));
3948 case Builtin::BI__builtin_elementwise_canonicalize:
3949 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3950 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
3951 case Builtin::BI__builtin_elementwise_copysign:
3952 return RValue::get(
3953 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
3954 case Builtin::BI__builtin_elementwise_fshl:
3955 return RValue::get(
3956 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
3957 case Builtin::BI__builtin_elementwise_fshr:
3958 return RValue::get(
3959 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
3960
3961 case Builtin::BI__builtin_elementwise_add_sat:
3962 case Builtin::BI__builtin_elementwise_sub_sat: {
3963 Value *Op0 = EmitScalarExpr(E->getArg(0));
3964 Value *Op1 = EmitScalarExpr(E->getArg(1));
3965 Value *Result;
3966 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
3967 QualType Ty = E->getArg(0)->getType();
3968 if (auto *VecTy = Ty->getAs<VectorType>())
3969 Ty = VecTy->getElementType();
3970 bool IsSigned = Ty->isSignedIntegerType();
3971 unsigned Opc;
3972 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
3973 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
3974 else
3975 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
3976 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
3977 return RValue::get(Result);
3978 }
3979
3980 case Builtin::BI__builtin_elementwise_max: {
3981 Value *Op0 = EmitScalarExpr(E->getArg(0));
3982 Value *Op1 = EmitScalarExpr(E->getArg(1));
3983 Value *Result;
3984 if (Op0->getType()->isIntOrIntVectorTy()) {
3985 QualType Ty = E->getArg(0)->getType();
3986 if (auto *VecTy = Ty->getAs<VectorType>())
3987 Ty = VecTy->getElementType();
3988 Result = Builder.CreateBinaryIntrinsic(
3989 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
3990 Op1, nullptr, "elt.max");
3991 } else
3992 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
3993 return RValue::get(Result);
3994 }
3995 case Builtin::BI__builtin_elementwise_min: {
3996 Value *Op0 = EmitScalarExpr(E->getArg(0));
3997 Value *Op1 = EmitScalarExpr(E->getArg(1));
3998 Value *Result;
3999 if (Op0->getType()->isIntOrIntVectorTy()) {
4000 QualType Ty = E->getArg(0)->getType();
4001 if (auto *VecTy = Ty->getAs<VectorType>())
4002 Ty = VecTy->getElementType();
4003 Result = Builder.CreateBinaryIntrinsic(
4004 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4005 Op1, nullptr, "elt.min");
4006 } else
4007 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4008 return RValue::get(Result);
4009 }
4010
4011 case Builtin::BI__builtin_elementwise_maxnum: {
4012 Value *Op0 = EmitScalarExpr(E->getArg(0));
4013 Value *Op1 = EmitScalarExpr(E->getArg(1));
4014 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4015 Op1, nullptr, "elt.maxnum");
4016 return RValue::get(Result);
4017 }
4018
4019 case Builtin::BI__builtin_elementwise_minnum: {
4020 Value *Op0 = EmitScalarExpr(E->getArg(0));
4021 Value *Op1 = EmitScalarExpr(E->getArg(1));
4022 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4023 Op1, nullptr, "elt.minnum");
4024 return RValue::get(Result);
4025 }
4026
4027 case Builtin::BI__builtin_elementwise_maximum: {
4028 Value *Op0 = EmitScalarExpr(E->getArg(0));
4029 Value *Op1 = EmitScalarExpr(E->getArg(1));
4030 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4031 nullptr, "elt.maximum");
4032 return RValue::get(Result);
4033 }
4034
4035 case Builtin::BI__builtin_elementwise_minimum: {
4036 Value *Op0 = EmitScalarExpr(E->getArg(0));
4037 Value *Op1 = EmitScalarExpr(E->getArg(1));
4038 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4039 nullptr, "elt.minimum");
4040 return RValue::get(Result);
4041 }
4042
4043 case Builtin::BI__builtin_elementwise_maximumnum: {
4044 Value *Op0 = EmitScalarExpr(E->getArg(0));
4045 Value *Op1 = EmitScalarExpr(E->getArg(1));
4046 Value *Result = Builder.CreateBinaryIntrinsic(
4047 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4048 return RValue::get(Result);
4049 }
4050
4051 case Builtin::BI__builtin_elementwise_minimumnum: {
4052 Value *Op0 = EmitScalarExpr(E->getArg(0));
4053 Value *Op1 = EmitScalarExpr(E->getArg(1));
4054 Value *Result = Builder.CreateBinaryIntrinsic(
4055 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4056 return RValue::get(Result);
4057 }
4058
4059 case Builtin::BI__builtin_reduce_max: {
4060 auto GetIntrinsicID = [this](QualType QT) {
4061 if (auto *VecTy = QT->getAs<VectorType>())
4062 QT = VecTy->getElementType();
4063 else if (QT->isSizelessVectorType())
4064 QT = QT->getSizelessVectorEltType(CGM.getContext());
4065
4066 if (QT->isSignedIntegerType())
4067 return Intrinsic::vector_reduce_smax;
4068 if (QT->isUnsignedIntegerType())
4069 return Intrinsic::vector_reduce_umax;
4070 assert(QT->isFloatingType() && "must have a float here");
4071 return Intrinsic::vector_reduce_fmax;
4072 };
4073 return RValue::get(emitUnaryBuiltin(
4074 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.max"));
4075 }
4076
4077 case Builtin::BI__builtin_reduce_min: {
4078 auto GetIntrinsicID = [this](QualType QT) {
4079 if (auto *VecTy = QT->getAs<VectorType>())
4080 QT = VecTy->getElementType();
4081 else if (QT->isSizelessVectorType())
4082 QT = QT->getSizelessVectorEltType(CGM.getContext());
4083
4084 if (QT->isSignedIntegerType())
4085 return Intrinsic::vector_reduce_smin;
4086 if (QT->isUnsignedIntegerType())
4087 return Intrinsic::vector_reduce_umin;
4088 assert(QT->isFloatingType() && "must have a float here");
4089 return Intrinsic::vector_reduce_fmin;
4090 };
4091
4092 return RValue::get(emitUnaryBuiltin(
4093 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4094 }
4095
4096 case Builtin::BI__builtin_reduce_add:
4097 return RValue::get(emitUnaryBuiltin(
4098 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4099 case Builtin::BI__builtin_reduce_mul:
4100 return RValue::get(emitUnaryBuiltin(
4101 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4102 case Builtin::BI__builtin_reduce_xor:
4103 return RValue::get(emitUnaryBuiltin(
4104 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4105 case Builtin::BI__builtin_reduce_or:
4106 return RValue::get(emitUnaryBuiltin(
4107 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4108 case Builtin::BI__builtin_reduce_and:
4109 return RValue::get(emitUnaryBuiltin(
4110 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4111 case Builtin::BI__builtin_reduce_maximum:
4112 return RValue::get(emitUnaryBuiltin(
4113 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4114 case Builtin::BI__builtin_reduce_minimum:
4115 return RValue::get(emitUnaryBuiltin(
4116 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4117
4118 case Builtin::BI__builtin_matrix_transpose: {
4119 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4120 Value *MatValue = EmitScalarExpr(E->getArg(0));
4121 MatrixBuilder MB(Builder);
4122 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4123 MatrixTy->getNumColumns());
4124 return RValue::get(Result);
4125 }
4126
4127 case Builtin::BI__builtin_matrix_column_major_load: {
4128 MatrixBuilder MB(Builder);
4129 // Emit everything that isn't dependent on the first parameter type
4130 Value *Stride = EmitScalarExpr(E->getArg(3));
4131 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4132 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4133 assert(PtrTy && "arg0 must be of pointer type");
4134 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4135
4136 Address Src = EmitPointerWithAlignment(E->getArg(0));
4137 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4138 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4139 0);
4140 Value *Result = MB.CreateColumnMajorLoad(
4141 Src.getElementType(), Src.emitRawPointer(*this),
4142 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4143 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4144 return RValue::get(Result);
4145 }
4146
4147 case Builtin::BI__builtin_matrix_column_major_store: {
4148 MatrixBuilder MB(Builder);
4149 Value *Matrix = EmitScalarExpr(E->getArg(0));
4150 Address Dst = EmitPointerWithAlignment(E->getArg(1));
4151 Value *Stride = EmitScalarExpr(E->getArg(2));
4152
4153 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4154 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4155 assert(PtrTy && "arg1 must be of pointer type");
4156 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4157
4158 EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
4159 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4160 0);
4161 Value *Result = MB.CreateColumnMajorStore(
4162 Matrix, Dst.emitRawPointer(*this),
4163 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4164 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4166 return RValue::get(Result);
4167 }
4168
4169 case Builtin::BI__builtin_masked_load:
4170 case Builtin::BI__builtin_masked_expand_load: {
4171 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4172 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4173
4174 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4175 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4176 if (E->getNumArgs() > 2)
4177 PassThru = EmitScalarExpr(E->getArg(2));
4178
4179 CharUnits Align = CGM.getNaturalTypeAlignment(
4180 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4181
4182 llvm::Value *Result;
4183 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4184 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4185 PassThru, "masked_load");
4186 } else {
4187 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4188 Result =
4189 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4190 }
4191 return RValue::get(Result);
4192 }
4193 case Builtin::BI__builtin_masked_gather: {
4194 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4195 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4196 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4197
4198 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4199 CharUnits Align = CGM.getNaturalTypeAlignment(
4200 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4201
4202 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4203 if (E->getNumArgs() > 3)
4204 PassThru = EmitScalarExpr(E->getArg(3));
4205
4206 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4207 E->getType()->getAs<VectorType>()->getElementType());
4208 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4209
4210 llvm::Value *Result = Builder.CreateMaskedGather(
4211 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4212 return RValue::get(Result);
4213 }
4214 case Builtin::BI__builtin_masked_store:
4215 case Builtin::BI__builtin_masked_compress_store: {
4216 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4217 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4218 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4219
4220 QualType ValTy = E->getArg(1)->getType();
4221 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4222
4223 CharUnits Align = CGM.getNaturalTypeAlignment(
4224 ValTy->getAs<VectorType>()->getElementType(),
4225 nullptr);
4226
4227 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4228 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4229 } else {
4230 llvm::Function *F =
4231 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4232 Builder.CreateCall(F, {Val, Ptr, Mask});
4233 }
4234 return RValue::get(nullptr);
4235 }
4236 case Builtin::BI__builtin_masked_scatter: {
4237 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4238 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4239 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4240 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4241
4242 CharUnits Align = CGM.getNaturalTypeAlignment(
4243 E->getArg(2)->getType()->getAs<VectorType>()->getElementType(),
4244 nullptr);
4245
4246 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4247 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4248 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4249
4250 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4251 return RValue();
4252 }
4253 case Builtin::BI__builtin_isinf_sign: {
4254 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4255 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4256 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4257 Value *Arg = EmitScalarExpr(E->getArg(0));
4258 Value *AbsArg = EmitFAbs(*this, Arg);
4259 Value *IsInf = Builder.CreateFCmpOEQ(
4260 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4261 Value *IsNeg = EmitSignBit(*this, Arg);
4262
4263 llvm::Type *IntTy = ConvertType(E->getType());
4264 Value *Zero = Constant::getNullValue(IntTy);
4265 Value *One = ConstantInt::get(IntTy, 1);
4266 Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
4267 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4268 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4269 return RValue::get(Result);
4270 }
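// Editor's note (worked example of the expansion above): isinf_sign(-inf)
// computes fabs(-inf) == +inf (true) and signbit == true, so the nested
// selects yield -1; any finite or NaN input fails the first compare and
// yields 0.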
4271
4272 case Builtin::BI__builtin_flt_rounds: {
4273 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4274
4275 llvm::Type *ResultType = ConvertType(E->getType());
4276 Value *Result = Builder.CreateCall(F);
4277 if (Result->getType() != ResultType)
4278 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4279 "cast");
4280 return RValue::get(Result);
4281 }
4282
4283 case Builtin::BI__builtin_set_flt_rounds: {
4284 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4285
4286 Value *V = EmitScalarExpr(E->getArg(0));
4287 Builder.CreateCall(F, V);
4288 return RValue::get(nullptr);
4289 }
4290
4291 case Builtin::BI__builtin_fpclassify: {
4292 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4293 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4294 Value *V = EmitScalarExpr(E->getArg(5));
4295 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4296
4297 // Create Result
4298 BasicBlock *Begin = Builder.GetInsertBlock();
4299 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4300 Builder.SetInsertPoint(End);
4301 PHINode *Result =
4302 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4303 "fpclassify_result");
4304
4305 // if (V==0) return FP_ZERO
4306 Builder.SetInsertPoint(Begin);
4307 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4308 "iszero");
4309 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4310 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4311 Builder.CreateCondBr(IsZero, End, NotZero);
4312 Result->addIncoming(ZeroLiteral, Begin);
4313
4314 // if (V != V) return FP_NAN
4315 Builder.SetInsertPoint(NotZero);
4316 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4317 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4318 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4319 Builder.CreateCondBr(IsNan, End, NotNan);
4320 Result->addIncoming(NanLiteral, NotZero);
4321
4322 // if (fabs(V) == infinity) return FP_INFINITY
4323 Builder.SetInsertPoint(NotNan);
4324 Value *VAbs = EmitFAbs(*this, V);
4325 Value *IsInf =
4326 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4327 "isinf");
4328 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4329 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4330 Builder.CreateCondBr(IsInf, End, NotInf);
4331 Result->addIncoming(InfLiteral, NotNan);
4332
4333 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4334 Builder.SetInsertPoint(NotInf);
4335 APFloat Smallest = APFloat::getSmallestNormalized(
4336 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4337 Value *IsNormal =
4338 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4339 "isnormal");
4340 Value *NormalResult =
4341 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4342 EmitScalarExpr(E->getArg(3)));
4343 Builder.CreateBr(End);
4344 Result->addIncoming(NormalResult, NotInf);
4345
4346 // return Result
4347 Builder.SetInsertPoint(End);
4348 return RValue::get(Result);
4349 }
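// Editor's note (illustrative control flow): the PHI above collects one of
// the five caller-supplied classification values, e.g. for
//   __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL,
//                        FP_ZERO, x)
// a denormal x falls through the zero, NaN, and infinity checks, and the
// final fcmp uge against the smallest normalized value selects
// FP_SUBNORMAL.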
4350
4351 // An alloca will always return a pointer to the alloca (stack) address
4352 // space. This address space need not be the same as the AST / Language
4353 // default (e.g. in C / C++ auto vars are in the generic address space). At
4354 // the AST level this is handled within CreateTempAlloca et al., but for the
4355 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4356 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4357 case Builtin::BIalloca:
4358 case Builtin::BI_alloca:
4359 case Builtin::BI__builtin_alloca_uninitialized:
4360 case Builtin::BI__builtin_alloca: {
4361 Value *Size = EmitScalarExpr(E->getArg(0));
4362 const TargetInfo &TI = getContext().getTargetInfo();
4363 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4364 const Align SuitableAlignmentInBytes =
4365 CGM.getContext()
4366 .toCharUnitsFromBits(TI.getSuitableAlign())
4367 .getAsAlign();
4368 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4369 AI->setAlignment(SuitableAlignmentInBytes);
4370 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4371 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4372 LangAS AAS = getASTAllocaAddressSpace();
4373 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4374 if (AAS != EAS) {
4375 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4376 return RValue::get(
4377 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4378 }
4379 return RValue::get(AI);
4380 }
4381
4382 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4383 case Builtin::BI__builtin_alloca_with_align: {
4384 Value *Size = EmitScalarExpr(E->getArg(0));
4385 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4386 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4387 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4388 const Align AlignmentInBytes =
4389 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4390 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4391 AI->setAlignment(AlignmentInBytes);
4392 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4393 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4394 LangAS AAS = getASTAllocaAddressSpace();
4395 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4396 if (AAS != EAS) {
4397 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4398 return RValue::get(
4399 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4400 }
4401 return RValue::get(AI);
4402 }
4403
4404 case Builtin::BI__builtin_infer_alloc_token: {
4405 llvm::MDNode *MDN = buildAllocToken(E);
4406 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4407 llvm::Function *F =
4408 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4409 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4410 return RValue::get(TokenID);
4411 }
4412
4413 case Builtin::BIbzero:
4414 case Builtin::BI__builtin_bzero: {
4415 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4416 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4417 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4418 E->getArg(0)->getExprLoc(), FD, 0);
4419 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4420 addInstToNewSourceAtom(I, nullptr);
4421 return RValue::get(nullptr);
4422 }
4423
4424 case Builtin::BIbcopy:
4425 case Builtin::BI__builtin_bcopy: {
4426 Address Src = EmitPointerWithAlignment(E->getArg(0));
4427 Address Dest = EmitPointerWithAlignment(E->getArg(1));
4428 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4429 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4430 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4431 0);
4432 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4433 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4434 0);
4435 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4436 addInstToNewSourceAtom(I, nullptr);
4437 return RValue::get(nullptr);
4438 }
4439
4440 case Builtin::BImemcpy:
4441 case Builtin::BI__builtin_memcpy:
4442 case Builtin::BImempcpy:
4443 case Builtin::BI__builtin_mempcpy: {
4444 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4445 Address Src = EmitPointerWithAlignment(E->getArg(1));
4446 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4447 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4448 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4449 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4450 addInstToNewSourceAtom(I, nullptr);
4451 if (BuiltinID == Builtin::BImempcpy ||
4452 BuiltinID == Builtin::BI__builtin_mempcpy)
4453 return RValue::get(Builder.CreateInBoundsGEP(
4454 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4455 else
4456 return RValue::get(Dest, *this);
4457 }
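// Editor's note (illustrative difference, not upstream text): memcpy
// returns the destination pointer, while mempcpy returns one past the last
// byte written, hence the inbounds GEP of Dest by SizeVal above; e.g.
// mempcpy(d, s, 16) == (char *)d + 16.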
4458
4459 case Builtin::BI__builtin_memcpy_inline: {
4460 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4461 Address Src = EmitPointerWithAlignment(E->getArg(1));
4462 uint64_t Size =
4463 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4464 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4465 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4466 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4467 addInstToNewSourceAtom(I, nullptr);
4468 return RValue::get(nullptr);
4469 }
4470
4471 case Builtin::BI__builtin_char_memchr:
4472 BuiltinID = Builtin::BI__builtin_memchr;
4473 break;
4474
4475 case Builtin::BI__builtin___memcpy_chk: {
4476 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4477 Expr::EvalResult SizeResult, DstSizeResult;
4478 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4479 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4480 break;
4481 llvm::APSInt Size = SizeResult.Val.getInt();
4482 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4483 if (Size.ugt(DstSize))
4484 break;
4485 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4486 Address Src = EmitPointerWithAlignment(E->getArg(1));
4487 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4488 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4489 addInstToNewSourceAtom(I, nullptr);
4490 return RValue::get(Dest, *this);
4491 }
4492
4493 case Builtin::BI__builtin_objc_memmove_collectable: {
4494 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4495 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4496 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4497 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4498 DestAddr, SrcAddr, SizeVal);
4499 return RValue::get(DestAddr, *this);
4500 }
4501
4502 case Builtin::BI__builtin___memmove_chk: {
4503 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4504 Expr::EvalResult SizeResult, DstSizeResult;
4505 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4506 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4507 break;
4508 llvm::APSInt Size = SizeResult.Val.getInt();
4509 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4510 if (Size.ugt(DstSize))
4511 break;
4512 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4513 Address Src = EmitPointerWithAlignment(E->getArg(1));
4514 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4515 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4516 addInstToNewSourceAtom(I, nullptr);
4517 return RValue::get(Dest, *this);
4518 }
4519
4520 case Builtin::BI__builtin_trivially_relocate:
4521 case Builtin::BImemmove:
4522 case Builtin::BI__builtin_memmove: {
4523 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4524 Address Src = EmitPointerWithAlignment(E->getArg(1));
4525 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4526 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4527 SizeVal = Builder.CreateMul(
4528 SizeVal,
4529 ConstantInt::get(
4530 SizeVal->getType(),
4531 getContext()
4532 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4533 .getQuantity()));
4534 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4535 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4536 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4537 addInstToNewSourceAtom(I, nullptr);
4538 return RValue::get(Dest, *this);
4539 }
4540 case Builtin::BImemset:
4541 case Builtin::BI__builtin_memset: {
4542 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4543 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4544 Builder.getInt8Ty());
4545 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4546 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4547 E->getArg(0)->getExprLoc(), FD, 0);
4548 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4549 addInstToNewSourceAtom(I, ByteVal);
4550 return RValue::get(Dest, *this);
4551 }
4552 case Builtin::BI__builtin_memset_inline: {
4553 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4554 Value *ByteVal =
4555 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4556 uint64_t Size =
4557 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4558 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4559 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4560 0);
4561 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4562 addInstToNewSourceAtom(I, nullptr);
4563 return RValue::get(nullptr);
4564 }
4565 case Builtin::BI__builtin___memset_chk: {
4566 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4567 Expr::EvalResult SizeResult, DstSizeResult;
4568 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4569 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4570 break;
4571 llvm::APSInt Size = SizeResult.Val.getInt();
4572 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4573 if (Size.ugt(DstSize))
4574 break;
4575 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4576 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4577 Builder.getInt8Ty());
4578 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4579 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4580 addInstToNewSourceAtom(I, nullptr);
4581 return RValue::get(Dest, *this);
4582 }
4583 case Builtin::BI__builtin_wmemchr: {
4584 // The MSVC runtime library does not provide a definition of wmemchr, so we
4585 // need an inline implementation.
4586 if (!getTarget().getTriple().isOSMSVCRT())
4587 break;
4588
4589 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4590 Value *Str = EmitScalarExpr(E->getArg(0));
4591 Value *Chr = EmitScalarExpr(E->getArg(1));
4592 Value *Size = EmitScalarExpr(E->getArg(2));
4593
4594 BasicBlock *Entry = Builder.GetInsertBlock();
4595 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4596 BasicBlock *Next = createBasicBlock("wmemchr.next");
4597 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4598 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4599 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4600
4601 EmitBlock(CmpEq);
4602 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4603 StrPhi->addIncoming(Str, Entry);
4604 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4605 SizePhi->addIncoming(Size, Entry);
4606 CharUnits WCharAlign =
4607 getContext().getTypeAlignInChars(getContext().WCharTy);
4608 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4609 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4610 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4611 Builder.CreateCondBr(StrEqChr, Exit, Next);
4612
4613 EmitBlock(Next);
4614 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4615 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4616 Value *NextSizeEq0 =
4617 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4618 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4619 StrPhi->addIncoming(NextStr, Next);
4620 SizePhi->addIncoming(NextSize, Next);
4621
4622 EmitBlock(Exit);
4623 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4624 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4625 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4626 Ret->addIncoming(FoundChr, CmpEq);
4627 return RValue::get(Ret);
4628 }
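 // A minimal C sketch (illustrative, not part of this file) of the semantics
 // the blocks above implement; wchar_t is 16 bits wide on MSVC targets:
 //
 //   wchar_t *wmemchr(const wchar_t *s, wchar_t c, size_t n) {
 //     for (; n != 0; ++s, --n)
 //       if (*s == c)
 //         return (wchar_t *)s;
 //     return 0;
 //   }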
4629 case Builtin::BI__builtin_wmemcmp: {
4630 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4631 // need an inline implementation.
4632 if (!getTarget().getTriple().isOSMSVCRT())
4633 break;
4634
4635 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4636
4637 Value *Dst = EmitScalarExpr(E->getArg(0));
4638 Value *Src = EmitScalarExpr(E->getArg(1));
4639 Value *Size = EmitScalarExpr(E->getArg(2));
4640
4641 BasicBlock *Entry = Builder.GetInsertBlock();
4642 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4643 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4644 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4645 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4646 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4647 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4648
4649 EmitBlock(CmpGT);
4650 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4651 DstPhi->addIncoming(Dst, Entry);
4652 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4653 SrcPhi->addIncoming(Src, Entry);
4654 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4655 SizePhi->addIncoming(Size, Entry);
4656 CharUnits WCharAlign =
4657 getContext().getTypeAlignInChars(getContext().WCharTy);
4658 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4659 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4660 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4661 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4662
4663 EmitBlock(CmpLT);
4664 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4665 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4666
4667 EmitBlock(Next);
4668 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4669 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4670 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4671 Value *NextSizeEq0 =
4672 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4673 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4674 DstPhi->addIncoming(NextDst, Next);
4675 SrcPhi->addIncoming(NextSrc, Next);
4676 SizePhi->addIncoming(NextSize, Next);
4677
4678 EmitBlock(Exit);
4679 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4680 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4681 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4682 Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
4683 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4684 return RValue::get(Ret);
4685 }
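 // Equivalent C sketch (illustrative). Note the element comparisons above are
 // emitted as unsigned compares (icmp ugt/ult), matching wchar_t being an
 // unsigned 16-bit type on Windows:
 //
 //   int wmemcmp(const wchar_t *d, const wchar_t *s, size_t n) {
 //     for (; n != 0; ++d, ++s, --n) {
 //       if (*d > *s) return 1;
 //       if (*d < *s) return -1;
 //     }
 //     return 0;
 //   }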
4686 case Builtin::BI__builtin_dwarf_cfa: {
4687 // The offset in bytes from the first argument to the CFA.
4688 //
4689 // Why on earth is this in the frontend? Is there any reason at
4690 // all that the backend can't reasonably determine this while
4691 // lowering llvm.eh.dwarf.cfa()?
4692 //
4693 // TODO: If there's a satisfactory reason, add a target hook for
4694 // this instead of hard-coding 0, which is correct for most targets.
4695 int32_t Offset = 0;
4696
4697 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4698 return RValue::get(Builder.CreateCall(F,
4699 llvm::ConstantInt::get(Int32Ty, Offset)));
4700 }
4701 case Builtin::BI__builtin_return_address: {
4702 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4703 getContext().UnsignedIntTy);
4704 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4705 return RValue::get(Builder.CreateCall(F, Depth));
4706 }
4707 case Builtin::BI_ReturnAddress: {
4708 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4709 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4710 }
4711 case Builtin::BI__builtin_frame_address: {
4712 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4713 getContext().UnsignedIntTy);
4714 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4715 return RValue::get(Builder.CreateCall(F, Depth));
4716 }
4717 case Builtin::BI__builtin_extract_return_addr: {
4718 Value *Address = EmitScalarExpr(E->getArg(0));
4719 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
4720 return RValue::get(Result);
4721 }
4722 case Builtin::BI__builtin_frob_return_addr: {
4723 Value *Address = EmitScalarExpr(E->getArg(0));
4724 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
4725 return RValue::get(Result);
4726 }
4727 case Builtin::BI__builtin_dwarf_sp_column: {
4728 llvm::IntegerType *Ty
4729 = cast<llvm::IntegerType>(ConvertType(E->getType()));
4730 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
4731 if (Column == -1) {
4732 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4733 return RValue::get(llvm::UndefValue::get(Ty));
4734 }
4735 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4736 }
4737 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4738 Value *Address = EmitScalarExpr(E->getArg(0));
4739 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4740 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4741 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4742 }
4743 case Builtin::BI__builtin_eh_return: {
4744 Value *Int = EmitScalarExpr(E->getArg(0));
4745 Value *Ptr = EmitScalarExpr(E->getArg(1));
4746
4747 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4748 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4749 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4750 Function *F =
4751 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4752 : Intrinsic::eh_return_i64);
4753 Builder.CreateCall(F, {Int, Ptr});
4754 Builder.CreateUnreachable();
4755
4756 // We do need to preserve an insertion point.
4757 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4758
4759 return RValue::get(nullptr);
4760 }
4761 case Builtin::BI__builtin_unwind_init: {
4762 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4763 Builder.CreateCall(F);
4764 return RValue::get(nullptr);
4765 }
4766 case Builtin::BI__builtin_extend_pointer: {
4767 // Extends a pointer to the size of an _Unwind_Word, which is
4768 // uint64_t on all platforms. Generally this gets poked into a
4769 // register and eventually used as an address, so if the
4770 // addressing registers are wider than pointers and the platform
4771 // doesn't implicitly ignore high-order bits when doing
4772 // addressing, we need to make sure we zext / sext based on
4773 // the platform's expectations.
4774 //
4775 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4776
4777 // Cast the pointer to intptr_t.
4778 Value *Ptr = EmitScalarExpr(E->getArg(0));
4779 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4780
4781 // If that's 64 bits, we're done.
4782 if (IntPtrTy->getBitWidth() == 64)
4783 return RValue::get(Result);
4784
4785 // Otherwise, ask the codegen data what to do.
4786 if (getTargetHooks().extendPointerWithSExt())
4787 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4788 else
4789 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4790 }
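 // Worked example (assuming a 32-bit target with a 64-bit _Unwind_Word): the
 // pointer value 0x80000000 extends to 0x0000000080000000 under zext but to
 // 0xFFFFFFFF80000000 under sext; extendPointerWithSExt() reports which of
 // the two the platform's addressing rules expect.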
4791 case Builtin::BI__builtin_setjmp: {
4792 // Buffer is a void**.
4793 Address Buf = EmitPointerWithAlignment(E->getArg(0));
4794
4795 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4796 // On this target, the back end fills in the context buffer completely.
4797 // It doesn't really matter whether the frontend stores to the buffer
4798 // before calling setjmp; the back end is going to overwrite it anyway.
4799 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4800 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4801 }
4802
4803 // Store the frame pointer to the setjmp buffer.
4804 Value *FrameAddr = Builder.CreateCall(
4805 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4806 ConstantInt::get(Int32Ty, 0));
4807 Builder.CreateStore(FrameAddr, Buf);
4808
4809 // Store the stack pointer to the setjmp buffer.
4810 Value *StackAddr = Builder.CreateStackSave();
4811 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4812
4813 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4814 Builder.CreateStore(StackAddr, StackSaveSlot);
4815
4816 // Call LLVM's EH setjmp, which is lightweight.
4817 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4818 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4819 }
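 // Sketch of the resulting buffer layout on the generic (non-SystemZ) path:
 // slot 0 holds the caller's frame address and slot 2 the saved stack
 // pointer; the remaining slots (e.g. the resume address) are filled in by
 // the lowering of llvm.eh.sjlj.setjmp itself.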
4820 case Builtin::BI__builtin_longjmp: {
4821 Value *Buf = EmitScalarExpr(E->getArg(0));
4822
4823 // Call LLVM's EH longjmp, which is lightweight.
4824 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4825
4826 // longjmp doesn't return; mark this as unreachable.
4827 Builder.CreateUnreachable();
4828
4829 // We do need to preserve an insertion point.
4830 EmitBlock(createBasicBlock("longjmp.cont"));
4831
4832 return RValue::get(nullptr);
4833 }
4834 case Builtin::BI__builtin_launder: {
4835 const Expr *Arg = E->getArg(0);
4836 QualType ArgTy = Arg->getType()->getPointeeType();
4837 Value *Ptr = EmitScalarExpr(Arg);
4838 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4839 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4840
4841 return RValue::get(Ptr);
4842 }
4843 case Builtin::BI__sync_fetch_and_add:
4844 case Builtin::BI__sync_fetch_and_sub:
4845 case Builtin::BI__sync_fetch_and_or:
4846 case Builtin::BI__sync_fetch_and_and:
4847 case Builtin::BI__sync_fetch_and_xor:
4848 case Builtin::BI__sync_fetch_and_nand:
4849 case Builtin::BI__sync_add_and_fetch:
4850 case Builtin::BI__sync_sub_and_fetch:
4851 case Builtin::BI__sync_and_and_fetch:
4852 case Builtin::BI__sync_or_and_fetch:
4853 case Builtin::BI__sync_xor_and_fetch:
4854 case Builtin::BI__sync_nand_and_fetch:
4855 case Builtin::BI__sync_val_compare_and_swap:
4856 case Builtin::BI__sync_bool_compare_and_swap:
4857 case Builtin::BI__sync_lock_test_and_set:
4858 case Builtin::BI__sync_lock_release:
4859 case Builtin::BI__sync_swap:
4860 llvm_unreachable("Shouldn't make it through sema");
4861 case Builtin::BI__sync_fetch_and_add_1:
4862 case Builtin::BI__sync_fetch_and_add_2:
4863 case Builtin::BI__sync_fetch_and_add_4:
4864 case Builtin::BI__sync_fetch_and_add_8:
4865 case Builtin::BI__sync_fetch_and_add_16:
4866 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4867 case Builtin::BI__sync_fetch_and_sub_1:
4868 case Builtin::BI__sync_fetch_and_sub_2:
4869 case Builtin::BI__sync_fetch_and_sub_4:
4870 case Builtin::BI__sync_fetch_and_sub_8:
4871 case Builtin::BI__sync_fetch_and_sub_16:
4872 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4873 case Builtin::BI__sync_fetch_and_or_1:
4874 case Builtin::BI__sync_fetch_and_or_2:
4875 case Builtin::BI__sync_fetch_and_or_4:
4876 case Builtin::BI__sync_fetch_and_or_8:
4877 case Builtin::BI__sync_fetch_and_or_16:
4878 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4879 case Builtin::BI__sync_fetch_and_and_1:
4880 case Builtin::BI__sync_fetch_and_and_2:
4881 case Builtin::BI__sync_fetch_and_and_4:
4882 case Builtin::BI__sync_fetch_and_and_8:
4883 case Builtin::BI__sync_fetch_and_and_16:
4884 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4885 case Builtin::BI__sync_fetch_and_xor_1:
4886 case Builtin::BI__sync_fetch_and_xor_2:
4887 case Builtin::BI__sync_fetch_and_xor_4:
4888 case Builtin::BI__sync_fetch_and_xor_8:
4889 case Builtin::BI__sync_fetch_and_xor_16:
4890 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4891 case Builtin::BI__sync_fetch_and_nand_1:
4892 case Builtin::BI__sync_fetch_and_nand_2:
4893 case Builtin::BI__sync_fetch_and_nand_4:
4894 case Builtin::BI__sync_fetch_and_nand_8:
4895 case Builtin::BI__sync_fetch_and_nand_16:
4896 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4897
4898 // Clang extensions: not overloaded yet.
4899 case Builtin::BI__sync_fetch_and_min:
4900 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4901 case Builtin::BI__sync_fetch_and_max:
4902 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4903 case Builtin::BI__sync_fetch_and_umin:
4904 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4905 case Builtin::BI__sync_fetch_and_umax:
4906 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4907
4908 case Builtin::BI__sync_add_and_fetch_1:
4909 case Builtin::BI__sync_add_and_fetch_2:
4910 case Builtin::BI__sync_add_and_fetch_4:
4911 case Builtin::BI__sync_add_and_fetch_8:
4912 case Builtin::BI__sync_add_and_fetch_16:
4913 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4914 llvm::Instruction::Add);
4915 case Builtin::BI__sync_sub_and_fetch_1:
4916 case Builtin::BI__sync_sub_and_fetch_2:
4917 case Builtin::BI__sync_sub_and_fetch_4:
4918 case Builtin::BI__sync_sub_and_fetch_8:
4919 case Builtin::BI__sync_sub_and_fetch_16:
4920 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4921 llvm::Instruction::Sub);
4922 case Builtin::BI__sync_and_and_fetch_1:
4923 case Builtin::BI__sync_and_and_fetch_2:
4924 case Builtin::BI__sync_and_and_fetch_4:
4925 case Builtin::BI__sync_and_and_fetch_8:
4926 case Builtin::BI__sync_and_and_fetch_16:
4927 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4928 llvm::Instruction::And);
4929 case Builtin::BI__sync_or_and_fetch_1:
4930 case Builtin::BI__sync_or_and_fetch_2:
4931 case Builtin::BI__sync_or_and_fetch_4:
4932 case Builtin::BI__sync_or_and_fetch_8:
4933 case Builtin::BI__sync_or_and_fetch_16:
4934 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
4935 llvm::Instruction::Or);
4936 case Builtin::BI__sync_xor_and_fetch_1:
4937 case Builtin::BI__sync_xor_and_fetch_2:
4938 case Builtin::BI__sync_xor_and_fetch_4:
4939 case Builtin::BI__sync_xor_and_fetch_8:
4940 case Builtin::BI__sync_xor_and_fetch_16:
4941 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
4942 llvm::Instruction::Xor);
4943 case Builtin::BI__sync_nand_and_fetch_1:
4944 case Builtin::BI__sync_nand_and_fetch_2:
4945 case Builtin::BI__sync_nand_and_fetch_4:
4946 case Builtin::BI__sync_nand_and_fetch_8:
4947 case Builtin::BI__sync_nand_and_fetch_16:
4948 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
4949 llvm::Instruction::And, true);
4950
4951 case Builtin::BI__sync_val_compare_and_swap_1:
4952 case Builtin::BI__sync_val_compare_and_swap_2:
4953 case Builtin::BI__sync_val_compare_and_swap_4:
4954 case Builtin::BI__sync_val_compare_and_swap_8:
4955 case Builtin::BI__sync_val_compare_and_swap_16:
4956 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
4957
4958 case Builtin::BI__sync_bool_compare_and_swap_1:
4959 case Builtin::BI__sync_bool_compare_and_swap_2:
4960 case Builtin::BI__sync_bool_compare_and_swap_4:
4961 case Builtin::BI__sync_bool_compare_and_swap_8:
4962 case Builtin::BI__sync_bool_compare_and_swap_16:
4963 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
4964
4965 case Builtin::BI__sync_swap_1:
4966 case Builtin::BI__sync_swap_2:
4967 case Builtin::BI__sync_swap_4:
4968 case Builtin::BI__sync_swap_8:
4969 case Builtin::BI__sync_swap_16:
4970 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4971
4972 case Builtin::BI__sync_lock_test_and_set_1:
4973 case Builtin::BI__sync_lock_test_and_set_2:
4974 case Builtin::BI__sync_lock_test_and_set_4:
4975 case Builtin::BI__sync_lock_test_and_set_8:
4976 case Builtin::BI__sync_lock_test_and_set_16:
4977 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4978
4979 case Builtin::BI__sync_lock_release_1:
4980 case Builtin::BI__sync_lock_release_2:
4981 case Builtin::BI__sync_lock_release_4:
4982 case Builtin::BI__sync_lock_release_8:
4983 case Builtin::BI__sync_lock_release_16: {
4984 Address Ptr = CheckAtomicAlignment(*this, E);
4985 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
4986
4987 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
4988 getContext().getTypeSize(ElTy));
4989 llvm::StoreInst *Store =
4990 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
4991 Store->setAtomic(llvm::AtomicOrdering::Release);
4992 return RValue::get(nullptr);
4993 }
4994
4995 case Builtin::BI__sync_synchronize: {
4996 // We assume this is supposed to correspond to a C++0x-style
4997 // sequentially-consistent fence (i.e. this is only usable for
4998 // synchronization, not device I/O or anything like that). This intrinsic
4999 // is really badly designed in the sense that in theory, there isn't
5000 // any way to safely use it... but in practice, it mostly works
5001 // to use it with non-atomic loads and stores to get acquire/release
5002 // semantics.
5003 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5004 return RValue::get(nullptr);
5005 }
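 // Illustration: a source-level call
 //   __sync_synchronize();
 // therefore lowers to the single instruction
 //   fence seq_cst
 // at system synchronization scope.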
5006
5007 case Builtin::BI__builtin_nontemporal_load:
5008 return RValue::get(EmitNontemporalLoad(*this, E));
5009 case Builtin::BI__builtin_nontemporal_store:
5010 return RValue::get(EmitNontemporalStore(*this, E));
5011 case Builtin::BI__c11_atomic_is_lock_free:
5012 case Builtin::BI__atomic_is_lock_free: {
5013 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5014 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5015 // _Atomic(T) is always properly-aligned.
5016 const char *LibCallName = "__atomic_is_lock_free";
5017 CallArgList Args;
5018 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5019 getContext().getSizeType());
5020 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5021 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5022 getContext().VoidPtrTy);
5023 else
5024 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5025 getContext().VoidPtrTy);
5026 const CGFunctionInfo &FuncInfo =
5027 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5028 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5029 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5030 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5031 ReturnValueSlot(), Args);
5032 }
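 // Example of the emitted libcall (illustrative; integer types follow the
 // target's size_t): __c11_atomic_is_lock_free(8) becomes roughly
 //   call zeroext i1 @__atomic_is_lock_free(i64 8, ptr null)
 // while the __atomic_is_lock_free form passes the user's object pointer in
 // place of null.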
5033
5034 case Builtin::BI__atomic_thread_fence:
5035 case Builtin::BI__atomic_signal_fence:
5036 case Builtin::BI__c11_atomic_thread_fence:
5037 case Builtin::BI__c11_atomic_signal_fence: {
5038 llvm::SyncScope::ID SSID;
5039 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5040 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5041 SSID = llvm::SyncScope::SingleThread;
5042 else
5043 SSID = llvm::SyncScope::System;
5044 Value *Order = EmitScalarExpr(E->getArg(0));
5045 if (isa<llvm::ConstantInt>(Order)) {
5046 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5047 switch (ord) {
5048 case 0: // memory_order_relaxed
5049 default: // invalid order
5050 break;
5051 case 1: // memory_order_consume
5052 case 2: // memory_order_acquire
5053 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5054 break;
5055 case 3: // memory_order_release
5056 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5057 break;
5058 case 4: // memory_order_acq_rel
5059 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5060 break;
5061 case 5: // memory_order_seq_cst
5062 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5063 break;
5064 }
5065 return RValue::get(nullptr);
5066 }
5067
5068 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5069 AcquireBB = createBasicBlock("acquire", CurFn);
5070 ReleaseBB = createBasicBlock("release", CurFn);
5071 AcqRelBB = createBasicBlock("acqrel", CurFn);
5072 SeqCstBB = createBasicBlock("seqcst", CurFn);
5073 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5074
5075 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5076 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5077
5078 Builder.SetInsertPoint(AcquireBB);
5079 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5080 Builder.CreateBr(ContBB);
5081 SI->addCase(Builder.getInt32(1), AcquireBB);
5082 SI->addCase(Builder.getInt32(2), AcquireBB);
5083
5084 Builder.SetInsertPoint(ReleaseBB);
5085 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5086 Builder.CreateBr(ContBB);
5087 SI->addCase(Builder.getInt32(3), ReleaseBB);
5088
5089 Builder.SetInsertPoint(AcqRelBB);
5090 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5091 Builder.CreateBr(ContBB);
5092 SI->addCase(Builder.getInt32(4), AcqRelBB);
5093
5094 Builder.SetInsertPoint(SeqCstBB);
5095 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5096 Builder.CreateBr(ContBB);
5097 SI->addCase(Builder.getInt32(5), SeqCstBB);
5098
5099 Builder.SetInsertPoint(ContBB);
5100 return RValue::get(nullptr);
5101 }
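 // For a non-constant order, the code above materializes roughly this CFG
 // (sketch):
 //   switch i32 %order, label %atomic.continue [ i32 1, label %acquire
 //                                               i32 2, label %acquire
 //                                               i32 3, label %release
 //                                               i32 4, label %acqrel
 //                                               i32 5, label %seqcst ]
 // with a single fence of the matching ordering in each destination block.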
5102 case Builtin::BI__scoped_atomic_thread_fence: {
5103 auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
5104
5105 Value *Order = EmitScalarExpr(E->getArg(0));
5106 Value *Scope = EmitScalarExpr(E->getArg(1));
5107 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5108 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5109 if (Ord && Scp) {
5110 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5111 ? ScopeModel->map(Scp->getZExtValue())
5112 : ScopeModel->map(ScopeModel->getFallBackValue());
5113 switch (Ord->getZExtValue()) {
5114 case 0: // memory_order_relaxed
5115 default: // invalid order
5116 break;
5117 case 1: // memory_order_consume
5118 case 2: // memory_order_acquire
5119 Builder.CreateFence(
5120 llvm::AtomicOrdering::Acquire,
5121 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5122 llvm::AtomicOrdering::Acquire,
5123 getLLVMContext()));
5124 break;
5125 case 3: // memory_order_release
5126 Builder.CreateFence(
5127 llvm::AtomicOrdering::Release,
5128 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5129 llvm::AtomicOrdering::Release,
5130 getLLVMContext()));
5131 break;
5132 case 4: // memory_order_acq_rel
5133 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5134 getTargetHooks().getLLVMSyncScopeID(
5135 getLangOpts(), SS,
5136 llvm::AtomicOrdering::AcquireRelease,
5137 getLLVMContext()));
5138 break;
5139 case 5: // memory_order_seq_cst
5140 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5141 getTargetHooks().getLLVMSyncScopeID(
5142 getLangOpts(), SS,
5143 llvm::AtomicOrdering::SequentiallyConsistent,
5144 getLLVMContext()));
5145 break;
5146 }
5147 return RValue::get(nullptr);
5148 }
5149
5150 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5151
5152 llvm::SmallVector<std::pair<llvm::BasicBlock *, llvm::AtomicOrdering>>
5153 OrderBBs;
5154 if (Ord) {
5155 switch (Ord->getZExtValue()) {
5156 case 0: // memory_order_relaxed
5157 default: // invalid order
5158 ContBB->eraseFromParent();
5159 return RValue::get(nullptr);
5160 case 1: // memory_order_consume
5161 case 2: // memory_order_acquire
5162 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5163 llvm::AtomicOrdering::Acquire);
5164 break;
5165 case 3: // memory_order_release
5166 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5167 llvm::AtomicOrdering::Release);
5168 break;
5169 case 4: // memory_order_acq_rel
5170 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5171 llvm::AtomicOrdering::AcquireRelease);
5172 break;
5173 case 5: // memory_order_seq_cst
5174 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5175 llvm::AtomicOrdering::SequentiallyConsistent);
5176 break;
5177 }
5178 } else {
5179 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5180 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5181 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5182 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5183
5184 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5185 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5186 SI->addCase(Builder.getInt32(1), AcquireBB);
5187 SI->addCase(Builder.getInt32(2), AcquireBB);
5188 SI->addCase(Builder.getInt32(3), ReleaseBB);
5189 SI->addCase(Builder.getInt32(4), AcqRelBB);
5190 SI->addCase(Builder.getInt32(5), SeqCstBB);
5191
5192 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5193 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5194 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5195 OrderBBs.emplace_back(SeqCstBB,
5196 llvm::AtomicOrdering::SequentiallyConsistent);
5197 }
5198
5199 for (auto &[OrderBB, Ordering] : OrderBBs) {
5200 Builder.SetInsertPoint(OrderBB);
5201 if (Scp) {
5202 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5203 ? ScopeModel->map(Scp->getZExtValue())
5204 : ScopeModel->map(ScopeModel->getFallBackValue());
5205 Builder.CreateFence(Ordering,
5206 getTargetHooks().getLLVMSyncScopeID(
5207 getLangOpts(), SS, Ordering, getLLVMContext()));
5208 Builder.CreateBr(ContBB);
5209 } else {
5210 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5211 for (unsigned Scp : ScopeModel->getRuntimeValues())
5212 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5213
5214 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5215 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5216 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5217 auto *B = BBs[Scp];
5218 SI->addCase(Builder.getInt32(Scp), B);
5219
5220 Builder.SetInsertPoint(B);
5221 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5222 getLangOpts(), ScopeModel->map(Scp),
5223 Ordering, getLLVMContext()));
5224 Builder.CreateBr(ContBB);
5225 }
5226 }
5227 }
5228
5229 Builder.SetInsertPoint(ContBB);
5230 return RValue::get(nullptr);
5231 }
5232
5233 case Builtin::BI__builtin_signbit:
5234 case Builtin::BI__builtin_signbitf:
5235 case Builtin::BI__builtin_signbitl: {
5236 return RValue::get(
5237 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5238 ConvertType(E->getType())));
5239 }
5240 case Builtin::BI__warn_memset_zero_len:
5241 return RValue::getIgnored();
5242 case Builtin::BI__annotation: {
5243 // Re-encode each wide string to UTF-8 and make an MDString.
5244 SmallVector<Metadata *, 1> Strings;
5245 for (const Expr *Arg : E->arguments()) {
5246 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5247 assert(Str->getCharByteWidth() == 2);
5248 StringRef WideBytes = Str->getBytes();
5249 std::string StrUtf8;
5250 if (!convertUTF16ToUTF8String(
5251 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5252 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5253 continue;
5254 }
5255 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5256 }
5257
5258 // Build an MDTuple of MDStrings and emit the intrinsic call.
5259 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5260 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5261 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5262 return RValue::getIgnored();
5263 }
5264 case Builtin::BI__builtin_annotation: {
5265 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5266 llvm::Function *F = CGM.getIntrinsic(
5267 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5268
5269 // Get the annotation string, go through casts. Sema requires this to be a
5270 // non-wide string literal, potentially casted, so the cast<> is safe.
5271 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5272 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5273 return RValue::get(
5274 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5275 }
5276 case Builtin::BI__builtin_addcb:
5277 case Builtin::BI__builtin_addcs:
5278 case Builtin::BI__builtin_addc:
5279 case Builtin::BI__builtin_addcl:
5280 case Builtin::BI__builtin_addcll:
5281 case Builtin::BI__builtin_subcb:
5282 case Builtin::BI__builtin_subcs:
5283 case Builtin::BI__builtin_subc:
5284 case Builtin::BI__builtin_subcl:
5285 case Builtin::BI__builtin_subcll: {
5286
5287 // We translate all of these builtins from expressions of the form:
5288 // int x = ..., y = ..., carryin = ..., carryout, result;
5289 // result = __builtin_addc(x, y, carryin, &carryout);
5290 //
5291 // to LLVM IR of the form:
5292 //
5293 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5294 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5295 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5296 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5297 // i32 %carryin)
5298 // %result = extractvalue {i32, i1} %tmp2, 0
5299 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5300 // %tmp3 = or i1 %carry1, %carry2
5301 // %tmp4 = zext i1 %tmp3 to i32
5302 // store i32 %tmp4, i32* %carryout
5303
5304 // Scalarize our inputs.
5305 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5306 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5307 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5308 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5309
5310 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5311 Intrinsic::ID IntrinsicId;
5312 switch (BuiltinID) {
5313 default: llvm_unreachable("Unknown multiprecision builtin id.");
5314 case Builtin::BI__builtin_addcb:
5315 case Builtin::BI__builtin_addcs:
5316 case Builtin::BI__builtin_addc:
5317 case Builtin::BI__builtin_addcl:
5318 case Builtin::BI__builtin_addcll:
5319 IntrinsicId = Intrinsic::uadd_with_overflow;
5320 break;
5321 case Builtin::BI__builtin_subcb:
5322 case Builtin::BI__builtin_subcs:
5323 case Builtin::BI__builtin_subc:
5324 case Builtin::BI__builtin_subcl:
5325 case Builtin::BI__builtin_subcll:
5326 IntrinsicId = Intrinsic::usub_with_overflow;
5327 break;
5328 }
5329
5330 // Construct our resulting LLVM IR expression.
5331 llvm::Value *Carry1;
5332 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5333 X, Y, Carry1);
5334 llvm::Value *Carry2;
5335 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5336 Sum1, Carryin, Carry2);
5337 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5338 X->getType());
5339 Builder.CreateStore(CarryOut, CarryOutPtr);
5340 return RValue::get(Sum2);
5341 }
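 // Usage sketch (illustrative): chaining the carry to add two 64-bit values
 // held as 32-bit limbs:
 //
 //   unsigned lo, hi, c;
 //   lo = __builtin_addc(a_lo, b_lo, 0, &c);
 //   hi = __builtin_addc(a_hi, b_hi, c, &c);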
5342
5343 case Builtin::BI__builtin_add_overflow:
5344 case Builtin::BI__builtin_sub_overflow:
5345 case Builtin::BI__builtin_mul_overflow: {
5346 const clang::Expr *LeftArg = E->getArg(0);
5347 const clang::Expr *RightArg = E->getArg(1);
5348 const clang::Expr *ResultArg = E->getArg(2);
5349
5350 clang::QualType ResultQTy =
5351 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5352
5353 WidthAndSignedness LeftInfo =
5354 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5355 WidthAndSignedness RightInfo =
5356 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5357 WidthAndSignedness ResultInfo =
5358 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5359
5360 // Handle mixed-sign multiplication as a special case, because adding
5361 // runtime or backend support for our generic irgen would be too expensive.
5362 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5363 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5364 RightInfo, ResultArg, ResultQTy,
5365 ResultInfo);
5366
5367 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5368 ResultInfo))
5369 return EmitCheckedUnsignedMultiplySignedResult(
5370 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5371 ResultInfo);
5372
5373 WidthAndSignedness EncompassingInfo =
5374 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5375
5376 llvm::Type *EncompassingLLVMTy =
5377 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5378
5379 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5380
5381 Intrinsic::ID IntrinsicId;
5382 switch (BuiltinID) {
5383 default:
5384 llvm_unreachable("Unknown overflow builtin id.");
5385 case Builtin::BI__builtin_add_overflow:
5386 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5387 : Intrinsic::uadd_with_overflow;
5388 break;
5389 case Builtin::BI__builtin_sub_overflow:
5390 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5391 : Intrinsic::usub_with_overflow;
5392 break;
5393 case Builtin::BI__builtin_mul_overflow:
5394 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5395 : Intrinsic::umul_with_overflow;
5396 break;
5397 }
5398
5399 llvm::Value *Left = EmitScalarExpr(LeftArg);
5400 llvm::Value *Right = EmitScalarExpr(RightArg);
5401 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5402
5403 // Extend each operand to the encompassing type.
5404 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5405 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5406
5407 // Perform the operation on the extended values.
5408 llvm::Value *Overflow, *Result;
5409 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5410
5411 if (EncompassingInfo.Width > ResultInfo.Width) {
5412 // The encompassing type is wider than the result type, so we need to
5413 // truncate it.
5414 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5415
5416 // To see if the truncation caused an overflow, we will extend
5417 // the result and then compare it to the original result.
5418 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5419 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5420 llvm::Value *TruncationOverflow =
5421 Builder.CreateICmpNE(Result, ResultTruncExt);
5422
5423 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5424 Result = ResultTrunc;
5425 }
5426
5427 // Finally, store the result using the pointer.
5428 bool isVolatile =
5429 ResultArg->getType()->getPointeeType().isVolatileQualified();
5430 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5431
5432 return RValue::get(Overflow);
5433 }
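 // Worked example (assuming 32-bit int): for
 //   int r; bool o = __builtin_add_overflow(4000000000u, 0, &r);
 // the encompassing type is a 33-bit signed integer. The i33 sum 4000000000
 // truncates to the i32 value -294967296, which does not sign-extend back to
 // the original sum, so the truncation-overflow check fires and o is true.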
5434
5435 case Builtin::BI__builtin_uadd_overflow:
5436 case Builtin::BI__builtin_uaddl_overflow:
5437 case Builtin::BI__builtin_uaddll_overflow:
5438 case Builtin::BI__builtin_usub_overflow:
5439 case Builtin::BI__builtin_usubl_overflow:
5440 case Builtin::BI__builtin_usubll_overflow:
5441 case Builtin::BI__builtin_umul_overflow:
5442 case Builtin::BI__builtin_umull_overflow:
5443 case Builtin::BI__builtin_umulll_overflow:
5444 case Builtin::BI__builtin_sadd_overflow:
5445 case Builtin::BI__builtin_saddl_overflow:
5446 case Builtin::BI__builtin_saddll_overflow:
5447 case Builtin::BI__builtin_ssub_overflow:
5448 case Builtin::BI__builtin_ssubl_overflow:
5449 case Builtin::BI__builtin_ssubll_overflow:
5450 case Builtin::BI__builtin_smul_overflow:
5451 case Builtin::BI__builtin_smull_overflow:
5452 case Builtin::BI__builtin_smulll_overflow: {
5453
5454 // We translate all of these builtins directly to the relevant LLVM IR node.
5455
5456 // Scalarize our inputs.
5457 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5458 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5459 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5460
5461 // Decide which of the overflow intrinsics we are lowering to:
5462 Intrinsic::ID IntrinsicId;
5463 switch (BuiltinID) {
5464 default: llvm_unreachable("Unknown overflow builtin id.");
5465 case Builtin::BI__builtin_uadd_overflow:
5466 case Builtin::BI__builtin_uaddl_overflow:
5467 case Builtin::BI__builtin_uaddll_overflow:
5468 IntrinsicId = Intrinsic::uadd_with_overflow;
5469 break;
5470 case Builtin::BI__builtin_usub_overflow:
5471 case Builtin::BI__builtin_usubl_overflow:
5472 case Builtin::BI__builtin_usubll_overflow:
5473 IntrinsicId = Intrinsic::usub_with_overflow;
5474 break;
5475 case Builtin::BI__builtin_umul_overflow:
5476 case Builtin::BI__builtin_umull_overflow:
5477 case Builtin::BI__builtin_umulll_overflow:
5478 IntrinsicId = Intrinsic::umul_with_overflow;
5479 break;
5480 case Builtin::BI__builtin_sadd_overflow:
5481 case Builtin::BI__builtin_saddl_overflow:
5482 case Builtin::BI__builtin_saddll_overflow:
5483 IntrinsicId = Intrinsic::sadd_with_overflow;
5484 break;
5485 case Builtin::BI__builtin_ssub_overflow:
5486 case Builtin::BI__builtin_ssubl_overflow:
5487 case Builtin::BI__builtin_ssubll_overflow:
5488 IntrinsicId = Intrinsic::ssub_with_overflow;
5489 break;
5490 case Builtin::BI__builtin_smul_overflow:
5491 case Builtin::BI__builtin_smull_overflow:
5492 case Builtin::BI__builtin_smulll_overflow:
5493 IntrinsicId = Intrinsic::smul_with_overflow;
5494 break;
5495 }
5496
5497
5498 llvm::Value *Carry;
5499 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5500 Builder.CreateStore(Sum, SumOutPtr);
5501
5502 return RValue::get(Carry);
5503 }
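 // Illustration: __builtin_sadd_overflow(x, y, &sum) maps directly to
 //   %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
 // with element 0 stored through &sum and element 1 returned as the overflow
 // flag.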
5504 case Builtin::BIaddressof:
5505 case Builtin::BI__addressof:
5506 case Builtin::BI__builtin_addressof:
5507 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5508 case Builtin::BI__builtin_function_start:
5509 return RValue::get(CGM.GetFunctionStart(
5510 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5511 case Builtin::BI__builtin_operator_new:
5512 return EmitBuiltinNewDeleteCall(
5513 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5514 case Builtin::BI__builtin_operator_delete:
5515 EmitBuiltinNewDeleteCall(
5516 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5517 return RValue::get(nullptr);
5518
5519 case Builtin::BI__builtin_is_aligned:
5520 return EmitBuiltinIsAligned(E);
5521 case Builtin::BI__builtin_align_up:
5522 return EmitBuiltinAlignTo(E, true);
5523 case Builtin::BI__builtin_align_down:
5524 return EmitBuiltinAlignTo(E, false);
5525
5526 case Builtin::BI__noop:
5527 // __noop always evaluates to an integer literal zero.
5528 return RValue::get(ConstantInt::get(IntTy, 0));
5529 case Builtin::BI__builtin_call_with_static_chain: {
5530 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5531 const Expr *Chain = E->getArg(1);
5532 return EmitCall(Call->getCallee()->getType(),
5533 EmitCallee(Call->getCallee()), Call, ReturnValue,
5534 EmitScalarExpr(Chain));
5535 }
5536 case Builtin::BI_InterlockedExchange8:
5537 case Builtin::BI_InterlockedExchange16:
5538 case Builtin::BI_InterlockedExchange:
5539 case Builtin::BI_InterlockedExchangePointer:
5540 return RValue::get(
5541 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
5542 case Builtin::BI_InterlockedCompareExchangePointer:
5543 return RValue::get(
5544 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchangePointer, E));
5545 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5546 return RValue::get(
5547 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchangePointer_nf, E));
5548 case Builtin::BI_InterlockedCompareExchange8:
5549 case Builtin::BI_InterlockedCompareExchange16:
5550 case Builtin::BI_InterlockedCompareExchange:
5551 case Builtin::BI_InterlockedCompareExchange64:
5552 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5553 case Builtin::BI_InterlockedIncrement16:
5554 case Builtin::BI_InterlockedIncrement:
5555 return RValue::get(
5556 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
5557 case Builtin::BI_InterlockedDecrement16:
5558 case Builtin::BI_InterlockedDecrement:
5559 return RValue::get(
5560 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
5561 case Builtin::BI_InterlockedAnd8:
5562 case Builtin::BI_InterlockedAnd16:
5563 case Builtin::BI_InterlockedAnd:
5564 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
5565 case Builtin::BI_InterlockedExchangeAdd8:
5566 case Builtin::BI_InterlockedExchangeAdd16:
5567 case Builtin::BI_InterlockedExchangeAdd:
5568 return RValue::get(
5569 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
5570 case Builtin::BI_InterlockedExchangeSub8:
5571 case Builtin::BI_InterlockedExchangeSub16:
5572 case Builtin::BI_InterlockedExchangeSub:
5573 return RValue::get(
5574 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
5575 case Builtin::BI_InterlockedOr8:
5576 case Builtin::BI_InterlockedOr16:
5577 case Builtin::BI_InterlockedOr:
5578 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
5579 case Builtin::BI_InterlockedXor8:
5580 case Builtin::BI_InterlockedXor16:
5581 case Builtin::BI_InterlockedXor:
5582 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
5583
5584 case Builtin::BI_bittest64:
5585 case Builtin::BI_bittest:
5586 case Builtin::BI_bittestandcomplement64:
5587 case Builtin::BI_bittestandcomplement:
5588 case Builtin::BI_bittestandreset64:
5589 case Builtin::BI_bittestandreset:
5590 case Builtin::BI_bittestandset64:
5591 case Builtin::BI_bittestandset:
5592 case Builtin::BI_interlockedbittestandreset:
5593 case Builtin::BI_interlockedbittestandreset64:
5594 case Builtin::BI_interlockedbittestandreset64_acq:
5595 case Builtin::BI_interlockedbittestandreset64_rel:
5596 case Builtin::BI_interlockedbittestandreset64_nf:
5597 case Builtin::BI_interlockedbittestandset64:
5598 case Builtin::BI_interlockedbittestandset64_acq:
5599 case Builtin::BI_interlockedbittestandset64_rel:
5600 case Builtin::BI_interlockedbittestandset64_nf:
5601 case Builtin::BI_interlockedbittestandset:
5602 case Builtin::BI_interlockedbittestandset_acq:
5603 case Builtin::BI_interlockedbittestandset_rel:
5604 case Builtin::BI_interlockedbittestandset_nf:
5605 case Builtin::BI_interlockedbittestandreset_acq:
5606 case Builtin::BI_interlockedbittestandreset_rel:
5607 case Builtin::BI_interlockedbittestandreset_nf:
5608 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5609
5610 // These builtins exist to emit regular volatile loads and stores not
5611 // affected by the -fms-volatile setting.
5612 case Builtin::BI__iso_volatile_load8:
5613 case Builtin::BI__iso_volatile_load16:
5614 case Builtin::BI__iso_volatile_load32:
5615 case Builtin::BI__iso_volatile_load64:
5616 return RValue::get(EmitISOVolatileLoad(*this, E));
5617 case Builtin::BI__iso_volatile_store8:
5618 case Builtin::BI__iso_volatile_store16:
5619 case Builtin::BI__iso_volatile_store32:
5620 case Builtin::BI__iso_volatile_store64:
5621 return RValue::get(EmitISOVolatileStore(*this, E));
5622
5623 case Builtin::BI__builtin_ptrauth_sign_constant:
5624 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5625
5626 case Builtin::BI__builtin_ptrauth_auth:
5627 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5628 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5629 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5630 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5631 case Builtin::BI__builtin_ptrauth_strip: {
5632 // Emit the arguments.
5633 SmallVector<llvm::Value *, 5> Args;
5634 for (auto argExpr : E->arguments())
5635 Args.push_back(EmitScalarExpr(argExpr));
5636
5637 // Cast the value to intptr_t, saving its original type.
5638 llvm::Type *OrigValueType = Args[0]->getType();
5639 if (OrigValueType->isPointerTy())
5640 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5641
5642 switch (BuiltinID) {
5643 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5644 if (Args[4]->getType()->isPointerTy())
5645 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5646 [[fallthrough]];
5647
5648 case Builtin::BI__builtin_ptrauth_auth:
5649 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5650 if (Args[2]->getType()->isPointerTy())
5651 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5652 break;
5653
5654 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5655 if (Args[1]->getType()->isPointerTy())
5656 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5657 break;
5658
5659 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5660 case Builtin::BI__builtin_ptrauth_strip:
5661 break;
5662 }
5663
5664 // Call the intrinsic.
5665 auto IntrinsicID = [&]() -> unsigned {
5666 switch (BuiltinID) {
5667 case Builtin::BI__builtin_ptrauth_auth:
5668 return Intrinsic::ptrauth_auth;
5669 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5670 return Intrinsic::ptrauth_resign;
5671 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5672 return Intrinsic::ptrauth_blend;
5673 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5674 return Intrinsic::ptrauth_sign_generic;
5675 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5676 return Intrinsic::ptrauth_sign;
5677 case Builtin::BI__builtin_ptrauth_strip:
5678 return Intrinsic::ptrauth_strip;
5679 }
5680 llvm_unreachable("bad ptrauth intrinsic");
5681 }();
5682 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5683 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5684
5685 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5686 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5687 OrigValueType->isPointerTy()) {
5688 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5689 }
5690 return RValue::get(Result);
5691 }
5692
5693 case Builtin::BI__builtin_get_vtable_pointer: {
5694 const Expr *Target = E->getArg(0);
5695 QualType TargetType = Target->getType();
5696 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5697 assert(Decl);
5698 auto ThisAddress = EmitPointerWithAlignment(Target);
5699 assert(ThisAddress.isValid());
5700 llvm::Value *VTablePointer =
5701 GetVTablePtr(ThisAddress, Int8PtrTy, Decl, VTableAuthMode::MustTrap);
5702 return RValue::get(VTablePointer);
5703 }
5704
5705 case Builtin::BI__exception_code:
5706 case Builtin::BI_exception_code:
5707 return RValue::get(EmitSEHExceptionCode());
5708 case Builtin::BI__exception_info:
5709 case Builtin::BI_exception_info:
5710 return RValue::get(EmitSEHExceptionInfo());
5711 case Builtin::BI__abnormal_termination:
5712 case Builtin::BI_abnormal_termination:
5713 return RValue::get(EmitSEHAbnormalTermination());
5714 case Builtin::BI_setjmpex:
5715 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5716 E->getArg(0)->getType()->isPointerType())
5717 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5718 break;
5719 case Builtin::BI_setjmp:
5720 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5721 E->getArg(0)->getType()->isPointerType()) {
5722 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5723 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5724 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5725 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5726 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5727 }
5728 break;
5729
5730 // C++ std:: builtins.
5731 case Builtin::BImove:
5732 case Builtin::BImove_if_noexcept:
5733 case Builtin::BIforward:
5734 case Builtin::BIforward_like:
5735 case Builtin::BIas_const:
5736 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5737 case Builtin::BI__GetExceptionInfo: {
5738 if (llvm::GlobalVariable *GV =
5739 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5740 return RValue::get(GV);
5741 break;
5742 }
5743
5744 case Builtin::BI__fastfail:
5745 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
5746
5747 case Builtin::BI__builtin_coro_id:
5748 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5749 case Builtin::BI__builtin_coro_promise:
5750 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5751 case Builtin::BI__builtin_coro_resume:
5752 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5753 return RValue::get(nullptr);
5754 case Builtin::BI__builtin_coro_frame:
5755 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5756 case Builtin::BI__builtin_coro_noop:
5757 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5758 case Builtin::BI__builtin_coro_free:
5759 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5760 case Builtin::BI__builtin_coro_destroy:
5761 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5762 return RValue::get(nullptr);
5763 case Builtin::BI__builtin_coro_done:
5764 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5765 case Builtin::BI__builtin_coro_alloc:
5766 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5767 case Builtin::BI__builtin_coro_begin:
5768 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5769 case Builtin::BI__builtin_coro_end:
5770 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5771 case Builtin::BI__builtin_coro_suspend:
5772 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5773 case Builtin::BI__builtin_coro_size:
5774 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5775 case Builtin::BI__builtin_coro_align:
5776 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5777
5778 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5779 case Builtin::BIread_pipe:
5780 case Builtin::BIwrite_pipe: {
5781 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5782 *Arg1 = EmitScalarExpr(E->getArg(1));
5783 CGOpenCLRuntime OpenCLRT(CGM);
5784 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5785 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5786
5787 // Type of the generic packet parameter.
5788 unsigned GenericAS =
5789 getContext().getTargetAddressSpace(LangAS::opencl_generic);
5790 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5791
5792 // Testing which overloaded version we should generate the call for.
5793 if (2U == E->getNumArgs()) {
5794 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5795 : "__write_pipe_2";
5796 // Create a generic function type so we can emit the call for any builtin
5797 // or user-defined type.
5798 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5799 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5800 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5801 return RValue::get(
5802 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5803 {Arg0, ACast, PacketSize, PacketAlign}));
5804 } else {
5805 assert(4 == E->getNumArgs() &&
5806 "Illegal number of parameters to pipe function");
5807 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5808 : "__write_pipe_4";
5809
5810 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5811 Int32Ty, Int32Ty};
5812 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5813 *Arg3 = EmitScalarExpr(E->getArg(3));
5814 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5815 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5816 // We know the third argument is an integer type, but we may need to cast
5817 // it to i32.
5818 if (Arg2->getType() != Int32Ty)
5819 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5820 return RValue::get(
5821 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5822 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5823 }
5824 }
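 // Mapping sketch (illustrative): the two-argument form read_pipe(p, ptr)
 // becomes
 //   call i32 @__read_pipe_2(%pipe %p, ptr addrspace(4) %ptr, i32 %size, i32 %align)
 // and the four-argument reservation form read_pipe(p, rid, index, ptr)
 // becomes __read_pipe_4 with the reserve id and packet index passed before
 // the packet pointer.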
5825 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
5826 // functions
5827 case Builtin::BIreserve_read_pipe:
5828 case Builtin::BIreserve_write_pipe:
5829 case Builtin::BIwork_group_reserve_read_pipe:
5830 case Builtin::BIwork_group_reserve_write_pipe:
5831 case Builtin::BIsub_group_reserve_read_pipe:
5832 case Builtin::BIsub_group_reserve_write_pipe: {
5833 // Composing the mangled name for the function.
5834 const char *Name;
5835 if (BuiltinID == Builtin::BIreserve_read_pipe)
5836 Name = "__reserve_read_pipe";
5837 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5838 Name = "__reserve_write_pipe";
5839 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5840 Name = "__work_group_reserve_read_pipe";
5841 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5842 Name = "__work_group_reserve_write_pipe";
5843 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5844 Name = "__sub_group_reserve_read_pipe";
5845 else
5846 Name = "__sub_group_reserve_write_pipe";
5847
5848 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5849 *Arg1 = EmitScalarExpr(E->getArg(1));
5850 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5851 CGOpenCLRuntime OpenCLRT(CGM);
5852 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5853 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5854
5855 // Building the generic function prototype.
5856 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5857 llvm::FunctionType *FTy =
5858 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5859 // We know the second argument is an integer type, but we may need to cast
5860 // it to i32.
5861 if (Arg1->getType() != Int32Ty)
5862 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5863 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5864 {Arg0, Arg1, PacketSize, PacketAlign}));
5865 }
5866 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5867 // functions
5868 case Builtin::BIcommit_read_pipe:
5869 case Builtin::BIcommit_write_pipe:
5870 case Builtin::BIwork_group_commit_read_pipe:
5871 case Builtin::BIwork_group_commit_write_pipe:
5872 case Builtin::BIsub_group_commit_read_pipe:
5873 case Builtin::BIsub_group_commit_write_pipe: {
5874 const char *Name;
5875 if (BuiltinID == Builtin::BIcommit_read_pipe)
5876 Name = "__commit_read_pipe";
5877 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5878 Name = "__commit_write_pipe";
5879 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5880 Name = "__work_group_commit_read_pipe";
5881 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5882 Name = "__work_group_commit_write_pipe";
5883 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5884 Name = "__sub_group_commit_read_pipe";
5885 else
5886 Name = "__sub_group_commit_write_pipe";
5887
5888 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5889 *Arg1 = EmitScalarExpr(E->getArg(1));
5890 CGOpenCLRuntime OpenCLRT(CGM);
5891 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5892 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5893
5894 // Building the generic function prototype.
5895 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5896 llvm::FunctionType *FTy = llvm::FunctionType::get(
5897 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
5898
5899 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5900 {Arg0, Arg1, PacketSize, PacketAlign}));
5901 }
5902 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5903 case Builtin::BIget_pipe_num_packets:
5904 case Builtin::BIget_pipe_max_packets: {
5905 const char *BaseName;
5906 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5907 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5908 BaseName = "__get_pipe_num_packets";
5909 else
5910 BaseName = "__get_pipe_max_packets";
5911 std::string Name = std::string(BaseName) +
5912 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5913
5914 // Building the generic function prototype.
5915 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5916 CGOpenCLRuntime OpenCLRT(CGM);
5917 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5918 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5919 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5920 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5921
5922 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5923 {Arg0, PacketSize, PacketAlign}));
5924 }
5925
5926 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
5927 case Builtin::BIto_global:
5928 case Builtin::BIto_local:
5929 case Builtin::BIto_private: {
5930 auto Arg0 = EmitScalarExpr(E->getArg(0));
5931 auto NewArgT = llvm::PointerType::get(
5932 getLLVMContext(),
5933 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5934 auto NewRetT = llvm::PointerType::get(
5935 getLLVMContext(),
5936 CGM.getContext().getTargetAddressSpace(
5937 E->getType()->getPointeeType().getAddressSpace()));
5938 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
5939 llvm::Value *NewArg;
5940 if (Arg0->getType()->getPointerAddressSpace() !=
5941 NewArgT->getPointerAddressSpace())
5942 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
5943 else
5944 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
5945 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
5946 auto NewCall =
5947 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
5948 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
5949 ConvertType(E->getType())));
5950 }
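 // For example (illustrative), to_global(p) lowers to a call to the runtime
 // function __to_global, with pointer casts inserted on the argument and
 // result as needed to match the generic address space and the source-level
 // return type.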
5951
5952 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
5953 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
5954 // The code below expands the builtin call to a call to one of the following
5955 // functions that an OpenCL runtime library will have to provide:
5956 // __enqueue_kernel_basic
5957 // __enqueue_kernel_varargs
5958 // __enqueue_kernel_basic_events
5959 // __enqueue_kernel_events_varargs
5960 case Builtin::BIenqueue_kernel: {
5961 StringRef Name; // Generated function call name
5962 unsigned NumArgs = E->getNumArgs();
5963
5964 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
5965 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5966 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5967
5968 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
5969 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
5970 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
5971 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
5972
5973 // FIXME: Look through the addrspacecast which may exist to the stack
5974 // temporary as a hack.
5975 //
5976 // This is hardcoding the assumed ABI of the target function. This assumes
5977 // direct passing for every argument except NDRange, which is assumed to be
5978 // byval or byref indirect passed.
5979 //
5980 // This should be fixed to query a signature from CGOpenCLRuntime, and go
5981 // through EmitCallArgs to get the correct target ABI.
5982 Range = Range->stripPointerCasts();
5983
5984 llvm::Type *RangePtrTy = Range->getType();
5985
5986 if (NumArgs == 4) {
5987 // The most basic form of the call with parameters:
5988 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
5989 Name = "__enqueue_kernel_basic";
5990 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
5991 GenericVoidPtrTy};
5992 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5993
5994 auto Info =
5995 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5996 llvm::Value *Kernel =
5997 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5998 llvm::Value *Block =
5999 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6000
6001 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6002 {Queue, Flags, Range, Kernel, Block});
6003 return RValue::get(RTCall);
6004 }
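// Illustrative sketch, not part of the source (IR is schematic): the
// four-argument form
//   enqueue_kernel(q, flags, ndrange, ^{ ... });
// lowers to roughly
//   call i32 @__enqueue_kernel_basic(%queue %q, i32 %flags, ptr %ndrange,
//                                    ptr %kernel, ptr %block)
// with the kernel handle and block literal both cast to generic void
// pointers.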
6005 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6006
6007 // Create a temporary array to hold the sizes of local pointer arguments
6008 // for the block. \p First is the position of the first size argument.
6009 auto CreateArrayForSizeVar =
6010 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6011 llvm::APInt ArraySize(32, NumArgs - First);
6012 QualType SizeArrayTy = getContext().getConstantArrayType(
6013 getContext().getSizeType(), ArraySize, nullptr,
6014 ArraySizeModifier::Normal,
6015 /*IndexTypeQuals=*/0);
6016 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6017 llvm::Value *TmpPtr = Tmp.getPointer();
6018 // The EmitLifetime* pair expect a naked Alloca as their last argument,
6019 // however for cases where the default AS is not the Alloca AS, Tmp is
6020 // actually the Alloca ascasted to the default AS, hence the
6021 // stripPointerCasts()
6022 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6023 llvm::Value *ElemPtr;
6024 EmitLifetimeStart(Alloca);
6025 // Each of the following arguments specifies the size of the corresponding
6026 // argument passed to the enqueued block.
6027 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6028 for (unsigned I = First; I < NumArgs; ++I) {
6029 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6030 auto *GEP =
6031 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6032 if (I == First)
6033 ElemPtr = GEP;
6034 auto *V =
6035 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6036 Builder.CreateAlignedStore(
6037 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6038 }
6039 // Return the Alloca itself rather than a potential ascast as this is only
6040 // used by the paired EmitLifetimeEnd.
6041 return {ElemPtr, Alloca};
6042 };
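// Illustrative sketch, not part of the source: for a block taking two
// local pointers, the lambda above emits roughly
//   %block_sizes = alloca [2 x i64]
//   ; store each trailing size argument, zero-extended/truncated to size_t
// and returns both a pointer to element 0 (forwarded to the runtime) and
// the raw alloca (consumed by the paired EmitLifetimeEnd).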
6043
6044 // Could have events and/or varargs.
6045 if (E->getArg(3)->getType()->isBlockPointerType()) {
6046 // No events passed, but has variadic arguments.
6047 Name = "__enqueue_kernel_varargs";
6048 auto Info =
6049 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6050 llvm::Value *Kernel =
6051 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6052 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6053 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6054
6055 // Create a vector of the arguments, as well as a constant value to
6056 // express to the runtime the number of variadic arguments.
6057 llvm::Value *const Args[] = {Queue, Flags,
6058 Range, Kernel,
6059 Block, ConstantInt::get(IntTy, NumArgs - 4),
6060 ElemPtr};
6061 llvm::Type *const ArgTys[] = {
6062 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6063 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6064
6065 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6066 auto Call = RValue::get(
6067 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6068 EmitLifetimeEnd(TmpPtr);
6069 return Call;
6070 }
6071 // Any calls now have event arguments passed.
6072 if (NumArgs >= 7) {
6073 llvm::PointerType *PtrTy = llvm::PointerType::get(
6074 CGM.getLLVMContext(),
6075 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6076
6077 llvm::Value *NumEvents =
6078 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6079
6080 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
6081 // to be a null pointer constant (including `0` literal), we can take it
6082 // into account and emit null pointer directly.
6083 llvm::Value *EventWaitList = nullptr;
6084 if (E->getArg(4)->isNullPointerConstant(
6085 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6086 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6087 } else {
6088 EventWaitList =
6089 E->getArg(4)->getType()->isArrayType()
6090 ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
6091 : EmitScalarExpr(E->getArg(4));
6092 // Convert to generic address space.
6093 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6094 }
6095 llvm::Value *EventRet = nullptr;
6096 if (E->getArg(5)->isNullPointerConstant(
6097 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6098 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6099 } else {
6100 EventRet =
6101 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6102 }
6103
6104 auto Info =
6105 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6106 llvm::Value *Kernel =
6107 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6108 llvm::Value *Block =
6109 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6110
6111 std::vector<llvm::Type *> ArgTys = {
6112 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6113 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6114
6115 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6116 NumEvents, EventWaitList, EventRet,
6117 Kernel, Block};
6118
6119 if (NumArgs == 7) {
6120 // Has events but no variadics.
6121 Name = "__enqueue_kernel_basic_events";
6122 llvm::FunctionType *FTy =
6123 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6124 return RValue::get(
6125 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6126 }
6127 // Has event info and variadics
6128 // Pass the number of variadics to the runtime function too.
6129 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6130 ArgTys.push_back(Int32Ty);
6131 Name = "__enqueue_kernel_events_varargs";
6132
6133 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6134 Args.push_back(ElemPtr);
6135 ArgTys.push_back(ElemPtr->getType());
6136
6137 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6138 auto Call = RValue::get(
6139 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6140 EmitLifetimeEnd(TmpPtr);
6141 return Call;
6142 }
6143 llvm_unreachable("Unexpected enqueue_kernel signature");
6144 }
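// Illustrative sketch, not part of the source: a call with events and two
// variadic local-size arguments, e.g.
//   enqueue_kernel(q, flags, ndrange, n, wait_list, ret_evt,
//                  ^(local int *a, local float *b){ ... }, sz_a, sz_b);
// is routed to __enqueue_kernel_events_varargs, with the variadic count
// (here 2) and the pointer to the sizes array appended to the argument
// list assembled above.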
6145 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6146 // parameter.
6147 case Builtin::BIget_kernel_work_group_size: {
6148 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6149 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6150 auto Info =
6151 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6152 Value *Kernel =
6153 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6154 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6155 return RValue::get(EmitRuntimeCall(
6156 CGM.CreateRuntimeFunction(
6157 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6158 false),
6159 "__get_kernel_work_group_size_impl"),
6160 {Kernel, Arg}));
6161 }
6162 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6163 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6164 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6165 auto Info =
6166 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6167 Value *Kernel =
6168 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6169 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6170 return RValue::get(EmitRuntimeCall(
6171 CGM.CreateRuntimeFunction(
6172 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6173 false),
6174 "__get_kernel_preferred_work_group_size_multiple_impl"),
6175 {Kernel, Arg}));
6176 }
6177 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6178 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6179 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6180 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6181 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6182 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6183 auto Info =
6184 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6185 Value *Kernel =
6186 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6187 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6188 const char *Name =
6189 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6190 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6191 : "__get_kernel_sub_group_count_for_ndrange_impl";
6192 return RValue::get(EmitRuntimeCall(
6193 CGM.CreateRuntimeFunction(
6194 llvm::FunctionType::get(
6195 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6196 false),
6197 Name),
6198 {NDRange, Kernel, Block}));
6199 }
6200 case Builtin::BI__builtin_store_half:
6201 case Builtin::BI__builtin_store_halff: {
6202 Value *Val = EmitScalarExpr(E->getArg(0));
6203 Address Address = EmitPointerWithAlignment(E->getArg(1));
6204 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6205 Builder.CreateStore(HalfVal, Address);
6206 return RValue::get(nullptr);
6207 }
6208 case Builtin::BI__builtin_load_half: {
6209 Address Address = EmitPointerWithAlignment(E->getArg(0));
6210 Value *HalfVal = Builder.CreateLoad(Address);
6211 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6212 }
6213 case Builtin::BI__builtin_load_halff: {
6214 Address Address = EmitPointerWithAlignment(E->getArg(0));
6215 Value *HalfVal = Builder.CreateLoad(Address);
6216 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6217 }
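// Illustrative sketch, not part of the source (IR is schematic):
//   __builtin_store_halff(f, p) ->  %h = fptrunc float %f to half
//                                   store half %h, ptr %p
//   __builtin_load_half(p)      ->  %h = load half, ptr %p
//                                   %d = fpext half %h to double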
6218 case Builtin::BI__builtin_printf:
6219 case Builtin::BIprintf:
6220 if (getTarget().getTriple().isNVPTX() ||
6221 getTarget().getTriple().isAMDGCN() ||
6222 (getTarget().getTriple().isSPIRV() &&
6223 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6224 if (getTarget().getTriple().isNVPTX())
6225 return EmitNVPTXDevicePrintfCallExpr(E);
6226 if ((getTarget().getTriple().isAMDGCN() ||
6227 getTarget().getTriple().isSPIRV()) &&
6228 getLangOpts().HIP)
6229 return EmitAMDGPUDevicePrintfCallExpr(E);
6230 }
6231
6232 break;
6233 case Builtin::BI__builtin_canonicalize:
6234 case Builtin::BI__builtin_canonicalizef:
6235 case Builtin::BI__builtin_canonicalizef16:
6236 case Builtin::BI__builtin_canonicalizel:
6237 return RValue::get(
6238 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6239
6240 case Builtin::BI__builtin_thread_pointer: {
6241 if (!getContext().getTargetInfo().isTLSSupported())
6242 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6243
6244 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6245 {GlobalsInt8PtrTy}, {}));
6246 }
6247 case Builtin::BI__builtin_os_log_format:
6248 return emitBuiltinOSLogFormat(*E);
6249
6250 case Builtin::BI__xray_customevent: {
6251 if (!ShouldXRayInstrumentFunction())
6252 return RValue::getIgnored();
6253
6254 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6255 XRayInstrKind::Custom))
6256 return RValue::getIgnored();
6257
6258 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6259 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6260 return RValue::getIgnored();
6261
6262 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6263 auto FTy = F->getFunctionType();
6264 auto Arg0 = E->getArg(0);
6265 auto Arg0Val = EmitScalarExpr(Arg0);
6266 auto Arg0Ty = Arg0->getType();
6267 auto PTy0 = FTy->getParamType(0);
6268 if (PTy0 != Arg0Val->getType()) {
6269 if (Arg0Ty->isArrayType())
6270 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6271 else
6272 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6273 }
6274 auto Arg1 = EmitScalarExpr(E->getArg(1));
6275 auto PTy1 = FTy->getParamType(1);
6276 if (PTy1 != Arg1->getType())
6277 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6278 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6279 }
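// Illustrative sketch, not part of the source: __xray_customevent(buf, n)
// becomes a call to the llvm.xray.customevent intrinsic, with the buffer
// decayed to a pointer and the size truncated or bitcast to the
// intrinsic's parameter type; the call is dropped (RValue::getIgnored())
// whenever XRay custom-event emission is disabled for this function.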
6280
6281 case Builtin::BI__xray_typedevent: {
6282 // TODO: There should be a way to always emit events even if the current
6283 // function is not instrumented. Losing events in a stream can cripple
6284 // a trace.
6285 if (!ShouldXRayInstrumentFunction())
6286 return RValue::getIgnored();
6287
6288 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6289 XRayInstrKind::Typed))
6290 return RValue::getIgnored();
6291
6292 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6293 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6294 return RValue::getIgnored();
6295
6296 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6297 auto FTy = F->getFunctionType();
6298 auto Arg0 = EmitScalarExpr(E->getArg(0));
6299 auto PTy0 = FTy->getParamType(0);
6300 if (PTy0 != Arg0->getType())
6301 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6302 auto Arg1 = E->getArg(1);
6303 auto Arg1Val = EmitScalarExpr(Arg1);
6304 auto Arg1Ty = Arg1->getType();
6305 auto PTy1 = FTy->getParamType(1);
6306 if (PTy1 != Arg1Val->getType()) {
6307 if (Arg1Ty->isArrayType())
6308 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6309 else
6310 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6311 }
6312 auto Arg2 = EmitScalarExpr(E->getArg(2));
6313 auto PTy2 = FTy->getParamType(2);
6314 if (PTy2 != Arg2->getType())
6315 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6316 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6317 }
6318
6319 case Builtin::BI__builtin_ms_va_start:
6320 case Builtin::BI__builtin_ms_va_end:
6321 return RValue::get(
6322 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
6323 BuiltinID == Builtin::BI__builtin_ms_va_start));
6324
6325 case Builtin::BI__builtin_ms_va_copy: {
6326 // Lower this manually. We can't reliably determine whether or not any
6327 // given va_copy() is for a Win64 va_list from the calling convention
6328 // alone, because it's legal to do this from a System V ABI function.
6329 // With opaque pointer types, we won't have enough information in LLVM
6330 // IR to determine this from the argument types, either. Best to do it
6331 // now, while we have enough information.
6332 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6333 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6334
6335 DestAddr = DestAddr.withElementType(Int8PtrTy);
6336 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6337
6338 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6339 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6340 }
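// Illustrative sketch, not part of the source: because a Win64 va_list is
// just a char*, __builtin_ms_va_copy(dst, src) reduces to
//   %ap.val = load ptr, ptr %src
//   store ptr %ap.val, ptr %dst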
6341
6342 case Builtin::BI__builtin_get_device_side_mangled_name: {
6343 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6344 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6345 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6346 return RValue::get(Str.getPointer());
6347 }
6348 }
6349
6350 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6351 // the call using the normal call path, but using the unmangled
6352 // version of the function name.
6353 const auto &BI = getContext().BuiltinInfo;
6354 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6355 BI.isLibFunction(BuiltinID))
6356 return emitLibraryCall(*this, FD, E,
6357 CGM.getBuiltinLibFunction(FD, BuiltinID));
6358
6359 // If this is a predefined lib function (e.g. malloc), emit the call
6360 // using exactly the normal call path.
6361 if (BI.isPredefinedLibFunction(BuiltinID))
6362 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6363
6364 // Check that a call to a target specific builtin has the correct target
6365 // features.
6366 // This is down here to avoid non-target specific builtins, however, if
6367 // generic builtins start to require generic target features then we
6368 // can move this up to the beginning of the function.
6369 checkTargetFeatures(E, FD);
6370
6371 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6372 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6373
6374 // See if we have a target specific intrinsic.
6375 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6376 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6377 StringRef Prefix =
6378 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6379 if (!Prefix.empty()) {
6380 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6381 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6382 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6383 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6384 // NOTE: we don't need to perform a compatibility flag check here, since the
6385 // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
6386 // MS builtins via ALL_MS_LANGUAGES so they are filtered out earlier.
6387 if (IntrinsicID == Intrinsic::not_intrinsic)
6388 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6389 }
6390
6391 if (IntrinsicID != Intrinsic::not_intrinsic) {
6392 SmallVector<Value *, 16> Args;
6393
6394 // Find out if any arguments are required to be integer constant
6395 // expressions.
6396 unsigned ICEArguments = 0;
6397 ASTContext::GetBuiltinTypeError Error;
6398 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6399 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6400
6401 Function *F = CGM.getIntrinsic(IntrinsicID);
6402 llvm::FunctionType *FTy = F->getFunctionType();
6403
6404 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6405 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6406 // If the intrinsic arg type is different from the builtin arg type
6407 // we need to do a bit cast.
6408 llvm::Type *PTy = FTy->getParamType(i);
6409 if (PTy != ArgValue->getType()) {
6410 // XXX - vector of pointers?
6411 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6412 if (PtrTy->getAddressSpace() !=
6413 ArgValue->getType()->getPointerAddressSpace()) {
6414 ArgValue = Builder.CreateAddrSpaceCast(
6415 ArgValue, llvm::PointerType::get(getLLVMContext(),
6416 PtrTy->getAddressSpace()));
6417 }
6418 }
6419
6420 // Cast vector type (e.g., v256i32) to x86_amx; this only happens
6421 // in AMX intrinsics.
6422 if (PTy->isX86_AMXTy())
6423 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6424 {ArgValue->getType()}, {ArgValue});
6425 else
6426 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6427 }
6428
6429 Args.push_back(ArgValue);
6430 }
6431
6432 Value *V = Builder.CreateCall(F, Args);
6433 QualType BuiltinRetType = E->getType();
6434
6435 llvm::Type *RetTy = VoidTy;
6436 if (!BuiltinRetType->isVoidType())
6437 RetTy = ConvertType(BuiltinRetType);
6438
6439 if (RetTy != V->getType()) {
6440 // XXX - vector of pointers?
6441 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6442 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6443 V = Builder.CreateAddrSpaceCast(
6444 V, llvm::PointerType::get(getLLVMContext(),
6445 PtrTy->getAddressSpace()));
6446 }
6447 }
6448
6449 // Cast x86_amx to vector type (e.g., v256i32); this only happens
6450 // in AMX intrinsics.
6451 if (V->getType()->isX86_AMXTy())
6452 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6453 {V});
6454 else
6455 V = Builder.CreateBitCast(V, RetTy);
6456 }
6457
6458 if (RetTy->isVoidTy())
6459 return RValue::get(nullptr);
6460
6461 return RValue::get(V);
6462 }
6463
6464 // Some target-specific builtins can have aggregate return values, e.g.
6465 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6466 // ReturnValue to be non-null, so that the target-specific emission code can
6467 // always just emit into it.
6468 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
6469 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6470 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6471 ReturnValue = ReturnValueSlot(DestPtr, false);
6472 }
6473
6474 // Now see if we can emit a target-specific builtin.
6475 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6476 switch (EvalKind) {
6477 case TEK_Scalar:
6478 if (V->getType()->isVoidTy())
6479 return RValue::get(nullptr);
6480 return RValue::get(V);
6481 case TEK_Aggregate:
6482 return RValue::getAggregate(ReturnValue.getAddress(),
6483 ReturnValue.isVolatile());
6484 case TEK_Complex:
6485 llvm_unreachable("No current target builtin returns complex");
6486 }
6487 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6488 }
6489
6490 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6491 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6492 switch (EvalKind) {
6493 case TEK_Scalar:
6494 if (V->getType()->isVoidTy())
6495 return RValue::get(nullptr);
6496 return RValue::get(V);
6497 case TEK_Aggregate:
6498 return RValue::getAggregate(ReturnValue.getAddress(),
6499 ReturnValue.isVolatile());
6500 case TEK_Complex:
6501 llvm_unreachable("No current hlsl builtin returns complex");
6502 }
6503 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6504 }
6505
6506 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6507 return EmitHipStdParUnsupportedBuiltin(this, FD);
6508
6509 ErrorUnsupported(E, "builtin function");
6510
6511 // Unknown builtin, for now just dump it out and return undef.
6512 return GetUndefRValue(E->getType());
6513}
6514
6515namespace {
6516struct BuiltinAlignArgs {
6517 llvm::Value *Src = nullptr;
6518 llvm::Type *SrcType = nullptr;
6519 llvm::Value *Alignment = nullptr;
6520 llvm::Value *Mask = nullptr;
6521 llvm::IntegerType *IntType = nullptr;
6522
6523 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6524 QualType AstType = E->getArg(0)->getType();
6525 if (AstType->isArrayType())
6526 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6527 else
6528 Src = CGF.EmitScalarExpr(E->getArg(0));
6529 SrcType = Src->getType();
6530 if (SrcType->isPointerTy()) {
6531 IntType = IntegerType::get(
6532 CGF.getLLVMContext(),
6533 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6534 } else {
6535 assert(SrcType->isIntegerTy());
6536 IntType = cast<llvm::IntegerType>(SrcType);
6537 }
6538 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6539 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6540 auto *One = llvm::ConstantInt::get(IntType, 1);
6541 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6542 }
6543};
6544} // namespace
6545
6546/// Generate (x & (y-1)) == 0.
6547 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
6548 BuiltinAlignArgs Args(E, *this);
6549 llvm::Value *SrcAddress = Args.Src;
6550 if (Args.SrcType->isPointerTy())
6551 SrcAddress =
6552 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6553 return RValue::get(Builder.CreateICmpEQ(
6554 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6555 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6556}
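// Illustrative sketch, not part of the source: __builtin_is_aligned(p, 16)
// is emitted as
//   %src_addr   = ptrtoint ptr %p to i64
//   %set_bits   = and i64 %src_addr, 15        ; mask = alignment - 1
//   %is_aligned = icmp eq i64 %set_bits, 0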
6557
6558/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6559/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6560/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6561 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
6562 BuiltinAlignArgs Args(E, *this);
6563 llvm::Value *SrcForMask = Args.Src;
6564 if (AlignUp) {
6565 // When aligning up we have to first add the mask to ensure we go over the
6566 // next alignment value and then align down to the next valid multiple.
6567 // By adding the mask, we ensure that align_up on an already aligned
6568 // value will not change the value.
6569 if (Args.Src->getType()->isPointerTy()) {
6570 if (getLangOpts().PointerOverflowDefined)
6571 SrcForMask =
6572 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6573 else
6574 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6575 /*SignedIndices=*/true,
6576 /*isSubtraction=*/false,
6577 E->getExprLoc(), "over_boundary");
6578 } else {
6579 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6580 }
6581 }
6582 // Invert the mask to only clear the lower bits.
6583 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6584 llvm::Value *Result = nullptr;
6585 if (Args.Src->getType()->isPointerTy()) {
6586 Result = Builder.CreateIntrinsic(
6587 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6588 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6589 } else {
6590 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6591 }
6592 assert(Result->getType() == Args.SrcType);
6593 return RValue::get(Result);
6594}
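// Illustrative sketch, not part of the source: __builtin_align_up(p, 16)
// on a pointer is emitted as
//   %over_boundary  = getelementptr inbounds i8, ptr %p, i64 15
//   %aligned_result = call ptr @llvm.ptrmask.p0.i64(ptr %over_boundary,
//                                                   i64 -16) ; ~(16 - 1)
// while the integer variants use a plain add followed by and.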
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static Value * emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool)
Utility to insert an atomic cmpxchg instruction.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:790
Builtin::Context & BuiltinInfo
Definition ASTContext.h:792
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3735
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4141
Expr * getRHS() const
Definition Expr.h:4090
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3147
bool hasStoredFPFeatures() const
Definition Expr.h:3102
SourceLocation getBeginLoc() const
Definition Expr.h:3277
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3126
Expr * getCallee()
Definition Expr.h:3090
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3242
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3134
arg_range arguments()
Definition Expr.h:3195
CastKind getCastKind() const
Definition Expr.h:3720
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:147
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:184
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:173
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:132
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:350
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
virtual llvm::Value * getPipeElemAlign(const Expr *PipeArg)
virtual llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2856
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1188
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5092
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:411
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3825
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6887
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3715
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4591
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2710
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6281
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7857
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:226
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3973
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1300
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2215
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5248
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:5052
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4403
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2331
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1592
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:737
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1575
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4388
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4315
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2245
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1231
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:420
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4303
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1691
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1702
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:367
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
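The four factories correspond to the shapes a call result can take; a quick illustration (V, Real, Imag are llvm::Value* and Addr is an Address, all assumed to be in scope):

  RValue Scalar    = RValue::get(V);                  // single SSA value
  RValue Cmplx     = RValue::getComplex(Real, Imag);  // (real, imag) pair
  RValue Aggregate = RValue::getAggregate(Addr);      // result lives in memory
  RValue Ignored   = RValue::getIgnored();            // caller discards the value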
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query whether the target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target-specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
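A sketch of the testFPKind hook from the caller's side (assuming CGF/CGM and the llvm::Value *V plus BuiltinID being classified):

  // Give the target a chance to emit a native FP-class test; a nullptr
  // result means it declined and the generic lowering should run.
  if (llvm::Value *R = CGF.CGM.getTargetCodeGenInfo().testFPKind(
          V, BuiltinID, CGF.Builder, CGF.CGM))
    return RValue::get(R);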
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4388
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3437
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3467
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:459
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
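A sketch of reporting through it (assuming CGM and a SourceLocation Loc; the message text and argument are illustrative):

  DiagnosticsEngine &Diags = CGM.getDiags();
  unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
                                          "cannot lower builtin %0 here");
  Diags.Report(Loc, DiagID) << "__builtin_example";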
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3116
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3094
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3089
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:835
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3669
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4047
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:225
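These evaluation helpers drive most constant folding of builtin arguments; a minimal sketch (assuming an ASTContext &Ctx and a const Expr *Arg taken from a CallExpr):

  // Strip parens/implicit casts, then try to fold to an integer constant.
  const Expr *Stripped = Arg->IgnoreParenImpCasts();
  Expr::EvalResult Result;
  if (Stripped->EvaluateAsInt(Result, Ctx)) {
    llvm::APSInt Val = Result.Val.getInt();
    // ... emit the builtin with Val known at compile time ...
  }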
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3160
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4851
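A sketch of recovering the bound for a counted_by flexible array member (assuming a MemberExpr *ME that names the array field):

  // The member decl is the flexible array member; counted_by names a sibling
  // field whose value gives the element count.
  if (const auto *FAM = llvm::dyn_cast<FieldDecl>(ME->getMemberDecl()))
    if (const FieldDecl *CountFD = FAM->findCountedByField()) {
      // Load CountFD from the enclosing struct to bound accesses to FAM.
      (void)CountFD;
    }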
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3758
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5536
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3447
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2199
PipeType - Represents an OpenCL 2.0 pipe type.
Definition TypeBase.h:8111
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8377
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8419
Represents a struct/union/class.
Definition Decl.h:4321
field_range fields() const
Definition Decl.h:4524
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:747
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8550
bool isVoidType() const
Definition TypeBase.h:8892
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2206
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8629
bool isCountAttributedType() const
Definition Type.cpp:742
bool isPointerType() const
Definition TypeBase.h:8530
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8936
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9179
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9112
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4176
QualType getElementType() const
Definition TypeBase.h:4190
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
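A sketch of consuming the computed layout (assuming an ASTContext &Ctx and the CallExpr *E for an os_log builtin):

  analyze_os_log::OSLogBufferLayout Layout;
  if (analyze_os_log::computeOSLogBufferLayout(Ctx, E, Layout)) {
    unsigned char Summary = Layout.getSummaryByte();  // header byte 0
    unsigned char NumArgs = Layout.getNumArgsByte();  // header byte 1
    for (const auto &Item : Layout.Items) {
      // Each Item describes one argument's kind, size, and privacy flags.
      (void)Item;
    }
    (void)Summary; (void)NumArgs;
  }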
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:350
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Expr * Cond
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
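A sketch of toggling checks with these two (SanitizerSet is the per-function mask kept in CodeGenFunction::SanOpts; the specific kinds used here are illustrative):

  SanitizerSet SanOpts;
  SanOpts.set(SanitizerKind::Null, true);       // enable null-pointer checks
  SanOpts.set(SanitizerKind::Alignment, true);  // enable alignment checks
  SanOpts.clear(SanitizerKind::Alignment);      // mask one back off
  bool ChecksNull = SanOpts.has(SanitizerKind::Null);  // still true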
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742
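These tgmath.h macros are C's type-generic math interface: each expands to a dispatch on the argument's type, selecting the float, double, long double, or complex variant of the underlying function (clang's header builds them from overloadable helper functions). C++ reaches the equivalent behavior through the <cmath> overload set, as in this small illustration:

  #include <cmath>
  int main() {
    float f = std::sin(0.5f);        // float overload, as sinf would be in C
    double d = std::sin(0.5);        // double overload
    long double l = std::sin(0.5L);  // long double overload
    (void)f; (void)d; (void)l;
    return 0;
  }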