//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBuiltin.h"
#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/OSLog.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include <optional>
#include <utility>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Some builtins do not have a library implementation on some targets and
/// are instead emitted as LLVM IR by the target builtin emitters.
/// FIXME: Remove this when library support is added.
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
                                  const Builtin::Context &BI,
                                  const CodeGenFunction &CGF) {
  if (!CGF.CGM.getLangOpts().MathErrno &&
    switch (BuiltinID) {
    default:
      return false;
    case Builtin::BIlogbf:
    case Builtin::BI__builtin_logbf:
    case Builtin::BIlogb:
    case Builtin::BI__builtin_logb:
    case Builtin::BIscalbnf:
    case Builtin::BI__builtin_scalbnf:
    case Builtin::BIscalbn:
    case Builtin::BI__builtin_scalbn:
      return true;
    }
  }
  return false;
}
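
// Illustrative example (not part of the emitter): on a target whose device
// library provides no logbf, compiling
//   float r = __builtin_logbf(x);   // with -fno-math-errno
// takes this path, and the target's builtin emitter expands the call inline
// as LLVM IR instead of emitting a libcall to logbf.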

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue,
                                        llvm::Triple::ArchType Arch) {
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  // Returning nullptr here leads to the builtin being handled in
  // EmitStdParUnsupportedBuiltin.
  if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
      Arch != CGF->getTarget().getTriple().getArch())
    return nullptr;

  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
  case llvm::Triple::dxil:
    return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
    if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
      return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
    [[fallthrough]];
  case llvm::Triple::spirv:
    return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
                                   getTarget().getTriple().getArch());
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary). We may have
  // to build this up so provide a small stack buffer to handle the vast
  // majority of names.
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to targets other than PPC,
    // after the backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.contains(BuiltinID))
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.contains(BuiltinID))
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

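// Illustrative example (not part of the lowering): on most targets
// __builtin_fabsf simply resolves to the library function "fabsf". On a
// PPC64 target whose 'long double' is IEEE quad, __builtin_printf is instead
// redirected to "__printfieee128" via the F128Builtins table above.
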
/// Emit the conversions required to turn the given value into an
/// integer of the given size.
Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                 QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                   QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

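// Illustrative example (not part of the lowering): for an atomic builtin on
// a pointer, e.g.
//   void *old = __sync_lock_test_and_set(&p, q);
// EmitToInt inserts a ptrtoint so the atomicrmw can operate on an integer,
// and EmitFromInt converts the result back with inttoptr.
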
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
                       : DL.getTypeStoreSize(Ptr.getElementType());
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
    // Force address to be at least naturally-aligned.
    return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
  }
  return Ptr;
}

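// Illustrative example (not part of the lowering): given
//   char buf[8];
//   __sync_fetch_and_add((int *)(buf + 1), 1);
// the 1-byte-aligned address fails the 'Align % Bytes' check, so the
// warn_sync_op_misaligned diagnostic fires and the address is forced to the
// int's natural 4-byte alignment.
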
/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}

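// Illustrative example (not part of the lowering):
//   int old = __sync_fetch_and_add(&x, 5);
// reaches this helper with Kind == AtomicRMWInst::Add and emits
//   %old = atomicrmw add ptr %x, i32 5 seq_cst
// returning the value the memory held before the addition.
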
static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0));

  LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Val);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

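// Illustrative example (not part of the lowering):
//   int now = __sync_add_and_fetch(&x, 1);
// emits the atomicrmw and then re-applies the operation to the returned old
// value to produce the post-operation result; Invert is used for
// __sync_nand_and_fetch, which must additionally flip the computed bits.
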
/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E Builtin call expression to convert to cmpxchg.
///        arg0 - address to operate on
///        arg1 - value to compare with
///        arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///        cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: To lower Microsoft's _InterlockedCompareExchange* intrinsics,
/// invoke the function EmitAtomicCmpXchgForMSIntrin instead.
Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                              bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Cmp->getType();
  Cmp = EmitToInt(CGF, Cmp, T, IntType);
  Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}

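// Illustrative example (not part of the lowering):
//   bool ok = __sync_bool_compare_and_swap(&x, expected, desired);
// emits a seq_cst cmpxchg and, with ReturnBool == true, extracts the i1
// success flag and zero-extends it; __sync_val_compare_and_swap instead
// extracts and returns the old value.
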
/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
  auto *RTy = Exchange->getType();

  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));

  if (RTy->isPointerTy()) {
    Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
    Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
  }

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  CmpXchg->setVolatile(true);

  auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
  if (RTy->isPointerTy()) {
    Result = CGF.Builder.CreateIntToPtr(Result, RTy);
  }

  return Result;
}

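// Illustrative example (not part of the lowering):
//   long old = _InterlockedCompareExchange(&x, Exchange, Comparand);
// becomes roughly
//   %pair = cmpxchg volatile ptr %x, i32 %Comparand, i32 %Exchange seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0
// with the operand order swapped relative to the MSVC prototype.
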
// 64-bit Microsoft platforms support 128-bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
//
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed int64.

static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));

  assert(DestPtr->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values. Alignment is also overridden for
  // destination pointer.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address DestAddr(DestPtr, Int128Ty,
                   CGF.getContext().toCharUnitsFromBits(128));
  ComparandAddr = ComparandAddr.withElementType(Int128Ty);

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandAddr);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
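
// Illustrative example (not part of the lowering): for
//   unsigned char ok = _InterlockedCompareExchange128(dst, hi, lo, &expected);
// the two 64-bit halves are packed into an i128 as ((i128)hi << 64) | lo, a
// 16-byte-aligned volatile cmpxchg is emitted, the old value is stored back
// through the comparand pointer, and the success bit is returned as an i8.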
537
539 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
540 assert(E->getArg(0)->getType()->isPointerType());
541
542 auto *IntTy = CGF.ConvertType(E->getType());
543 Address DestAddr = CheckAtomicAlignment(CGF, E);
544 auto *Result = CGF.Builder.CreateAtomicRMW(
545 AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
546 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
547}
548
static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}

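// Illustrative example (not part of the lowering):
//   long n = _InterlockedIncrement(&x);
// emits an atomicrmw add of 1 and then adds 1 again to the returned old
// value, because the MSVC intrinsic returns the *new* value while atomicrmw
// returns the old one; _InterlockedDecrement is handled symmetrically.
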
// Build a plain volatile load.
Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setVolatile(true);
  return Store;
}

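// Illustrative example (not part of the lowering):
//   int v = __iso_volatile_load32((const volatile int *)p);
// emits a plain volatile i32 load (and __iso_volatile_store32 a volatile
// store) with no implied memory barrier, matching the MSVC semantics of the
// __iso_volatile_* family.
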
// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Emit an intrinsic where the second argument's type also participates in the
// intrinsic's mangled name.
static Value *
emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                       Intrinsic::ID IntrinsicID,
                                       Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}

static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                              Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));

  llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Val);

  llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
  LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);

  llvm::StoreInst *StoreSin =
      CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
  llvm::StoreInst *StoreCos =
      CGF.Builder.CreateStore(CosResult, CosLV.getAddress());

  // Mark the two stores as non-aliasing with each other. The order of stores
  // emitted by this builtin is arbitrary; enforcing a particular order would
  // prevent optimizations later on.
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
  MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
  MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
  StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
  StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
}

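// Illustrative example (not part of the lowering):
//   __builtin_sincosf(x, &s, &c);
// becomes a single call to llvm.sincos.f32 followed by two stores whose
// alias-scope metadata marks them as non-aliasing, so their relative order
// does not constrain later optimizations.
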
static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                    Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));

  llvm::Value *Call =
      CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);

  llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
  CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);

  return FractionalResult;
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}

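// Illustrative example (not part of the lowering): for a double, the result
// here is equivalent to signbit(-0.0) != 0; the value is bitcast to i64 and
// compared with 'icmp slt' against 0, which is true for -0.0 even though
// -0.0 == 0.0 compares equal as a floating-point comparison.
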
/// Checks that no arguments or results are passed indirectly in the ABI (i.e.
/// via a hidden pointer). This is used to check that annotating FP libcalls
/// (that could set `errno`) with "int" TBAA metadata is safe. If any
/// floating-point arguments are passed indirectly, setup for the call could
/// be incorrectly optimized out.
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) {
  auto IsIndirect = [&](ABIArgInfo const &info) {
    return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
  };
  return !IsIndirect(FnInfo.getReturnInfo()) &&
         llvm::none_of(FnInfo.arguments(),
                       [&](CGFunctionInfoArgInfo const &ArgInfo) {
                         return IsIndirect(ArgInfo.info);
                       });
}
802
804 const CallExpr *E, llvm::Constant *calleeValue) {
805 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
806 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
807 llvm::CallBase *callOrInvoke = nullptr;
808 CGFunctionInfo const *FnInfo = nullptr;
809 RValue Call =
810 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
811 /*Chain=*/nullptr, &callOrInvoke, &FnInfo);
812
813 if (unsigned BuiltinID = FD->getBuiltinID()) {
814 // Check whether a FP math builtin function, such as BI__builtin_expf
815 ASTContext &Context = CGF.getContext();
816 bool ConstWithoutErrnoAndExceptions =
818 // Restrict to target with errno, for example, MacOS doesn't set errno.
819 // TODO: Support builtin function with complex type returned, eg: cacosh
820 if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
821 !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
823 // Emit "int" TBAA metadata on FP math libcalls.
824 clang::QualType IntTy = Context.IntTy;
825 TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
826 CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
827 }
828 }
829 return Call;
830}
831
/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}

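// Illustrative example (not part of the lowering):
//   unsigned sum;
//   bool ovf = __builtin_uadd_overflow(a, b, &sum);
// maps onto llvm.uadd.with.overflow.i32: the first extractvalue is the sum
// and the second is the overflow bit handed back through the Carry
// out-parameter of this helper.
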
namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = context.getIntWidth(Type);
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}

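// Worked example (illustrative): for the pair {signed 32-bit, unsigned
// 32-bit} the result must be signed, and the unsigned member then needs
// 32 + 1 bits, so the encompassing type is {Width = 33, Signed = true};
// a lone signed 16-bit type stays {16, true}.
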
Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
                            ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

namespace {

/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
class StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  bool AddrOfSeen = false;

public:
  const Expr *ArrayIndex = nullptr;
  QualType ArrayElementTy;

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (AddrOfSeen && E->getType()->isArrayType())
      // Avoid forms like '&ptr->array'.
      return nullptr;
    return E;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (ArrayIndex)
      // We don't support multiple subscripts.
      return nullptr;

    AddrOfSeen = false; // '&ptr->array[idx]' is okay.
    ArrayIndex = E->getIdx();
    ArrayElementTy = E->getBase()->getType();
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
    AddrOfSeen = true;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
    AddrOfSeen = false;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
    return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
  }
};

} // end anonymous namespace

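// Illustrative example (not part of the visitor): for
//   __builtin_dynamic_object_size(&ptr->array[idx], 0)
// the visitor walks through the address-of and the array subscript
// (recording 'idx' in ArrayIndex) and returns the MemberExpr for
// 'ptr->array'.
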
/// Find a struct's flexible array member. It may be embedded inside multiple
/// sub-structs, but must still be the last field.
static const FieldDecl *FindFlexibleArrayMemberField(CodeGenFunction &CGF,
                                                     ASTContext &Ctx,
                                                     const RecordDecl *RD) {
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      CGF.getLangOpts().getStrictFlexArraysLevel();

  if (RD->isImplicit())
    return nullptr;

  for (const FieldDecl *FD : RD->fields()) {
    if (Decl::isFlexibleArrayMemberLike(
            Ctx, FD, FD->getType(), StrictFlexArraysLevel,
            /*IgnoreTemplateOrMacroSubstitution=*/true))
      return FD;

    if (const auto *RD = FD->getType()->getAsRecordDecl())
      if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
        return FD;
  }

  return nullptr;
}

/// Calculate the offset of a struct field. It may be embedded inside multiple
/// sub-structs.
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
                           const FieldDecl *FD, int64_t &Offset) {
  if (RD->isImplicit())
    return false;

  // Keep track of the field number ourselves, because the other methods
  // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
  // is laid out.
  uint32_t FieldNo = 0;
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);

  for (const FieldDecl *Field : RD->fields()) {
    if (Field == FD) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    if (const auto *RD = Field->getType()->getAsRecordDecl()) {
      if (GetFieldOffset(Ctx, RD, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }
    }

    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}

static std::optional<int64_t>
GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
  int64_t Offset = 0;

  if (GetFieldOffset(Ctx, RD, FD, Offset))
    return std::optional<int64_t>(Offset);

  return std::nullopt;
}

llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
                                                llvm::Value *EmittedE,
                                                unsigned Type,
                                                llvm::IntegerType *ResType) {
  // Note: if the whole struct is specified in the __bdos (i.e. the Visitor
  // returns a DeclRefExpr), the calculation of the whole size of the structure
  // with a flexible array member can be done in two ways:
  //
  //     1) sizeof(struct S) + count * sizeof(typeof(fam))
  //     2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
  //
  // The first will add additional padding after the end of the array
  // allocation while the second method is more precise, but not quite expected
  // from programmers. See
  // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
  // of the topic.
  //
  // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
  // structure. Therefore, because of the above issue, we choose to match what
  // GCC does for consistency's sake.

  StructFieldAccess Visitor;
  E = Visitor.Visit(E);
  if (!E)
    return nullptr;

  const Expr *Idx = Visitor.ArrayIndex;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
      int64_t Val = IL->getValue().getSExtValue();
      if (Val < 0)
        return getDefaultBuiltinObjectSizeResult(Type, ResType);

      // The index is 0, so we don't need to take it into account.
      if (Val == 0)
        Idx = nullptr;
    }
  }

  // __counted_by on either a flexible array member or a pointer into a struct
  // with a flexible array member.
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
                                   Type, ResType);

  // __counted_by on a pointer in a struct.
  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
      ICE && ICE->getCastKind() == CK_LValueToRValue)
    return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
                                    Type, ResType);

  return nullptr;
}
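
// Illustrative example (not part of the lowering): given
//   struct s { int count; int fam[] __attribute__((counted_by(count))); };
//   size_t n = __builtin_dynamic_object_size(p->fam, 0);
// n is computed at run time as p->count * sizeof(int), clamped to 0 if that
// result would be negative.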

static llvm::Value *EmitPositiveResultOrZero(CodeGenFunction &CGF,
                                             llvm::Value *Res,
                                             llvm::Value *Index,
                                             llvm::IntegerType *ResType,
                                             bool IsSigned) {
  // cmp = (array_size >= 0)
  Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
  if (Index)
    // cmp = (cmp && index >= 0)
    Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);

  // return cmp ? result : 0
  return CGF.Builder.CreateSelect(Cmp, Res,
                                  ConstantInt::get(ResType, 0, IsSigned));
}

static std::pair<llvm::Value *, llvm::Value *>
GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME,
                      const FieldDecl *ArrayFD, const FieldDecl *CountFD,
                      const Expr *Idx, llvm::IntegerType *ResType,
                      bool IsSigned) {
  // count = ptr->count;
  Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
  if (!Count)
    return std::make_pair<Value *>(nullptr, nullptr);
  Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");

  // index = ptr->index;
  Value *Index = nullptr;
  if (Idx) {
    bool IdxSigned = Idx->getType()->isSignedIntegerType();
    Index = CGF.EmitScalarExpr(Idx);
    Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
  }

  return std::make_pair(Count, Index);
}

llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   struct p **array __attribute__((counted_by(count)));
  //   int count;
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    result = array_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //    index_size = index * casted_array_element_size;
  //    result = array_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index)
  //      cmp = (cmp && index >= 0)
  //    return cmp ? result : 0;

  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have a CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}

llvm::Value *CodeGenFunction::emitCountedByMemberSize(
    const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD)
    return nullptr;

  // Find the flexible array member and check that it has the __counted_by
  // attribute.
  ASTContext &Ctx = getContext();
  const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
  const FieldDecl *FlexibleArrayMemberFD = nullptr;

  if (Decl::isFlexibleArrayMemberLike(
          Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
          /*IgnoreTemplateOrMacroSubstitution=*/true))
    FlexibleArrayMemberFD = FD;
  else
    FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);

  if (!FlexibleArrayMemberFD ||
      !FlexibleArrayMemberFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the flexible array member's object size using these formulae.
  // (Note: if the calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   int count;
  //   struct p *array[] __attribute__((counted_by(count)));
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    result = flexible_array_member_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    casted_flexible_array_member_element_size =
  //        sizeof (*((cast) ptr->array));
  //    index_size = index * casted_flexible_array_member_element_size;
  //
  //    result = flexible_array_member_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index != 0)
  //      cmp = (cmp && index >= 0)
  //    return cmp ? result : 0;
  //
  // 3) '&ptr->field':
  //
  //    count = ptr->count;
  //    sizeof_struct = sizeof (struct s);
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    field_offset = offsetof (struct s, field);
  //    offset_diff = sizeof_struct - field_offset;
  //
  //    result = offset_diff + flexible_array_member_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 4) '&((cast) ptr->field_array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //    sizeof_struct = sizeof (struct s);
  //
  //    flexible_array_member_element_size = sizeof (*ptr->array);
  //    flexible_array_member_size =
  //        count * flexible_array_member_element_size;
  //
  //    casted_field_element_size = sizeof (*((cast) ptr->field_array));
  //    field_offset = offsetof (struct s, field)
  //    field_offset += index * casted_field_element_size;
  //
  //    offset_diff = sizeof_struct - field_offset;
  //
  //    result = offset_diff + flexible_array_member_size;
  //
  //    cmp = (result >= 0)
  //    if (index != 0)
  //      cmp = (cmp && index >= 0)
  //    return cmp ? result : 0;

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();

  // Explicit cast because otherwise the CharWidth will promote an i32's into
  // u64's leading to overflows.
  int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());

  // field_offset = offsetof (struct s, field);
  Value *FieldOffset = nullptr;
  if (FlexibleArrayMemberFD != FD) {
    std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
    if (!Offset)
      return nullptr;
    FieldOffset =
        llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
  }

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // flexible_array_member_element_size = sizeof (*ptr->array);
  const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
  CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
  auto *FlexibleArrayMemberElementSize =
      llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

  // flexible_array_member_size = count * flexible_array_member_element_size;
  Value *FlexibleArrayMemberSize =
      Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
                        "flexible_array_member_size", !IsSigned, IsSigned);

  Value *Result = nullptr;
  if (FlexibleArrayMemberFD == FD) {
    if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
      // casted_flexible_array_member_element_size =
      //     sizeof (*((cast) ptr->array));
      llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
          FlexibleArrayMemberElementSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        CharUnits BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
        CastedFlexibleArrayMemberElementSize =
            llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
      }

      // index_size = index * casted_flexible_array_member_element_size;
      Value *IndexSize =
          Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
                            "index_size", !IsSigned, IsSigned);

      // result = flexible_array_member_size - index_size;
      Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
                                 !IsSigned, IsSigned);
    } else { // Option (1) 'ptr->array'
      // result = flexible_array_member_size;
      Result = FlexibleArrayMemberSize;
    }
  } else {
    // sizeof_struct = sizeof (struct s);
    llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
    const llvm::DataLayout &Layout = CGM.getDataLayout();
    TypeSize Size = Layout.getTypeSizeInBits(StructTy);
    Value *SizeofStruct =
        llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);

    if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
      // casted_field_element_size = sizeof (*((cast) ptr->field_array));
      CharUnits BaseSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
      } else {
        const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
        BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
      }

      llvm::ConstantInt *CastedFieldElementSize =
          llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

      // field_offset += index * casted_field_element_size;
      Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
                                     "field_offset", !IsSigned, IsSigned);
      FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
    }
    // Option (3) '&ptr->field', and Option (4) continuation.
    // offset_diff = sizeof_struct - field_offset;
    Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
                                          "offset_diff", !IsSigned, IsSigned);

    // result = offset_diff + flexible_array_member_size;
    Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}

1495/// Returns a Value corresponding to the size of the given expression.
1496/// This Value may be either of the following:
1497/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1498/// it)
1499/// - A call to the @llvm.objectsize intrinsic
1500///
1501/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1502/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1503/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1504llvm::Value *
1505CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1506 llvm::IntegerType *ResType,
1507 llvm::Value *EmittedE, bool IsDynamic) {
1508 // We need to reference an argument if the pointer is a parameter with the
1509 // pass_object_size attribute.
1510 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1511 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1512 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1513 if (Param != nullptr && PS != nullptr &&
1514 areBOSTypesCompatible(PS->getType(), Type)) {
1515 auto Iter = SizeArguments.find(Param);
1516 assert(Iter != SizeArguments.end());
1517
1518 const ImplicitParamDecl *D = Iter->second;
1519 auto DIter = LocalDeclMap.find(D);
1520 assert(DIter != LocalDeclMap.end());
1521
1522 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1523 getContext().getSizeType(), E->getBeginLoc());
1524 }
1525 }
1526
1527 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1528 // evaluate E for side-effects. In either case, we shouldn't lower to
1529 // @llvm.objectsize.
1530 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1531 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1532
1533 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1534 assert(Ptr->getType()->isPointerTy() &&
1535 "Non-pointer passed to __builtin_object_size?");
1536
1537 if (IsDynamic)
1538 // Emit special code for a flexible array member with the "counted_by"
1539 // attribute.
1540 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1541 return V;
1542
1543 Function *F =
1544 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1545
1546 // LLVM only supports 0 and 2, so make sure we pass that along as a boolean.
1547 Value *Min = Builder.getInt1((Type & 2) != 0);
1548 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
1549 Value *NullIsUnknown = Builder.getTrue();
1550 Value *Dynamic = Builder.getInt1(IsDynamic);
1551 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1552}
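// Example (illustrative): for `__builtin_object_size(p, 2)` this emits roughly
// `call i64 @llvm.objectsize.i64.p0(ptr %p, i1 true, i1 true, i1 false)`:
// Min is true because (2 & 2) != 0, null is treated as unknown, and the call
// is non-dynamic.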
1553
1554namespace {
1555/// A struct to generically describe a bit test intrinsic.
1556struct BitTest {
1557 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
1558 enum InterlockingKind : uint8_t {
1559 Unlocked,
1560 Sequential,
1561 Acquire,
1562 Release,
1563 NoFence
1564 };
1565
1566 ActionKind Action;
1567 InterlockingKind Interlocking;
1568 bool Is64Bit;
1569
1570 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1571};
1572
1573} // namespace
1574
1575BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1576 switch (BuiltinID) {
1577 // Main portable variants.
1578 case Builtin::BI_bittest:
1579 return {TestOnly, Unlocked, false};
1580 case Builtin::BI_bittestandcomplement:
1581 return {Complement, Unlocked, false};
1582 case Builtin::BI_bittestandreset:
1583 return {Reset, Unlocked, false};
1584 case Builtin::BI_bittestandset:
1585 return {Set, Unlocked, false};
1586 case Builtin::BI_interlockedbittestandreset:
1587 return {Reset, Sequential, false};
1588 case Builtin::BI_interlockedbittestandset:
1589 return {Set, Sequential, false};
1590
1591 // 64-bit variants.
1592 case Builtin::BI_bittest64:
1593 return {TestOnly, Unlocked, true};
1594 case Builtin::BI_bittestandcomplement64:
1595 return {Complement, Unlocked, true};
1596 case Builtin::BI_bittestandreset64:
1597 return {Reset, Unlocked, true};
1598 case Builtin::BI_bittestandset64:
1599 return {Set, Unlocked, true};
1600 case Builtin::BI_interlockedbittestandreset64:
1601 return {Reset, Sequential, true};
1602 case Builtin::BI_interlockedbittestandset64:
1603 return {Set, Sequential, true};
1604
1605 // ARM/AArch64-specific ordering variants.
1606 case Builtin::BI_interlockedbittestandset_acq:
1607 return {Set, Acquire, false};
1608 case Builtin::BI_interlockedbittestandset_rel:
1609 return {Set, Release, false};
1610 case Builtin::BI_interlockedbittestandset_nf:
1611 return {Set, NoFence, false};
1612 case Builtin::BI_interlockedbittestandreset_acq:
1613 return {Reset, Acquire, false};
1614 case Builtin::BI_interlockedbittestandreset_rel:
1615 return {Reset, Release, false};
1616 case Builtin::BI_interlockedbittestandreset_nf:
1617 return {Reset, NoFence, false};
1618 case Builtin::BI_interlockedbittestandreset64_acq:
1619 return {Reset, Acquire, false};
1620 case Builtin::BI_interlockedbittestandreset64_rel:
1621 return {Reset, Release, false};
1622 case Builtin::BI_interlockedbittestandreset64_nf:
1623 return {Reset, NoFence, false};
1624 case Builtin::BI_interlockedbittestandset64_acq:
1625 return {Set, Acquire, false};
1626 case Builtin::BI_interlockedbittestandset64_rel:
1627 return {Set, Release, false};
1628 case Builtin::BI_interlockedbittestandset64_nf:
1629 return {Set, NoFence, false};
1630 }
1631 llvm_unreachable("expected only bittest intrinsics");
1632}
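// Example (illustrative): `_interlockedbittestandset64` decodes to
// {Set, Sequential, /*Is64Bit=*/true}, i.e. an atomic, sequentially
// consistent bit-set on a 64-bit word.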
1633
1634static char bitActionToX86BTCode(BitTest::ActionKind A) {
1635 switch (A) {
1636 case BitTest::TestOnly: return '\0';
1637 case BitTest::Complement: return 'c';
1638 case BitTest::Reset: return 'r';
1639 case BitTest::Set: return 's';
1640 }
1641 llvm_unreachable("invalid action");
1642}
1643
1644static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
1645 BitTest BT,
1646 const CallExpr *E, Value *BitBase,
1647 Value *BitPos) {
1648 char Action = bitActionToX86BTCode(BT.Action);
1649 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1650
1651 // Build the assembly.
1652 SmallString<64> Asm;
1653 raw_svector_ostream AsmOS(Asm);
1654 if (BT.Interlocking != BitTest::Unlocked)
1655 AsmOS << "lock ";
1656 AsmOS << "bt";
1657 if (Action)
1658 AsmOS << Action;
1659 AsmOS << SizeSuffix << " $2, ($1)";
1660
1661 // Build the constraints. FIXME: We should support immediates when possible.
1662 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1663 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1664 if (!MachineClobbers.empty()) {
1665 Constraints += ',';
1666 Constraints += MachineClobbers;
1667 }
1668 llvm::IntegerType *IntType = llvm::IntegerType::get(
1669 CGF.getLLVMContext(),
1670 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1671 llvm::FunctionType *FTy =
1672 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1673
1674 llvm::InlineAsm *IA =
1675 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1676 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1677}
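// Example (illustrative): for `_interlockedbittestandset` the code above
// builds the asm string "lock btsl $2, ($1)" with constraints
// "={@ccc},r,r,~{cc},~{memory}" (plus any target clobbers), returning the
// tested bit through the carry flag as an i8.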
1678
1679static llvm::AtomicOrdering
1680getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1681 switch (I) {
1682 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1683 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1684 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1685 case BitTest::Release: return llvm::AtomicOrdering::Release;
1686 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1687 }
1688 llvm_unreachable("invalid interlocking");
1689}
1690
1691static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1692 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1693 llvm::Type *ArgType = ArgValue->getType();
1694
1695 // Boolean vectors can be cast directly to their bitfield representation. We
1696 // intentionally do not round up to the next power of two size and let LLVM
1697 // handle the trailing bits.
1698 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1699 VT && VT->getElementType()->isIntegerTy(1)) {
1700 llvm::Type *StorageType =
1701 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1702 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1703 }
1704
1705 return ArgValue;
1706}
1707
1708/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1709/// bits and a bit position and read and optionally modify the bit at that
1710/// position. The position index can be arbitrarily large, i.e. it can be larger
1711/// than 31 or 63, so we need an indexed load in the general case.
1712static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1713 unsigned BuiltinID,
1714 const CallExpr *E) {
1715 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1716 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1717
1718 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1719
1720 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1721 // indexing operation internally. Use them if possible.
1722 if (CGF.getTarget().getTriple().isX86())
1723 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1724
1725 // Otherwise, use generic code to load one byte and test the bit. Use all but
1726 // the bottom three bits as the array index, and the bottom three bits to form
1727 // a mask.
1728 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1729 Value *ByteIndex = CGF.Builder.CreateAShr(
1730 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1731 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1732 "bittest.byteaddr"),
1733 CGF.Int8Ty, CharUnits::One());
1734 Value *PosLow =
1735 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1736 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1737
1738 // The updating instructions will need a mask.
1739 Value *Mask = nullptr;
1740 if (BT.Action != BitTest::TestOnly) {
1741 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1742 "bittest.mask");
1743 }
1744
1745 // Check the action and ordering of the interlocked intrinsics.
1746 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1747
1748 Value *OldByte = nullptr;
1749 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1750 // Emit a combined atomicrmw load/store operation for the interlocked
1751 // intrinsics.
1752 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1753 if (BT.Action == BitTest::Reset) {
1754 Mask = CGF.Builder.CreateNot(Mask);
1755 RMWOp = llvm::AtomicRMWInst::And;
1756 }
1757 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1758 } else {
1759 // Emit a plain load for the non-interlocked intrinsics.
1760 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1761 Value *NewByte = nullptr;
1762 switch (BT.Action) {
1763 case BitTest::TestOnly:
1764 // Don't store anything.
1765 break;
1766 case BitTest::Complement:
1767 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1768 break;
1769 case BitTest::Reset:
1770 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1771 break;
1772 case BitTest::Set:
1773 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1774 break;
1775 }
1776 if (NewByte)
1777 CGF.Builder.CreateStore(NewByte, ByteAddr);
1778 }
1779
1780 // However we loaded the old byte, either by plain load or atomicrmw, shift
1781 // the bit into the low position and mask it to 0 or 1.
1782 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1783 return CGF.Builder.CreateAnd(
1784 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1785}
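// Example (illustrative): on a non-x86 target, `_bittest(base, 70)` loads the
// byte at base[70 >> 3] == base[8] and extracts bit (70 & 0x7) == 6 of it.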
1786
1787namespace {
1788enum class MSVCSetJmpKind {
1789 _setjmpex,
1790 _setjmp3,
1791 _setjmp
1792};
1793}
1794
1795/// MSVC handles setjmp a bit differently on different platforms. On every
1796/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1797/// parameters can be passed as variadic arguments, but we always pass none.
1798static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1799 const CallExpr *E) {
1800 llvm::Value *Arg1 = nullptr;
1801 llvm::Type *Arg1Ty = nullptr;
1802 StringRef Name;
1803 bool IsVarArg = false;
1804 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1805 Name = "_setjmp3";
1806 Arg1Ty = CGF.Int32Ty;
1807 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1808 IsVarArg = true;
1809 } else {
1810 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1811 Arg1Ty = CGF.Int8PtrTy;
1812 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1813 Arg1 = CGF.Builder.CreateCall(
1814 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1815 } else
1816 Arg1 = CGF.Builder.CreateCall(
1817 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1818 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1819 }
1820
1821 // Mark the call site and declaration with ReturnsTwice.
1822 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1823 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1824 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1825 llvm::Attribute::ReturnsTwice);
1826 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1827 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1828 ReturnsTwiceAttr, /*Local=*/true);
1829
1830 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1831 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1832 llvm::Value *Args[] = {Buf, Arg1};
1833 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1834 CB->setAttributes(ReturnsTwiceAttr);
1835 return RValue::get(CB);
1836}
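// Example (illustrative): on 32-bit x86, `_setjmp(buf)` is emitted as a
// varargs call `_setjmp3(buf, 0)` marked returns_twice; on other targets the
// second argument is the frame address (or, on AArch64, the sponentry value).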
1837
1838// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1839Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1840 const CallExpr *E) {
1841 switch (BuiltinID) {
1842 case MSVCIntrin::_BitScanForward:
1843 case MSVCIntrin::_BitScanReverse: {
1844 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1845 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1846
1847 llvm::Type *ArgType = ArgValue->getType();
1848 llvm::Type *IndexType = IndexAddress.getElementType();
1849 llvm::Type *ResultType = ConvertType(E->getType());
1850
1851 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1852 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1853 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1854
1855 BasicBlock *Begin = Builder.GetInsertBlock();
1856 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1857 Builder.SetInsertPoint(End);
1858 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1859
1860 Builder.SetInsertPoint(Begin);
1861 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1862 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1863 Builder.CreateCondBr(IsZero, End, NotZero);
1864 Result->addIncoming(ResZero, Begin);
1865
1866 Builder.SetInsertPoint(NotZero);
1867
1868 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1869 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1870 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1871 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1872 Builder.CreateStore(ZeroCount, IndexAddress, false);
1873 } else {
1874 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1875 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1876
1877 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1878 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1879 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1880 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1881 Builder.CreateStore(Index, IndexAddress, false);
1882 }
1883 Builder.CreateBr(End);
1884 Result->addIncoming(ResOne, NotZero);
1885
1886 Builder.SetInsertPoint(End);
1887 return Result;
1888 }
1889 case MSVCIntrin::_InterlockedAnd:
1890 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1891 case MSVCIntrin::_InterlockedExchange:
1892 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1893 case MSVCIntrin::_InterlockedExchangeAdd:
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1895 case MSVCIntrin::_InterlockedExchangeSub:
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1897 case MSVCIntrin::_InterlockedOr:
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1899 case MSVCIntrin::_InterlockedXor:
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1901 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1903 AtomicOrdering::Acquire);
1904 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1905 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1906 AtomicOrdering::Release);
1907 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1908 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1909 AtomicOrdering::Monotonic);
1910 case MSVCIntrin::_InterlockedExchange_acq:
1911 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1912 AtomicOrdering::Acquire);
1913 case MSVCIntrin::_InterlockedExchange_rel:
1914 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1915 AtomicOrdering::Release);
1916 case MSVCIntrin::_InterlockedExchange_nf:
1917 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1918 AtomicOrdering::Monotonic);
1919 case MSVCIntrin::_InterlockedCompareExchange:
1920 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1921 case MSVCIntrin::_InterlockedCompareExchange_acq:
1922 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1923 case MSVCIntrin::_InterlockedCompareExchange_rel:
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1925 case MSVCIntrin::_InterlockedCompareExchange_nf:
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1927 case MSVCIntrin::_InterlockedCompareExchange128:
1928 return EmitAtomicCmpXchg128ForMSIntrin(
1929 *this, E, AtomicOrdering::SequentiallyConsistent);
1930 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1931 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1932 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1933 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1934 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1936 case MSVCIntrin::_InterlockedOr_acq:
1937 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1938 AtomicOrdering::Acquire);
1939 case MSVCIntrin::_InterlockedOr_rel:
1940 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1941 AtomicOrdering::Release);
1942 case MSVCIntrin::_InterlockedOr_nf:
1943 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1944 AtomicOrdering::Monotonic);
1945 case MSVCIntrin::_InterlockedXor_acq:
1946 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1947 AtomicOrdering::Acquire);
1948 case MSVCIntrin::_InterlockedXor_rel:
1949 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1950 AtomicOrdering::Release);
1951 case MSVCIntrin::_InterlockedXor_nf:
1952 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1953 AtomicOrdering::Monotonic);
1954 case MSVCIntrin::_InterlockedAnd_acq:
1955 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1956 AtomicOrdering::Acquire);
1957 case MSVCIntrin::_InterlockedAnd_rel:
1958 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1959 AtomicOrdering::Release);
1960 case MSVCIntrin::_InterlockedAnd_nf:
1961 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1962 AtomicOrdering::Monotonic);
1963 case MSVCIntrin::_InterlockedIncrement_acq:
1964 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1965 case MSVCIntrin::_InterlockedIncrement_rel:
1966 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1967 case MSVCIntrin::_InterlockedIncrement_nf:
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1969 case MSVCIntrin::_InterlockedDecrement_acq:
1970 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1971 case MSVCIntrin::_InterlockedDecrement_rel:
1972 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1973 case MSVCIntrin::_InterlockedDecrement_nf:
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1975
1976 case MSVCIntrin::_InterlockedDecrement:
1977 return EmitAtomicDecrementValue(*this, E);
1978 case MSVCIntrin::_InterlockedIncrement:
1979 return EmitAtomicIncrementValue(*this, E);
1980
1981 case MSVCIntrin::__fastfail: {
1982 // Request immediate process termination from the kernel. The instruction
1983 // sequences to do this are documented on MSDN:
1984 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1985 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1986 StringRef Asm, Constraints;
1987 switch (ISA) {
1988 default:
1989 ErrorUnsupported(E, "__fastfail call for this architecture");
1990 break;
1991 case llvm::Triple::x86:
1992 case llvm::Triple::x86_64:
1993 Asm = "int $$0x29";
1994 Constraints = "{cx}";
1995 break;
1996 case llvm::Triple::thumb:
1997 Asm = "udf #251";
1998 Constraints = "{r0}";
1999 break;
2000 case llvm::Triple::aarch64:
2001 Asm = "brk #0xF003";
2002 Constraints = "{w0}";
2003 }
2004 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2005 llvm::InlineAsm *IA =
2006 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2007 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2008 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2009 llvm::Attribute::NoReturn);
2010 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2011 CI->setAttributes(NoReturnAttr);
2012 return CI;
2013 }
2014 }
2015 llvm_unreachable("Incorrect MSVC intrinsic!");
2016}
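// Example (illustrative): `_BitScanForward(&idx, 0x48)` stores 3 (the count
// of trailing zeros of 0x48) through idx and evaluates to 1; a zero input
// evaluates to 0 and leaves idx unwritten.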
2017
2018namespace {
2019// ARC cleanup for __builtin_os_log_format
2020struct CallObjCArcUse final : EHScopeStack::Cleanup {
2021 CallObjCArcUse(llvm::Value *object) : object(object) {}
2022 llvm::Value *object;
2023
2024 void Emit(CodeGenFunction &CGF, Flags flags) override {
2025 CGF.EmitARCIntrinsicUse(object);
2026 }
2027};
2028}
2029
2030Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
2031 BuiltinCheckKind Kind) {
2032 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2033 "Unsupported builtin check kind");
2034
2035 Value *ArgValue = EmitBitCountExpr(*this, E);
2036 if (!SanOpts.has(SanitizerKind::Builtin))
2037 return ArgValue;
2038
2039 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2040 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2041 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2042 Value *Cond = Builder.CreateICmpNE(
2043 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2044 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2045 {EmitCheckSourceLocation(E->getExprLoc()),
2046 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2047 {});
2048 return ArgValue;
2049}
2050
2051Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) {
2052 Value *ArgValue = EvaluateExprAsBool(E);
2053 if (!SanOpts.has(SanitizerKind::Builtin))
2054 return ArgValue;
2055
2056 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2057 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2058 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2059 EmitCheck(
2060 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2061 {EmitCheckSourceLocation(E->getExprLoc()),
2062 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2063 {});
2064 return ArgValue;
2065}
2066
2067static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2068 return CGF.Builder.CreateBinaryIntrinsic(
2069 Intrinsic::abs, ArgValue,
2070 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2071}
2072
2073static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
2074 bool SanitizeOverflow) {
2075 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2076
2077 // Try to eliminate overflow check.
2078 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2079 if (!VCI->isMinSignedValue())
2080 return EmitAbs(CGF, ArgValue, true);
2081 }
2082
2083 SmallVector<SanitizerKind::SanitizerOrdinal, 1> Ordinals;
2084 SanitizerHandler CheckHandler;
2085 if (SanitizeOverflow) {
2086 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2087 CheckHandler = SanitizerHandler::NegateOverflow;
2088 } else
2089 CheckHandler = SanitizerHandler::SubOverflow;
2090
2091 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2092
2093 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2094 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2095 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2096 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2097 Value *NotOverflow = CGF.Builder.CreateNot(
2098 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2099
2100 // TODO: support -ftrapv-handler.
2101 if (SanitizeOverflow) {
2102 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2103 CheckHandler,
2104 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
2105 CGF.EmitCheckTypeDescriptor(E->getType())},
2106 {ArgValue});
2107 } else
2108 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2109
2110 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2111 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2112}
2113
2114/// Get the argument type for arguments to os_log_helper.
2115static CanQualType getOSLogArgType(ASTContext &C, int Size) {
2116 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2117 return C.getCanonicalType(UnsignedTy);
2118}
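// Example (illustrative): getOSLogArgType(Ctx, 4) yields the canonical
// unsigned 32-bit type, since os_log buffer arguments are stored as raw
// fixed-width blobs.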
2119
2120llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
2121 const analyze_os_log::OSLogBufferLayout &Layout,
2122 CharUnits BufferAlignment) {
2123 ASTContext &Ctx = getContext();
2124
2125 SmallString<64> Name;
2126 {
2127 raw_svector_ostream OS(Name);
2128 OS << "__os_log_helper";
2129 OS << "_" << BufferAlignment.getQuantity();
2130 OS << "_" << int(Layout.getSummaryByte());
2131 OS << "_" << int(Layout.getNumArgsByte());
2132 for (const auto &Item : Layout.Items)
2133 OS << "_" << int(Item.getSizeByte()) << "_"
2134 << int(Item.getDescriptorByte());
2135 }
2136
2137 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2138 return F;
2139
2140 llvm::SmallVector<QualType, 4> ArgTys;
2141 FunctionArgList Args;
2142 Args.push_back(ImplicitParamDecl::Create(
2143 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2144 ImplicitParamKind::Other));
2145 ArgTys.emplace_back(Ctx.VoidPtrTy);
2146
2147 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2148 char Size = Layout.Items[I].getSizeByte();
2149 if (!Size)
2150 continue;
2151
2152 QualType ArgTy = getOSLogArgType(Ctx, Size);
2153 Args.push_back(ImplicitParamDecl::Create(
2154 Ctx, nullptr, SourceLocation(),
2155 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2156 ImplicitParamKind::Other));
2157 ArgTys.emplace_back(ArgTy);
2158 }
2159
2160 QualType ReturnTy = Ctx.VoidTy;
2161
2162 // The helper function has linkonce_odr linkage to enable the linker to merge
2163 // identical functions. To ensure the merging always happens, 'noinline' is
2164 // attached to the function when compiling with -Oz.
2165 const CGFunctionInfo &FI =
2166 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2167 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2168 llvm::Function *Fn = llvm::Function::Create(
2169 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2170 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2171 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2172 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2173 Fn->setDoesNotThrow();
2174
2175 // Attach 'noinline' at -Oz.
2176 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2177 Fn->addFnAttr(llvm::Attribute::NoInline);
2178
2179 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2180 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2181
2182 // Create a scope with an artificial location for the body of this function.
2183 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2184
2185 CharUnits Offset;
2187 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2188 BufferAlignment);
2189 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2190 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2191 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2192 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2193
2194 unsigned I = 1;
2195 for (const auto &Item : Layout.Items) {
2196 Builder.CreateStore(
2197 Builder.getInt8(Item.getDescriptorByte()),
2198 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2199 Builder.CreateStore(
2200 Builder.getInt8(Item.getSizeByte()),
2201 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2202
2203 CharUnits Size = Item.size();
2204 if (!Size.getQuantity())
2205 continue;
2206
2207 Address Arg = GetAddrOfLocalVar(Args[I]);
2208 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2209 Addr = Addr.withElementType(Arg.getElementType());
2210 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2211 Offset += Size;
2212 ++I;
2213 }
2214
2215 FinishFunction();
2216
2217 return Fn;
2218}
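// Example (illustrative): a buffer with 8-byte alignment, summary byte 0 and
// two 4-byte arguments with descriptor byte 0 produces a helper named
// "__os_log_helper_8_0_2_4_0_4_0".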
2219
2220RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
2221 assert(E.getNumArgs() >= 2 &&
2222 "__builtin_os_log_format takes at least 2 arguments");
2223 ASTContext &Ctx = getContext();
2224 analyze_os_log::OSLogBufferLayout Layout;
2225 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
2226 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2227
2228 // Ignore argument 1, the format string. It is not currently used.
2229 CallArgList Args;
2230 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2231
2232 for (const auto &Item : Layout.Items) {
2233 int Size = Item.getSizeByte();
2234 if (!Size)
2235 continue;
2236
2237 llvm::Value *ArgVal;
2238
2239 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2240 uint64_t Val = 0;
2241 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2242 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2243 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2244 } else if (const Expr *TheExpr = Item.getExpr()) {
2245 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2246
2247 // If a temporary object that requires destruction after the full
2248 // expression is passed, push a lifetime-extended cleanup to extend its
2249 // lifetime to the end of the enclosing block scope.
2250 auto LifetimeExtendObject = [&](const Expr *E) {
2251 E = E->IgnoreParenCasts();
2252 // Extend lifetimes of objects returned by function calls and message
2253 // sends.
2254
2255 // FIXME: We should do this in other cases in which temporaries are
2256 // created including arguments of non-ARC types (e.g., C++
2257 // temporaries).
2258 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
2259 return true;
2260 return false;
2261 };
2262
2263 if (TheExpr->getType()->isObjCRetainableType() &&
2264 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2265 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2266 "Only scalar can be a ObjC retainable type");
2267 if (!isa<Constant>(ArgVal)) {
2268 CleanupKind Cleanup = getARCCleanupKind();
2269 QualType Ty = TheExpr->getType();
2271 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2272 ArgVal = EmitARCRetain(Ty, ArgVal);
2273 Builder.CreateStore(ArgVal, Addr);
2274 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2275 CodeGenFunction::destroyARCStrongPrecise,
2276 Cleanup & EHCleanup);
2277
2278 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2279 // argument has to be alive.
2280 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2281 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
2282 }
2283 }
2284 } else {
2285 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2286 }
2287
2288 unsigned ArgValSize =
2289 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2290 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2291 ArgValSize);
2292 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2293 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2294 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2295 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2296 Args.add(RValue::get(ArgVal), ArgTy);
2297 }
2298
2299 const CGFunctionInfo &FI =
2300 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2301 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2302 Layout, BufAddr.getAlignment());
2303 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2304 return RValue::get(BufAddr, *this);
2305}
2306
2307static bool isSpecialUnsignedMultiplySignedResult(
2308 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2309 WidthAndSignedness ResultInfo) {
2310 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2311 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2312 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2313}
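// Example (illustrative): this matches `__builtin_mul_overflow(a, b, &res)`
// with `unsigned a, b` and `int res` (all equally wide); the unsigned product
// must then also be checked against INT_MAX, as done below.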
2314
2315static RValue EmitCheckedUnsignedMultiplySignedResult(
2316 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2317 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2318 const clang::Expr *ResultArg, QualType ResultQTy,
2319 WidthAndSignedness ResultInfo) {
2320 assert(isSpecialUnsignedMultiplySignedResult(
2321 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2322 "Cannot specialize this multiply");
2323
2324 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2325 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2326
2327 llvm::Value *HasOverflow;
2328 llvm::Value *Result = EmitOverflowIntrinsic(
2329 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2330
2331 // The intrinsic call will detect overflow when the value is > UINT_MAX;
2332 // however, since the original builtin had a signed result, we need to report
2333 // an overflow when the result is greater than INT_MAX.
2334 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2335 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2336
2337 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2338 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2339
2340 bool isVolatile =
2341 ResultArg->getType()->getPointeeType().isVolatileQualified();
2342 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2343 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2344 isVolatile);
2345 return RValue::get(HasOverflow);
2346}
2347
2348/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2349static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2350 WidthAndSignedness Op1Info,
2351 WidthAndSignedness Op2Info,
2352 WidthAndSignedness ResultInfo) {
2353 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2354 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2355 Op1Info.Signed != Op2Info.Signed;
2356}
2357
2358/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2359/// the generic checked-binop irgen.
2360static RValue
2361EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2362 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2363 WidthAndSignedness Op2Info,
2364 const clang::Expr *ResultArg, QualType ResultQTy,
2365 WidthAndSignedness ResultInfo) {
2366 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2367 Op2Info, ResultInfo) &&
2368 "Not a mixed-sign multipliction we can specialize");
2369
2370 // Emit the signed and unsigned operands.
2371 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2372 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2373 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2374 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2375 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2376 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2377
2378 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2379 if (SignedOpWidth < UnsignedOpWidth)
2380 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2381 if (UnsignedOpWidth < SignedOpWidth)
2382 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2383
2384 llvm::Type *OpTy = Signed->getType();
2385 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2386 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2387 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2388 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2389
2390 // Take the absolute value of the signed operand.
2391 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2392 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2393 llvm::Value *AbsSigned =
2394 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2395
2396 // Perform a checked unsigned multiplication.
2397 llvm::Value *UnsignedOverflow;
2398 llvm::Value *UnsignedResult =
2399 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2400 Unsigned, UnsignedOverflow);
2401
2402 llvm::Value *Overflow, *Result;
2403 if (ResultInfo.Signed) {
2404 // Signed overflow occurs if the result is greater than INT_MAX or less
2405 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2406 auto IntMax =
2407 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2408 llvm::Value *MaxResult =
2409 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2410 CGF.Builder.CreateZExt(IsNegative, OpTy));
2411 llvm::Value *SignedOverflow =
2412 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2413 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2414
2415 // Prepare the signed result (possibly by negating it).
2416 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2417 llvm::Value *SignedResult =
2418 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2419 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2420 } else {
2421 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2422 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2423 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2424 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2425 if (ResultInfo.Width < OpWidth) {
2426 auto IntMax =
2427 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2428 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2429 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2430 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2431 }
2432
2433 // Negate the product if it would be negative in infinite precision.
2434 Result = CGF.Builder.CreateSelect(
2435 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2436
2437 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2438 }
2439 assert(Overflow && Result && "Missing overflow or result");
2440
2441 bool isVolatile =
2442 ResultArg->getType()->getPointeeType().isVolatileQualified();
2443 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2444 isVolatile);
2445 return RValue::get(Overflow);
2446}
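// Example (illustrative): for `__builtin_mul_overflow(-3, 5u, &res)` with an
// int result, the signed operand is replaced by its absolute value 3, the
// unsigned multiply yields 15, and since the infinitely precise product would
// be negative the result is negated back to -15 with no overflow reported.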
2447
2448static bool
2449TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2450 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2451 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2452 Ty = Ctx.getBaseElementType(Arr);
2453
2454 const auto *Record = Ty->getAsCXXRecordDecl();
2455 if (!Record)
2456 return false;
2457
2458 // We've already checked this type, or are in the process of checking it.
2459 if (!Seen.insert(Record).second)
2460 return false;
2461
2462 assert(Record->hasDefinition() &&
2463 "Incomplete types should already be diagnosed");
2464
2465 if (Record->isDynamicClass())
2466 return true;
2467
2468 for (FieldDecl *F : Record->fields()) {
2469 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2470 return true;
2471 }
2472 return false;
2473}
2474
2475/// Determine if the specified type requires laundering by checking if it is a
2476/// dynamic class type or contains a subobject which is a dynamic class type.
2477static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2478 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2479 return false;
2480 llvm::SmallPtrSet<const Decl *, 16> Seen;
2481 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2482}
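// Example (illustrative): under -fstrict-vtable-pointers, a type with a
// virtual function, or any field (possibly nested in arrays) of such a type,
// requires laundering; a plain aggregate of scalars does not.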
2483
2484RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2485 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2486 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2487
2488 // The builtin's shift arg may have a different type than the source arg and
2489 // result, but the LLVM intrinsic uses the same type for all values.
2490 llvm::Type *Ty = Src->getType();
2491 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2492
2493 // Rotate is a special case of LLVM funnel shift: the first two args are the same.
2494 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2495 Function *F = CGM.getIntrinsic(IID, Ty);
2496 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2497}
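// Example (illustrative): `__builtin_rotateleft32(x, n)` becomes
// `call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)`; passing the source as
// both funnel inputs turns the funnel shift into a rotate.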
2498
2499// Map math builtins for long-double to f128 version.
2500static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2501 switch (BuiltinID) {
2502#define MUTATE_LDBL(func) \
2503 case Builtin::BI__builtin_##func##l: \
2504 return Builtin::BI__builtin_##func##f128;
2535 MUTATE_LDBL(nans)
2536 MUTATE_LDBL(inf)
2555 MUTATE_LDBL(huge_val)
2565#undef MUTATE_LDBL
2566 default:
2567 return BuiltinID;
2568 }
2569}
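// Example (illustrative): given MUTATE_LDBL(huge_val) above, this remaps
// Builtin::BI__builtin_huge_vall to Builtin::BI__builtin_huge_valf128 when
// long double is IEEE 128-bit.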
2570
2571static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2572 Value *V) {
2573 if (CGF.Builder.getIsFPConstrained() &&
2574 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2575 if (Value *Result =
2576 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2577 return Result;
2578 }
2579 return nullptr;
2580}
2581
2582static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
2583 const FunctionDecl *FD) {
2584 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2585 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2586 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2587
2588 SmallVector<Value *, 16> Args;
2589 for (auto &&FormalTy : FnTy->params())
2590 Args.push_back(llvm::PoisonValue::get(FormalTy));
2591
2592 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2593}
2594
2595RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2596 const CallExpr *E,
2597 ReturnValueSlot ReturnValue) {
2598 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2599 "Should not codegen for consteval builtins");
2600
2601 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2602 // See if we can constant fold this builtin. If so, don't emit it at all.
2603 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2604 Expr::EvalResult Result;
2605 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2606 !Result.hasSideEffects()) {
2607 if (Result.Val.isInt())
2608 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2609 Result.Val.getInt()));
2610 if (Result.Val.isFloat())
2611 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2612 Result.Val.getFloat()));
2613 }
2614
2615 // If current long-double semantics is IEEE 128-bit, replace math builtins
2616 // of long-double with f128 equivalent.
2617 // TODO: This mutation should also be applied to targets other than PPC,
2618 // once the backend supports IEEE 128-bit style libcalls.
2619 if (getTarget().getTriple().isPPC64() &&
2620 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2621 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2622
2623 // If the builtin has been declared explicitly with an assembler label,
2624 // disable the specialized emitting below. Ideally we should communicate the
2625 // rename in IR, or at least avoid generating the intrinsic calls that are
2626 // likely to get lowered to the renamed library functions.
2627 const unsigned BuiltinIDIfNoAsmLabel =
2628 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2629
2630 std::optional<bool> ErrnoOverriden;
2631 // ErrnoOverriden is true if math-errno is overridden via the
2632 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2633 // which implies math-errno.
2634 if (E->hasStoredFPFeatures()) {
2635 FPOptionsOverride OP = E->getFPFeatures();
2636 if (OP.hasMathErrnoOverride())
2637 ErrnoOverriden = OP.getMathErrnoOverride();
2638 }
2639 // True if '__attribute__((optnone))' is used. This attribute overrides
2640 // fast-math, which implies math-errno.
2641 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2642
2643 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2644
2645 bool GenerateFPMathIntrinsics =
2647 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2648 OptNone, IsOptimizationEnabled);
2649
2650 if (GenerateFPMathIntrinsics) {
2651 switch (BuiltinIDIfNoAsmLabel) {
2652 case Builtin::BIacos:
2653 case Builtin::BIacosf:
2654 case Builtin::BIacosl:
2655 case Builtin::BI__builtin_acos:
2656 case Builtin::BI__builtin_acosf:
2657 case Builtin::BI__builtin_acosf16:
2658 case Builtin::BI__builtin_acosl:
2659 case Builtin::BI__builtin_acosf128:
2660 case Builtin::BI__builtin_elementwise_acos:
2661 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2662 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2663
2664 case Builtin::BIasin:
2665 case Builtin::BIasinf:
2666 case Builtin::BIasinl:
2667 case Builtin::BI__builtin_asin:
2668 case Builtin::BI__builtin_asinf:
2669 case Builtin::BI__builtin_asinf16:
2670 case Builtin::BI__builtin_asinl:
2671 case Builtin::BI__builtin_asinf128:
2672 case Builtin::BI__builtin_elementwise_asin:
2673 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2674 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2675
2676 case Builtin::BIatan:
2677 case Builtin::BIatanf:
2678 case Builtin::BIatanl:
2679 case Builtin::BI__builtin_atan:
2680 case Builtin::BI__builtin_atanf:
2681 case Builtin::BI__builtin_atanf16:
2682 case Builtin::BI__builtin_atanl:
2683 case Builtin::BI__builtin_atanf128:
2684 case Builtin::BI__builtin_elementwise_atan:
2685 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2686 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2687
2688 case Builtin::BIatan2:
2689 case Builtin::BIatan2f:
2690 case Builtin::BIatan2l:
2691 case Builtin::BI__builtin_atan2:
2692 case Builtin::BI__builtin_atan2f:
2693 case Builtin::BI__builtin_atan2f16:
2694 case Builtin::BI__builtin_atan2l:
2695 case Builtin::BI__builtin_atan2f128:
2696 case Builtin::BI__builtin_elementwise_atan2:
2697 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2698 *this, E, Intrinsic::atan2,
2699 Intrinsic::experimental_constrained_atan2));
2700
2701 case Builtin::BIceil:
2702 case Builtin::BIceilf:
2703 case Builtin::BIceill:
2704 case Builtin::BI__builtin_ceil:
2705 case Builtin::BI__builtin_ceilf:
2706 case Builtin::BI__builtin_ceilf16:
2707 case Builtin::BI__builtin_ceill:
2708 case Builtin::BI__builtin_ceilf128:
2709 case Builtin::BI__builtin_elementwise_ceil:
2710 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2711 Intrinsic::ceil,
2712 Intrinsic::experimental_constrained_ceil));
2713
2714 case Builtin::BIcopysign:
2715 case Builtin::BIcopysignf:
2716 case Builtin::BIcopysignl:
2717 case Builtin::BI__builtin_copysign:
2718 case Builtin::BI__builtin_copysignf:
2719 case Builtin::BI__builtin_copysignf16:
2720 case Builtin::BI__builtin_copysignl:
2721 case Builtin::BI__builtin_copysignf128:
2722 return RValue::get(
2723 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2724
2725 case Builtin::BIcos:
2726 case Builtin::BIcosf:
2727 case Builtin::BIcosl:
2728 case Builtin::BI__builtin_cos:
2729 case Builtin::BI__builtin_cosf:
2730 case Builtin::BI__builtin_cosf16:
2731 case Builtin::BI__builtin_cosl:
2732 case Builtin::BI__builtin_cosf128:
2733 case Builtin::BI__builtin_elementwise_cos:
2734 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2735 Intrinsic::cos,
2736 Intrinsic::experimental_constrained_cos));
2737
2738 case Builtin::BIcosh:
2739 case Builtin::BIcoshf:
2740 case Builtin::BIcoshl:
2741 case Builtin::BI__builtin_cosh:
2742 case Builtin::BI__builtin_coshf:
2743 case Builtin::BI__builtin_coshf16:
2744 case Builtin::BI__builtin_coshl:
2745 case Builtin::BI__builtin_coshf128:
2746 case Builtin::BI__builtin_elementwise_cosh:
2747 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2748 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2749
2750 case Builtin::BIexp:
2751 case Builtin::BIexpf:
2752 case Builtin::BIexpl:
2753 case Builtin::BI__builtin_exp:
2754 case Builtin::BI__builtin_expf:
2755 case Builtin::BI__builtin_expf16:
2756 case Builtin::BI__builtin_expl:
2757 case Builtin::BI__builtin_expf128:
2758 case Builtin::BI__builtin_elementwise_exp:
2759 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2760 Intrinsic::exp,
2761 Intrinsic::experimental_constrained_exp));
2762
2763 case Builtin::BIexp2:
2764 case Builtin::BIexp2f:
2765 case Builtin::BIexp2l:
2766 case Builtin::BI__builtin_exp2:
2767 case Builtin::BI__builtin_exp2f:
2768 case Builtin::BI__builtin_exp2f16:
2769 case Builtin::BI__builtin_exp2l:
2770 case Builtin::BI__builtin_exp2f128:
2771 case Builtin::BI__builtin_elementwise_exp2:
2772 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2773 Intrinsic::exp2,
2774 Intrinsic::experimental_constrained_exp2));
2775 case Builtin::BI__builtin_exp10:
2776 case Builtin::BI__builtin_exp10f:
2777 case Builtin::BI__builtin_exp10f16:
2778 case Builtin::BI__builtin_exp10l:
2779 case Builtin::BI__builtin_exp10f128:
2780 case Builtin::BI__builtin_elementwise_exp10: {
2781 // TODO: strictfp support
2782 if (Builder.getIsFPConstrained())
2783 break;
2784 return RValue::get(
2785 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2786 }
2787 case Builtin::BIfabs:
2788 case Builtin::BIfabsf:
2789 case Builtin::BIfabsl:
2790 case Builtin::BI__builtin_fabs:
2791 case Builtin::BI__builtin_fabsf:
2792 case Builtin::BI__builtin_fabsf16:
2793 case Builtin::BI__builtin_fabsl:
2794 case Builtin::BI__builtin_fabsf128:
2795 return RValue::get(
2796 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2797
2798 case Builtin::BIfloor:
2799 case Builtin::BIfloorf:
2800 case Builtin::BIfloorl:
2801 case Builtin::BI__builtin_floor:
2802 case Builtin::BI__builtin_floorf:
2803 case Builtin::BI__builtin_floorf16:
2804 case Builtin::BI__builtin_floorl:
2805 case Builtin::BI__builtin_floorf128:
2806 case Builtin::BI__builtin_elementwise_floor:
2807 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2808 Intrinsic::floor,
2809 Intrinsic::experimental_constrained_floor));
2810
2811 case Builtin::BIfma:
2812 case Builtin::BIfmaf:
2813 case Builtin::BIfmal:
2814 case Builtin::BI__builtin_fma:
2815 case Builtin::BI__builtin_fmaf:
2816 case Builtin::BI__builtin_fmaf16:
2817 case Builtin::BI__builtin_fmal:
2818 case Builtin::BI__builtin_fmaf128:
2819 case Builtin::BI__builtin_elementwise_fma:
2820 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2821 Intrinsic::fma,
2822 Intrinsic::experimental_constrained_fma));
2823
2824 case Builtin::BIfmax:
2825 case Builtin::BIfmaxf:
2826 case Builtin::BIfmaxl:
2827 case Builtin::BI__builtin_fmax:
2828 case Builtin::BI__builtin_fmaxf:
2829 case Builtin::BI__builtin_fmaxf16:
2830 case Builtin::BI__builtin_fmaxl:
2831 case Builtin::BI__builtin_fmaxf128:
2832 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2833 Intrinsic::maxnum,
2834 Intrinsic::experimental_constrained_maxnum));
2835
2836 case Builtin::BIfmin:
2837 case Builtin::BIfminf:
2838 case Builtin::BIfminl:
2839 case Builtin::BI__builtin_fmin:
2840 case Builtin::BI__builtin_fminf:
2841 case Builtin::BI__builtin_fminf16:
2842 case Builtin::BI__builtin_fminl:
2843 case Builtin::BI__builtin_fminf128:
2844 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2845 Intrinsic::minnum,
2846 Intrinsic::experimental_constrained_minnum));
2847
2848 case Builtin::BIfmaximum_num:
2849 case Builtin::BIfmaximum_numf:
2850 case Builtin::BIfmaximum_numl:
2851 case Builtin::BI__builtin_fmaximum_num:
2852 case Builtin::BI__builtin_fmaximum_numf:
2853 case Builtin::BI__builtin_fmaximum_numf16:
2854 case Builtin::BI__builtin_fmaximum_numl:
2855 case Builtin::BI__builtin_fmaximum_numf128:
2856 return RValue::get(
2857 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2858
2859 case Builtin::BIfminimum_num:
2860 case Builtin::BIfminimum_numf:
2861 case Builtin::BIfminimum_numl:
2862 case Builtin::BI__builtin_fminimum_num:
2863 case Builtin::BI__builtin_fminimum_numf:
2864 case Builtin::BI__builtin_fminimum_numf16:
2865 case Builtin::BI__builtin_fminimum_numl:
2866 case Builtin::BI__builtin_fminimum_numf128:
2867 return RValue::get(
2868 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2869
2870 // fmod() is a special case. It maps to the frem instruction rather than an
2871 // LLVM intrinsic.
2872 case Builtin::BIfmod:
2873 case Builtin::BIfmodf:
2874 case Builtin::BIfmodl:
2875 case Builtin::BI__builtin_fmod:
2876 case Builtin::BI__builtin_fmodf:
2877 case Builtin::BI__builtin_fmodf16:
2878 case Builtin::BI__builtin_fmodl:
2879 case Builtin::BI__builtin_fmodf128:
2880 case Builtin::BI__builtin_elementwise_fmod: {
2881 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2882 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2883 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2884 if (Builder.getIsFPConstrained()) {
2885 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2886 Arg1->getType());
2887 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2888 } else {
2889 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2890 }
2891 }
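// Example (illustrative): in the unconstrained path above, `__builtin_fmodf(x, y)`
// therefore lowers to `%fmod = frem float %x, %y` instead of an intrinsic call.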
2892
2893 case Builtin::BIlog:
2894 case Builtin::BIlogf:
2895 case Builtin::BIlogl:
2896 case Builtin::BI__builtin_log:
2897 case Builtin::BI__builtin_logf:
2898 case Builtin::BI__builtin_logf16:
2899 case Builtin::BI__builtin_logl:
2900 case Builtin::BI__builtin_logf128:
2901 case Builtin::BI__builtin_elementwise_log:
2902 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2903 Intrinsic::log,
2904 Intrinsic::experimental_constrained_log));
2905
2906 case Builtin::BIlog10:
2907 case Builtin::BIlog10f:
2908 case Builtin::BIlog10l:
2909 case Builtin::BI__builtin_log10:
2910 case Builtin::BI__builtin_log10f:
2911 case Builtin::BI__builtin_log10f16:
2912 case Builtin::BI__builtin_log10l:
2913 case Builtin::BI__builtin_log10f128:
2914 case Builtin::BI__builtin_elementwise_log10:
2915 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2916 Intrinsic::log10,
2917 Intrinsic::experimental_constrained_log10));
2918
2919 case Builtin::BIlog2:
2920 case Builtin::BIlog2f:
2921 case Builtin::BIlog2l:
2922 case Builtin::BI__builtin_log2:
2923 case Builtin::BI__builtin_log2f:
2924 case Builtin::BI__builtin_log2f16:
2925 case Builtin::BI__builtin_log2l:
2926 case Builtin::BI__builtin_log2f128:
2927 case Builtin::BI__builtin_elementwise_log2:
2928 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2929 Intrinsic::log2,
2930 Intrinsic::experimental_constrained_log2));
2931
2932 case Builtin::BInearbyint:
2933 case Builtin::BInearbyintf:
2934 case Builtin::BInearbyintl:
2935 case Builtin::BI__builtin_nearbyint:
2936 case Builtin::BI__builtin_nearbyintf:
2937 case Builtin::BI__builtin_nearbyintl:
2938 case Builtin::BI__builtin_nearbyintf128:
2939 case Builtin::BI__builtin_elementwise_nearbyint:
2940 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2941 Intrinsic::nearbyint,
2942 Intrinsic::experimental_constrained_nearbyint));
2943
2944 case Builtin::BIpow:
2945 case Builtin::BIpowf:
2946 case Builtin::BIpowl:
2947 case Builtin::BI__builtin_pow:
2948 case Builtin::BI__builtin_powf:
2949 case Builtin::BI__builtin_powf16:
2950 case Builtin::BI__builtin_powl:
2951 case Builtin::BI__builtin_powf128:
2952 case Builtin::BI__builtin_elementwise_pow:
2953 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2954 Intrinsic::pow,
2955 Intrinsic::experimental_constrained_pow));
2956
2957 case Builtin::BIrint:
2958 case Builtin::BIrintf:
2959 case Builtin::BIrintl:
2960 case Builtin::BI__builtin_rint:
2961 case Builtin::BI__builtin_rintf:
2962 case Builtin::BI__builtin_rintf16:
2963 case Builtin::BI__builtin_rintl:
2964 case Builtin::BI__builtin_rintf128:
2965 case Builtin::BI__builtin_elementwise_rint:
2966 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2967 Intrinsic::rint,
2968 Intrinsic::experimental_constrained_rint));
2969
2970 case Builtin::BIround:
2971 case Builtin::BIroundf:
2972 case Builtin::BIroundl:
2973 case Builtin::BI__builtin_round:
2974 case Builtin::BI__builtin_roundf:
2975 case Builtin::BI__builtin_roundf16:
2976 case Builtin::BI__builtin_roundl:
2977 case Builtin::BI__builtin_roundf128:
2978 case Builtin::BI__builtin_elementwise_round:
2979 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2980 Intrinsic::round,
2981 Intrinsic::experimental_constrained_round));
2982
2983 case Builtin::BIroundeven:
2984 case Builtin::BIroundevenf:
2985 case Builtin::BIroundevenl:
2986 case Builtin::BI__builtin_roundeven:
2987 case Builtin::BI__builtin_roundevenf:
2988 case Builtin::BI__builtin_roundevenf16:
2989 case Builtin::BI__builtin_roundevenl:
2990 case Builtin::BI__builtin_roundevenf128:
2991 case Builtin::BI__builtin_elementwise_roundeven:
2992 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2993 Intrinsic::roundeven,
2994 Intrinsic::experimental_constrained_roundeven));
2995
2996 case Builtin::BIsin:
2997 case Builtin::BIsinf:
2998 case Builtin::BIsinl:
2999 case Builtin::BI__builtin_sin:
3000 case Builtin::BI__builtin_sinf:
3001 case Builtin::BI__builtin_sinf16:
3002 case Builtin::BI__builtin_sinl:
3003 case Builtin::BI__builtin_sinf128:
3004 case Builtin::BI__builtin_elementwise_sin:
3005 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3006 Intrinsic::sin,
3007 Intrinsic::experimental_constrained_sin));
3008
3009 case Builtin::BIsinh:
3010 case Builtin::BIsinhf:
3011 case Builtin::BIsinhl:
3012 case Builtin::BI__builtin_sinh:
3013 case Builtin::BI__builtin_sinhf:
3014 case Builtin::BI__builtin_sinhf16:
3015 case Builtin::BI__builtin_sinhl:
3016 case Builtin::BI__builtin_sinhf128:
3017 case Builtin::BI__builtin_elementwise_sinh:
3018 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3019 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3020
3021 case Builtin::BI__builtin_sincospi:
3022 case Builtin::BI__builtin_sincospif:
3023 case Builtin::BI__builtin_sincospil:
3024 if (Builder.getIsFPConstrained())
3025 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3026 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3027 return RValue::get(nullptr);
3028
3029 case Builtin::BIsincos:
3030 case Builtin::BIsincosf:
3031 case Builtin::BIsincosl:
3032 case Builtin::BI__builtin_sincos:
3033 case Builtin::BI__builtin_sincosf:
3034 case Builtin::BI__builtin_sincosf16:
3035 case Builtin::BI__builtin_sincosl:
3036 case Builtin::BI__builtin_sincosf128:
3037 if (Builder.getIsFPConstrained())
3038 break; // TODO: Emit constrained sincos intrinsic once one exists.
3039 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3040 return RValue::get(nullptr);
3041
3042 case Builtin::BIsqrt:
3043 case Builtin::BIsqrtf:
3044 case Builtin::BIsqrtl:
3045 case Builtin::BI__builtin_sqrt:
3046 case Builtin::BI__builtin_sqrtf:
3047 case Builtin::BI__builtin_sqrtf16:
3048 case Builtin::BI__builtin_sqrtl:
3049 case Builtin::BI__builtin_sqrtf128:
3050 case Builtin::BI__builtin_elementwise_sqrt: {
3051 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
3052 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3053 SetSqrtFPAccuracy(Call);
3054 return RValue::get(Call);
3055 }
3056
3057 case Builtin::BItan:
3058 case Builtin::BItanf:
3059 case Builtin::BItanl:
3060 case Builtin::BI__builtin_tan:
3061 case Builtin::BI__builtin_tanf:
3062 case Builtin::BI__builtin_tanf16:
3063 case Builtin::BI__builtin_tanl:
3064 case Builtin::BI__builtin_tanf128:
3065 case Builtin::BI__builtin_elementwise_tan:
3066 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3067 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3068
3069 case Builtin::BItanh:
3070 case Builtin::BItanhf:
3071 case Builtin::BItanhl:
3072 case Builtin::BI__builtin_tanh:
3073 case Builtin::BI__builtin_tanhf:
3074 case Builtin::BI__builtin_tanhf16:
3075 case Builtin::BI__builtin_tanhl:
3076 case Builtin::BI__builtin_tanhf128:
3077 case Builtin::BI__builtin_elementwise_tanh:
3078 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3079 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3080
3081 case Builtin::BItrunc:
3082 case Builtin::BItruncf:
3083 case Builtin::BItruncl:
3084 case Builtin::BI__builtin_trunc:
3085 case Builtin::BI__builtin_truncf:
3086 case Builtin::BI__builtin_truncf16:
3087 case Builtin::BI__builtin_truncl:
3088 case Builtin::BI__builtin_truncf128:
3089 case Builtin::BI__builtin_elementwise_trunc:
3090 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3091 Intrinsic::trunc,
3092 Intrinsic::experimental_constrained_trunc));
3093
3094 case Builtin::BIlround:
3095 case Builtin::BIlroundf:
3096 case Builtin::BIlroundl:
3097 case Builtin::BI__builtin_lround:
3098 case Builtin::BI__builtin_lroundf:
3099 case Builtin::BI__builtin_lroundl:
3100 case Builtin::BI__builtin_lroundf128:
3101 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3102 *this, E, Intrinsic::lround,
3103 Intrinsic::experimental_constrained_lround));
3104
3105 case Builtin::BIllround:
3106 case Builtin::BIllroundf:
3107 case Builtin::BIllroundl:
3108 case Builtin::BI__builtin_llround:
3109 case Builtin::BI__builtin_llroundf:
3110 case Builtin::BI__builtin_llroundl:
3111 case Builtin::BI__builtin_llroundf128:
3112 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3113 *this, E, Intrinsic::llround,
3114 Intrinsic::experimental_constrained_llround));
3115
3116 case Builtin::BIlrint:
3117 case Builtin::BIlrintf:
3118 case Builtin::BIlrintl:
3119 case Builtin::BI__builtin_lrint:
3120 case Builtin::BI__builtin_lrintf:
3121 case Builtin::BI__builtin_lrintl:
3122 case Builtin::BI__builtin_lrintf128:
3123 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3124 *this, E, Intrinsic::lrint,
3125 Intrinsic::experimental_constrained_lrint));
3126
3127 case Builtin::BIllrint:
3128 case Builtin::BIllrintf:
3129 case Builtin::BIllrintl:
3130 case Builtin::BI__builtin_llrint:
3131 case Builtin::BI__builtin_llrintf:
3132 case Builtin::BI__builtin_llrintl:
3133 case Builtin::BI__builtin_llrintf128:
3134 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3135 *this, E, Intrinsic::llrint,
3136 Intrinsic::experimental_constrained_llrint));
3137 case Builtin::BI__builtin_ldexp:
3138 case Builtin::BI__builtin_ldexpf:
3139 case Builtin::BI__builtin_ldexpl:
3140 case Builtin::BI__builtin_ldexpf16:
3141 case Builtin::BI__builtin_ldexpf128:
3142 case Builtin::BI__builtin_elementwise_ldexp:
3143 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
3144 *this, E, Intrinsic::ldexp,
3145 Intrinsic::experimental_constrained_ldexp));
3146 default:
3147 break;
3148 }
3149 }
3150
3151 // Check NonnullAttribute/NullabilityArg and Alignment.
3152 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3153 unsigned ParmNum) {
3154 Value *Val = A.emitRawPointer(*this);
3155 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3156 ParmNum);
3157
3158 if (SanOpts.has(SanitizerKind::Alignment)) {
3159 SanitizerSet SkippedChecks;
3160 SkippedChecks.set(SanitizerKind::All);
3161 SkippedChecks.clear(SanitizerKind::Alignment);
3162 SourceLocation Loc = Arg->getExprLoc();
3163 // Strip an implicit cast.
3164 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3165 if (CE->getCastKind() == CK_BitCast)
3166 Arg = CE->getSubExpr();
3167 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3168 SkippedChecks);
3169 }
3170 };
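// Illustrative use of the lambda above (it appears later in this switch),
// e.g. for the memcpy-family builtins:
//   EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
//   EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);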
3171
3172 switch (BuiltinIDIfNoAsmLabel) {
3173 default: break;
3174 case Builtin::BI__builtin___CFStringMakeConstantString:
3175 case Builtin::BI__builtin___NSStringMakeConstantString:
3176 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3177 case Builtin::BI__builtin_stdarg_start:
3178 case Builtin::BI__builtin_va_start:
3179 case Builtin::BI__va_start:
3180 case Builtin::BI__builtin_c23_va_start:
3181 case Builtin::BI__builtin_va_end:
3182 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3183 ? EmitScalarExpr(E->getArg(0))
3184 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3185 BuiltinID != Builtin::BI__builtin_va_end);
3186 return RValue::get(nullptr);
3187 case Builtin::BI__builtin_va_copy: {
3188 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3189 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3190 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3191 {DstPtr, SrcPtr});
3192 return RValue::get(nullptr);
3193 }
3194 case Builtin::BIabs:
3195 case Builtin::BIlabs:
3196 case Builtin::BIllabs:
3197 case Builtin::BI__builtin_abs:
3198 case Builtin::BI__builtin_labs:
3199 case Builtin::BI__builtin_llabs: {
3200 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3201
3202 Value *Result;
3203 switch (getLangOpts().getSignedOverflowBehavior()) {
3204 case LangOptions::SOB_Defined:
3205 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3206 break;
3207 case LangOptions::SOB_Undefined:
3208 if (!SanitizeOverflow) {
3209 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3210 break;
3211 }
3212 [[fallthrough]];
3213 case LangOptions::SOB_Trapping:
3214 // TODO: Somehow handle the corner case when the address of abs is taken.
3215 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3216 break;
3217 }
3218 return RValue::get(Result);
3219 }
3220 case Builtin::BI__builtin_complex: {
3221 Value *Real = EmitScalarExpr(E->getArg(0));
3222 Value *Imag = EmitScalarExpr(E->getArg(1));
3223 return RValue::getComplex({Real, Imag});
3224 }
3225 case Builtin::BI__builtin_conj:
3226 case Builtin::BI__builtin_conjf:
3227 case Builtin::BI__builtin_conjl:
3228 case Builtin::BIconj:
3229 case Builtin::BIconjf:
3230 case Builtin::BIconjl: {
3231 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3232 Value *Real = ComplexVal.first;
3233 Value *Imag = ComplexVal.second;
3234 Imag = Builder.CreateFNeg(Imag, "neg");
3235 return RValue::getComplex(std::make_pair(Real, Imag));
3236 }
3237 case Builtin::BI__builtin_creal:
3238 case Builtin::BI__builtin_crealf:
3239 case Builtin::BI__builtin_creall:
3240 case Builtin::BIcreal:
3241 case Builtin::BIcrealf:
3242 case Builtin::BIcreall: {
3243 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3244 return RValue::get(ComplexVal.first);
3245 }
3246
3247 case Builtin::BI__builtin_preserve_access_index: {
3248 // Only enable the preserved access index region when debug info
3249 // is available, since debug info is needed to preserve the user-level
3250 // access pattern.
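// Illustrative: this is the mechanism behind BPF CO-RE. Accesses emitted
// inside the region go through the llvm.preserve.{struct,union,array}
// .access.index intrinsics so field offsets can be relocated later.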
3251 if (!getDebugInfo()) {
3252 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3253 return RValue::get(EmitScalarExpr(E->getArg(0)));
3254 }
3255
3256 // Nested builtin_preserve_access_index() is not supported.
3257 if (IsInPreservedAIRegion) {
3258 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3259 return RValue::get(EmitScalarExpr(E->getArg(0)));
3260 }
3261
3262 IsInPreservedAIRegion = true;
3263 Value *Res = EmitScalarExpr(E->getArg(0));
3264 IsInPreservedAIRegion = false;
3265 return RValue::get(Res);
3266 }
3267
3268 case Builtin::BI__builtin_cimag:
3269 case Builtin::BI__builtin_cimagf:
3270 case Builtin::BI__builtin_cimagl:
3271 case Builtin::BIcimag:
3272 case Builtin::BIcimagf:
3273 case Builtin::BIcimagl: {
3274 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3275 return RValue::get(ComplexVal.second);
3276 }
3277
3278 case Builtin::BI__builtin_clrsb:
3279 case Builtin::BI__builtin_clrsbl:
3280 case Builtin::BI__builtin_clrsbll: {
3281 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
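// Worked example (illustrative): for 32-bit int, clrsb(0) == 31 and
// clrsb(-1) == 31 (~(-1) == 0, ctlz(0) == 32, minus 1), while
// clrsb(1) == 30 (ctlz(1) == 31, minus 1).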
3282 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3283
3284 llvm::Type *ArgType = ArgValue->getType();
3285 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3286
3287 llvm::Type *ResultType = ConvertType(E->getType());
3288 Value *Zero = llvm::Constant::getNullValue(ArgType);
3289 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3290 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3291 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3292 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3293 Value *Result = Builder.CreateSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3294 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3295 "cast");
3296 return RValue::get(Result);
3297 }
3298 case Builtin::BI__builtin_ctzs:
3299 case Builtin::BI__builtin_ctz:
3300 case Builtin::BI__builtin_ctzl:
3301 case Builtin::BI__builtin_ctzll:
3302 case Builtin::BI__builtin_ctzg:
3303 case Builtin::BI__builtin_elementwise_ctzg: {
3304 bool HasFallback =
3305 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3306 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3307 E->getNumArgs() > 1;
3308
3309 Value *ArgValue =
3310 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3311 : EmitScalarExpr(E->getArg(0));
3312
3313 llvm::Type *ArgType = ArgValue->getType();
3314 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3315
3316 llvm::Type *ResultType = ConvertType(E->getType());
3317 // The elementwise builtins always exhibit zero-is-undef behaviour
3318 Value *ZeroUndef = Builder.getInt1(
3319 HasFallback || getTarget().isCLZForZeroUndef() ||
3320 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
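// Illustrative: plain __builtin_ctz(0) is undefined unless the target says
// otherwise, but __builtin_ctzg(0, K) is well defined: cttz is emitted with
// zero-is-poison set, and the select below substitutes K for a zero input.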
3321 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3322 if (Result->getType() != ResultType)
3323 Result =
3324 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3325 if (!HasFallback)
3326 return RValue::get(Result);
3327
3328 Value *Zero = Constant::getNullValue(ArgType);
3329 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3330 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3331 Value *ResultOrFallback =
3332 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3333 return RValue::get(ResultOrFallback);
3334 }
3335 case Builtin::BI__builtin_clzs:
3336 case Builtin::BI__builtin_clz:
3337 case Builtin::BI__builtin_clzl:
3338 case Builtin::BI__builtin_clzll:
3339 case Builtin::BI__builtin_clzg:
3340 case Builtin::BI__builtin_elementwise_clzg: {
3341 bool HasFallback =
3342 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3343 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3344 E->getNumArgs() > 1;
3345
3346 Value *ArgValue =
3347 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3348 : EmitScalarExpr(E->getArg(0));
3349
3350 llvm::Type *ArgType = ArgValue->getType();
3351 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3352
3353 llvm::Type *ResultType = ConvertType(E->getType());
3354 // The elementwise builtins always exhibit zero-is-undef behaviour
3355 Value *ZeroUndef = Builder.getInt1(
3356 HasFallback || getTarget().isCLZForZeroUndef() ||
3357 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3358 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3359 if (Result->getType() != ResultType)
3360 Result =
3361 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3362 if (!HasFallback)
3363 return RValue::get(Result);
3364
3365 Value *Zero = Constant::getNullValue(ArgType);
3366 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3367 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3368 Value *ResultOrFallback =
3369 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3370 return RValue::get(ResultOrFallback);
3371 }
3372 case Builtin::BI__builtin_ffs:
3373 case Builtin::BI__builtin_ffsl:
3374 case Builtin::BI__builtin_ffsll: {
3375 // ffs(x) -> x ? cttz(x) + 1 : 0
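// Worked example (illustrative): ffs(0) == 0, and ffs(8) == 4 since
// cttz(8) == 3 plus 1; the select below handles only the zero case.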
3376 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3377
3378 llvm::Type *ArgType = ArgValue->getType();
3379 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3380
3381 llvm::Type *ResultType = ConvertType(E->getType());
3382 Value *Tmp =
3383 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3384 llvm::ConstantInt::get(ArgType, 1));
3385 Value *Zero = llvm::Constant::getNullValue(ArgType);
3386 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3387 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3388 if (Result->getType() != ResultType)
3389 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3390 "cast");
3391 return RValue::get(Result);
3392 }
3393 case Builtin::BI__builtin_parity:
3394 case Builtin::BI__builtin_parityl:
3395 case Builtin::BI__builtin_parityll: {
3396 // parity(x) -> ctpop(x) & 1
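// Worked example (illustrative): parity(0b1011) == 1, since
// ctpop(0b1011) == 3 and 3 & 1 == 1.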
3397 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3398
3399 llvm::Type *ArgType = ArgValue->getType();
3400 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3401
3402 llvm::Type *ResultType = ConvertType(E->getType());
3403 Value *Tmp = Builder.CreateCall(F, ArgValue);
3404 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3405 if (Result->getType() != ResultType)
3406 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3407 "cast");
3408 return RValue::get(Result);
3409 }
3410 case Builtin::BI__lzcnt16:
3411 case Builtin::BI__lzcnt:
3412 case Builtin::BI__lzcnt64: {
3413 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3414
3415 llvm::Type *ArgType = ArgValue->getType();
3416 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3417
3418 llvm::Type *ResultType = ConvertType(E->getType());
3419 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3420 if (Result->getType() != ResultType)
3421 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3422 "cast");
3423 return RValue::get(Result);
3424 }
3425 case Builtin::BI__popcnt16:
3426 case Builtin::BI__popcnt:
3427 case Builtin::BI__popcnt64:
3428 case Builtin::BI__builtin_popcount:
3429 case Builtin::BI__builtin_popcountl:
3430 case Builtin::BI__builtin_popcountll:
3431 case Builtin::BI__builtin_popcountg: {
3432 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3433
3434 llvm::Type *ArgType = ArgValue->getType();
3435 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3436
3437 llvm::Type *ResultType = ConvertType(E->getType());
3438 Value *Result = Builder.CreateCall(F, ArgValue);
3439 if (Result->getType() != ResultType)
3440 Result =
3441 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3442 return RValue::get(Result);
3443 }
3444 case Builtin::BI__builtin_unpredictable: {
3445 // Always return the argument of __builtin_unpredictable. LLVM does not
3446 // handle this builtin. Metadata for this builtin should be added directly
3447 // to instructions such as branches or switches that use it.
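// Illustrative (sketch): for
//   if (__builtin_unpredictable(x > 0)) { ... }
// it is the branch emitted for the condition that ends up carrying
// !unpredictable metadata, not this call itself.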
3448 return RValue::get(EmitScalarExpr(E->getArg(0)));
3449 }
3450 case Builtin::BI__builtin_expect: {
3451 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3452 llvm::Type *ArgType = ArgValue->getType();
3453
3454 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3455 // Don't generate llvm.expect on -O0 as the backend won't use it for
3456 // anything.
3457 // Note, we still IRGen ExpectedValue because it could have side-effects.
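// Illustrative IR at -O1 and above (sketch, assuming a 64-bit argument):
//   %expval = call i64 @llvm.expect.i64(i64 %x, i64 1)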
3458 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3459 return RValue::get(ArgValue);
3460
3461 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3462 Value *Result =
3463 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3464 return RValue::get(Result);
3465 }
3466 case Builtin::BI__builtin_expect_with_probability: {
3467 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3468 llvm::Type *ArgType = ArgValue->getType();
3469
3470 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3471 llvm::APFloat Probability(0.0);
3472 const Expr *ProbArg = E->getArg(2);
3473 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3474 assert(EvalSucceed && "probability should be able to evaluate as float");
3475 (void)EvalSucceed;
3476 bool LoseInfo = false;
3477 Probability.convert(llvm::APFloat::IEEEdouble(),
3478 llvm::RoundingMode::Dynamic, &LoseInfo);
3479 llvm::Type *Ty = ConvertType(ProbArg->getType());
3480 Constant *Confidence = ConstantFP::get(Ty, Probability);
3481 // Don't generate llvm.expect.with.probability on -O0 as the backend
3482 // won't use it for anything.
3483 // Note, we still IRGen ExpectedValue because it could have side-effects.
3484 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3485 return RValue::get(ArgValue);
3486
3487 Function *FnExpect =
3488 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3489 Value *Result = Builder.CreateCall(
3490 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3491 return RValue::get(Result);
3492 }
3493 case Builtin::BI__builtin_assume_aligned: {
3494 const Expr *Ptr = E->getArg(0);
3495 Value *PtrValue = EmitScalarExpr(Ptr);
3496 Value *OffsetValue =
3497 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3498
3499 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3500 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3501 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3502 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3503 llvm::Value::MaximumAlignment);
3504
3505 emitAlignmentAssumption(PtrValue, Ptr,
3506 /*The expr loc is sufficient.*/ SourceLocation(),
3507 AlignmentCI, OffsetValue);
3508 return RValue::get(PtrValue);
3509 }
3510 case Builtin::BI__builtin_assume_dereferenceable: {
3511 const Expr *Ptr = E->getArg(0);
3512 const Expr *Size = E->getArg(1);
3513 Value *PtrValue = EmitScalarExpr(Ptr);
3514 Value *SizeValue = EmitScalarExpr(Size);
3515 if (SizeValue->getType() != IntPtrTy)
3516 SizeValue =
3517 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3518 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3519 return RValue::get(nullptr);
3520 }
3521 case Builtin::BI__assume:
3522 case Builtin::BI__builtin_assume: {
3523 if (E->getArg(0)->HasSideEffects(getContext()))
3524 return RValue::get(nullptr);
3525
3526 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3527 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3528 Builder.CreateCall(FnAssume, ArgValue);
3529 return RValue::get(nullptr);
3530 }
3531 case Builtin::BI__builtin_assume_separate_storage: {
3532 const Expr *Arg0 = E->getArg(0);
3533 const Expr *Arg1 = E->getArg(1);
3534
3535 Value *Value0 = EmitScalarExpr(Arg0);
3536 Value *Value1 = EmitScalarExpr(Arg1);
3537
3538 Value *Values[] = {Value0, Value1};
3539 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3540 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3541 return RValue::get(nullptr);
3542 }
3543 case Builtin::BI__builtin_allow_runtime_check: {
3544 StringRef Kind =
3545 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3546 LLVMContext &Ctx = CGM.getLLVMContext();
3547 llvm::Value *Allow = Builder.CreateCall(
3548 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3549 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3550 return RValue::get(Allow);
3551 }
3552 case Builtin::BI__arithmetic_fence: {
3553 // Create the builtin call if FastMath is selected and the target
3554 // supports the builtin; otherwise just return the argument.
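// Illustrative: with -ffast-math on a supporting target,
//   __arithmetic_fence(a + b) + c
// keeps (a + b) from being reassociated with c across the fence.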
3555 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3556 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3557 bool isArithmeticFenceEnabled =
3558 FMF.allowReassoc() &&
3559 getContext().getTargetInfo().checkArithmeticFenceSupported();
3560 QualType ArgType = E->getArg(0)->getType();
3561 if (ArgType->isComplexType()) {
3562 if (isArithmeticFenceEnabled) {
3563 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3564 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3565 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3566 ConvertType(ElementType));
3567 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3568 ConvertType(ElementType));
3569 return RValue::getComplex(std::make_pair(Real, Imag));
3570 }
3571 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3572 Value *Real = ComplexVal.first;
3573 Value *Imag = ComplexVal.second;
3574 return RValue::getComplex(std::make_pair(Real, Imag));
3575 }
3576 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3577 if (isArithmeticFenceEnabled)
3578 return RValue::get(
3579 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3580 return RValue::get(ArgValue);
3581 }
3582 case Builtin::BI__builtin_bswapg: {
3583 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3584 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3585 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3586 assert(((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0) ||
3587 IntTy->getBitWidth() == 8) &&
3588 "LLVM's __builtin_bswapg only supports integer variants that has a "
3589 "multiple of 16 bits as well as a single byte");
3590 if (IntTy->getBitWidth() == 8)
3591 return RValue::get(ArgValue);
3592 return RValue::get(
3593 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3594 }
3595 case Builtin::BI__builtin_bswap16:
3596 case Builtin::BI__builtin_bswap32:
3597 case Builtin::BI__builtin_bswap64:
3598 case Builtin::BI_byteswap_ushort:
3599 case Builtin::BI_byteswap_ulong:
3600 case Builtin::BI_byteswap_uint64: {
3601 return RValue::get(
3602 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3603 }
3604 case Builtin::BI__builtin_bitreverse8:
3605 case Builtin::BI__builtin_bitreverse16:
3606 case Builtin::BI__builtin_bitreverse32:
3607 case Builtin::BI__builtin_bitreverse64: {
3608 return RValue::get(
3609 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3610 }
3611 case Builtin::BI__builtin_rotateleft8:
3612 case Builtin::BI__builtin_rotateleft16:
3613 case Builtin::BI__builtin_rotateleft32:
3614 case Builtin::BI__builtin_rotateleft64:
3615 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3616 case Builtin::BI_rotl16:
3617 case Builtin::BI_rotl:
3618 case Builtin::BI_lrotl:
3619 case Builtin::BI_rotl64:
3620 return emitRotate(E, false);
3621
3622 case Builtin::BI__builtin_rotateright8:
3623 case Builtin::BI__builtin_rotateright16:
3624 case Builtin::BI__builtin_rotateright32:
3625 case Builtin::BI__builtin_rotateright64:
3626 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3627 case Builtin::BI_rotr16:
3628 case Builtin::BI_rotr:
3629 case Builtin::BI_lrotr:
3630 case Builtin::BI_rotr64:
3631 return emitRotate(E, true);
3632
3633 case Builtin::BI__builtin_constant_p: {
3634 llvm::Type *ResultType = ConvertType(E->getType());
3635
3636 const Expr *Arg = E->getArg(0);
3637 QualType ArgType = Arg->getType();
3638 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3639 // and likely a mistake.
3640 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3641 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3642 // Per the GCC documentation, only numeric constants are recognized after
3643 // inlining.
3644 return RValue::get(ConstantInt::get(ResultType, 0));
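// Illustrative: for a supported type, __builtin_constant_p(n * 2) lowers to
// a call to @llvm.is.constant below, which LLVM folds to 1 or 0 once
// inlining and constant propagation have had a chance to run.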
3645
3646 if (Arg->HasSideEffects(getContext()))
3647 // The argument is unevaluated, so be conservative if it might have
3648 // side-effects.
3649 return RValue::get(ConstantInt::get(ResultType, 0));
3650
3651 Value *ArgValue = EmitScalarExpr(Arg);
3652 if (ArgType->isObjCObjectPointerType()) {
3653 // Convert Objective-C objects to id because we cannot distinguish between
3654 // LLVM types for Obj-C classes as they are opaque.
3655 ArgType = CGM.getContext().getObjCIdType();
3656 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3657 }
3658 Function *F =
3659 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3660 Value *Result = Builder.CreateCall(F, ArgValue);
3661 if (Result->getType() != ResultType)
3662 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3663 return RValue::get(Result);
3664 }
3665 case Builtin::BI__builtin_dynamic_object_size:
3666 case Builtin::BI__builtin_object_size: {
3667 unsigned Type =
3668 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3669 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3670
3671 // We pass this builtin onto the optimizer so that it can figure out the
3672 // object size in more complex cases.
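// Illustrative lowering (sketch): __builtin_object_size(p, 0) becomes
//   %0 = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)
// with the i1 flags encoding min-vs-max, null-is-unknown, and dynamic.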
3673 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3674 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3675 /*EmittedE=*/nullptr, IsDynamic));
3676 }
3677 case Builtin::BI__builtin_counted_by_ref: {
3678 // Default to returning '(void *) 0'.
3679 llvm::Value *Result = llvm::ConstantPointerNull::get(
3680 llvm::PointerType::getUnqual(getLLVMContext()));
3681
3682 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3683
3684 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3685 UO && UO->getOpcode() == UO_AddrOf) {
3686 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3687
3688 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3689 Arg = ASE->getBase()->IgnoreParenImpCasts();
3690 }
3691
3692 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3693 if (auto *CATy =
3694 ME->getMemberDecl()->getType()->getAs<CountAttributedType>();
3695 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3696 const auto *FAMDecl = cast<FieldDecl>(ME->getMemberDecl());
3697 if (const FieldDecl *CountFD = FAMDecl->findCountedByField())
3698 Result = GetCountedByFieldExprGEP(Arg, FAMDecl, CountFD);
3699 else
3700 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3701 }
3702 }
3703
3704 return RValue::get(Result);
3705 }
3706 case Builtin::BI__builtin_prefetch: {
3707 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3708 // FIXME: Technically these constants should be of type 'int', yes?
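// Illustrative: __builtin_prefetch(p) defaults to RW=0 (read) and
// Locality=3, emitting roughly
//   call void @llvm.prefetch.p0(ptr %p, i32 0, i32 3, i32 1)
// where the trailing i32 1 selects the data (not instruction) cache.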
3709 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3710 llvm::ConstantInt::get(Int32Ty, 0);
3711 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3712 llvm::ConstantInt::get(Int32Ty, 3);
3713 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3714 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3715 Builder.CreateCall(F, {Address, RW, Locality, Data});
3716 return RValue::get(nullptr);
3717 }
3718 case Builtin::BI__builtin_readcyclecounter: {
3719 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3720 return RValue::get(Builder.CreateCall(F));
3721 }
3722 case Builtin::BI__builtin_readsteadycounter: {
3723 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3724 return RValue::get(Builder.CreateCall(F));
3725 }
3726 case Builtin::BI__builtin___clear_cache: {
3727 Value *Begin = EmitScalarExpr(E->getArg(0));
3728 Value *End = EmitScalarExpr(E->getArg(1));
3729 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3730 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3731 }
3732 case Builtin::BI__builtin_trap:
3733 EmitTrapCall(Intrinsic::trap);
3734 return RValue::get(nullptr);
3735 case Builtin::BI__builtin_verbose_trap: {
3736 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3737 if (getDebugInfo()) {
3738 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3739 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3740 *E->getArg(1)->tryEvaluateString(getContext()));
3741 }
3742 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3743 // Currently no attempt is made to prevent traps from being merged.
3744 EmitTrapCall(Intrinsic::trap);
3745 return RValue::get(nullptr);
3746 }
3747 case Builtin::BI__debugbreak:
3748 EmitTrapCall(Intrinsic::debugtrap);
3749 return RValue::get(nullptr);
3750 case Builtin::BI__builtin_unreachable: {
3751 EmitUnreachable(E->getExprLoc());
3752
3753 // We do need to preserve an insertion point.
3754 EmitBlock(createBasicBlock("unreachable.cont"));
3755
3756 return RValue::get(nullptr);
3757 }
3758
3759 case Builtin::BI__builtin_powi:
3760 case Builtin::BI__builtin_powif:
3761 case Builtin::BI__builtin_powil: {
3762 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3763 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3764
3765 if (Builder.getIsFPConstrained()) {
3766 // FIXME: llvm.powi has 2 mangling types,
3767 // llvm.experimental.constrained.powi has one.
3768 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3769 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3770 Src0->getType());
3771 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3772 }
3773
3774 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3775 { Src0->getType(), Src1->getType() });
3776 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3777 }
3778 case Builtin::BI__builtin_frexpl: {
3779 // Linux PPC will not be adding further PPCDoubleDouble support.
3780 // Work is in progress to switch the default to IEEE long double; emit a
3781 // libcall for frexpl instead of legalizing this type in the backend.
3782 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3783 break;
3784 [[fallthrough]];
3785 }
3786 case Builtin::BI__builtin_frexp:
3787 case Builtin::BI__builtin_frexpf:
3788 case Builtin::BI__builtin_frexpf128:
3789 case Builtin::BI__builtin_frexpf16:
3790 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3791 case Builtin::BImodf:
3792 case Builtin::BImodff:
3793 case Builtin::BImodfl:
3794 case Builtin::BI__builtin_modf:
3795 case Builtin::BI__builtin_modff:
3796 case Builtin::BI__builtin_modfl:
3797 if (Builder.getIsFPConstrained())
3798 break; // TODO: Emit constrained modf intrinsic once one exists.
3799 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3800 case Builtin::BI__builtin_isgreater:
3801 case Builtin::BI__builtin_isgreaterequal:
3802 case Builtin::BI__builtin_isless:
3803 case Builtin::BI__builtin_islessequal:
3804 case Builtin::BI__builtin_islessgreater:
3805 case Builtin::BI__builtin_isunordered: {
3806 // Ordered comparisons: we know the arguments to these are matching scalar
3807 // floating point values.
3808 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3809 Value *LHS = EmitScalarExpr(E->getArg(0));
3810 Value *RHS = EmitScalarExpr(E->getArg(1));
3811
3812 switch (BuiltinID) {
3813 default: llvm_unreachable("Unknown ordered comparison");
3814 case Builtin::BI__builtin_isgreater:
3815 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3816 break;
3817 case Builtin::BI__builtin_isgreaterequal:
3818 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3819 break;
3820 case Builtin::BI__builtin_isless:
3821 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3822 break;
3823 case Builtin::BI__builtin_islessequal:
3824 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3825 break;
3826 case Builtin::BI__builtin_islessgreater:
3827 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3828 break;
3829 case Builtin::BI__builtin_isunordered:
3830 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3831 break;
3832 }
3833 // ZExt bool to int type.
3834 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3835 }
3836
3837 case Builtin::BI__builtin_isnan: {
3838 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3839 Value *V = EmitScalarExpr(E->getArg(0));
3840 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3841 return RValue::get(Result);
3842 return RValue::get(
3843 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3844 ConvertType(E->getType())));
3845 }
3846
3847 case Builtin::BI__builtin_issignaling: {
3848 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3849 Value *V = EmitScalarExpr(E->getArg(0));
3850 return RValue::get(
3851 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3852 ConvertType(E->getType())));
3853 }
3854
3855 case Builtin::BI__builtin_isinf: {
3856 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3857 Value *V = EmitScalarExpr(E->getArg(0));
3858 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3859 return RValue::get(Result);
3860 return RValue::get(
3861 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3862 ConvertType(E->getType())));
3863 }
3864
3865 case Builtin::BIfinite:
3866 case Builtin::BI__finite:
3867 case Builtin::BIfinitef:
3868 case Builtin::BI__finitef:
3869 case Builtin::BIfinitel:
3870 case Builtin::BI__finitel:
3871 case Builtin::BI__builtin_isfinite: {
3872 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3873 Value *V = EmitScalarExpr(E->getArg(0));
3874 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3875 return RValue::get(Result);
3876 return RValue::get(
3877 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3878 ConvertType(E->getType())));
3879 }
3880
3881 case Builtin::BI__builtin_isnormal: {
3882 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3883 Value *V = EmitScalarExpr(E->getArg(0));
3884 return RValue::get(
3885 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3886 ConvertType(E->getType())));
3887 }
3888
3889 case Builtin::BI__builtin_issubnormal: {
3890 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3891 Value *V = EmitScalarExpr(E->getArg(0));
3892 return RValue::get(
3893 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3894 ConvertType(E->getType())));
3895 }
3896
3897 case Builtin::BI__builtin_iszero: {
3898 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3899 Value *V = EmitScalarExpr(E->getArg(0));
3900 return RValue::get(
3901 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3902 ConvertType(E->getType())));
3903 }
3904
3905 case Builtin::BI__builtin_isfpclass: {
3906 Expr::EvalResult Result;
3907 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3908 break;
3909 uint64_t Test = Result.Val.getInt().getLimitedValue();
3910 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3911 Value *V = EmitScalarExpr(E->getArg(0));
3912 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3913 ConvertType(E->getType())));
3914 }
3915
3916 case Builtin::BI__builtin_nondeterministic_value: {
3917 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3918
3919 Value *Result = PoisonValue::get(Ty);
3920 Result = Builder.CreateFreeze(Result);
3921
3922 return RValue::get(Result);
3923 }
3924
3925 case Builtin::BI__builtin_elementwise_abs: {
3926 Value *Result;
3927 QualType QT = E->getArg(0)->getType();
3928
3929 if (auto *VecTy = QT->getAs<VectorType>())
3930 QT = VecTy->getElementType();
3931 if (QT->isIntegerType())
3932 Result = Builder.CreateBinaryIntrinsic(
3933 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
3934 nullptr, "elt.abs");
3935 else
3936 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
3937 "elt.abs");
3938
3939 return RValue::get(Result);
3940 }
3941 case Builtin::BI__builtin_elementwise_bitreverse:
3942 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3943 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
3944 case Builtin::BI__builtin_elementwise_popcount:
3945 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3946 *this, E, Intrinsic::ctpop, "elt.ctpop"));
3947 case Builtin::BI__builtin_elementwise_canonicalize:
3948 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3949 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
3950 case Builtin::BI__builtin_elementwise_copysign:
3951 return RValue::get(
3952 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
3953 case Builtin::BI__builtin_elementwise_fshl:
3954 return RValue::get(
3955 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
3956 case Builtin::BI__builtin_elementwise_fshr:
3957 return RValue::get(
3958 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
3959
3960 case Builtin::BI__builtin_elementwise_add_sat:
3961 case Builtin::BI__builtin_elementwise_sub_sat: {
3962 Value *Op0 = EmitScalarExpr(E->getArg(0));
3963 Value *Op1 = EmitScalarExpr(E->getArg(1));
3964 Value *Result;
3965 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
3966 QualType Ty = E->getArg(0)->getType();
3967 if (auto *VecTy = Ty->getAs<VectorType>())
3968 Ty = VecTy->getElementType();
3969 bool IsSigned = Ty->isSignedIntegerType();
3970 unsigned Opc;
3971 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
3972 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
3973 else
3974 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
3975 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
3976 return RValue::get(Result);
3977 }
3978
3979 case Builtin::BI__builtin_elementwise_max: {
3980 Value *Op0 = EmitScalarExpr(E->getArg(0));
3981 Value *Op1 = EmitScalarExpr(E->getArg(1));
3982 Value *Result;
3983 if (Op0->getType()->isIntOrIntVectorTy()) {
3984 QualType Ty = E->getArg(0)->getType();
3985 if (auto *VecTy = Ty->getAs<VectorType>())
3986 Ty = VecTy->getElementType();
3987 Result = Builder.CreateBinaryIntrinsic(
3988 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
3989 Op1, nullptr, "elt.max");
3990 } else
3991 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
3992 return RValue::get(Result);
3993 }
3994 case Builtin::BI__builtin_elementwise_min: {
3995 Value *Op0 = EmitScalarExpr(E->getArg(0));
3996 Value *Op1 = EmitScalarExpr(E->getArg(1));
3997 Value *Result;
3998 if (Op0->getType()->isIntOrIntVectorTy()) {
3999 QualType Ty = E->getArg(0)->getType();
4000 if (auto *VecTy = Ty->getAs<VectorType>())
4001 Ty = VecTy->getElementType();
4002 Result = Builder.CreateBinaryIntrinsic(
4003 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4004 Op1, nullptr, "elt.min");
4005 } else
4006 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4007 return RValue::get(Result);
4008 }
4009
4010 case Builtin::BI__builtin_elementwise_maxnum: {
4011 Value *Op0 = EmitScalarExpr(E->getArg(0));
4012 Value *Op1 = EmitScalarExpr(E->getArg(1));
4013 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4014 Op1, nullptr, "elt.maxnum");
4015 return RValue::get(Result);
4016 }
4017
4018 case Builtin::BI__builtin_elementwise_minnum: {
4019 Value *Op0 = EmitScalarExpr(E->getArg(0));
4020 Value *Op1 = EmitScalarExpr(E->getArg(1));
4021 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4022 Op1, nullptr, "elt.minnum");
4023 return RValue::get(Result);
4024 }
4025
4026 case Builtin::BI__builtin_elementwise_maximum: {
4027 Value *Op0 = EmitScalarExpr(E->getArg(0));
4028 Value *Op1 = EmitScalarExpr(E->getArg(1));
4029 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4030 nullptr, "elt.maximum");
4031 return RValue::get(Result);
4032 }
4033
4034 case Builtin::BI__builtin_elementwise_minimum: {
4035 Value *Op0 = EmitScalarExpr(E->getArg(0));
4036 Value *Op1 = EmitScalarExpr(E->getArg(1));
4037 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4038 nullptr, "elt.minimum");
4039 return RValue::get(Result);
4040 }
4041
4042 case Builtin::BI__builtin_elementwise_maximumnum: {
4043 Value *Op0 = EmitScalarExpr(E->getArg(0));
4044 Value *Op1 = EmitScalarExpr(E->getArg(1));
4045 Value *Result = Builder.CreateBinaryIntrinsic(
4046 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4047 return RValue::get(Result);
4048 }
4049
4050 case Builtin::BI__builtin_elementwise_minimumnum: {
4051 Value *Op0 = EmitScalarExpr(E->getArg(0));
4052 Value *Op1 = EmitScalarExpr(E->getArg(1));
4053 Value *Result = Builder.CreateBinaryIntrinsic(
4054 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4055 return RValue::get(Result);
4056 }
4057
4058 case Builtin::BI__builtin_reduce_max: {
4059 auto GetIntrinsicID = [this](QualType QT) {
4060 if (auto *VecTy = QT->getAs<VectorType>())
4061 QT = VecTy->getElementType();
4062 else if (QT->isSizelessVectorType())
4063 QT = QT->getSizelessVectorEltType(CGM.getContext());
4064
4065 if (QT->isSignedIntegerType())
4066 return Intrinsic::vector_reduce_smax;
4067 if (QT->isUnsignedIntegerType())
4068 return Intrinsic::vector_reduce_umax;
4069 assert(QT->isFloatingType() && "must have a float here");
4070 return Intrinsic::vector_reduce_fmax;
4071 };
4073 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4074 }
4075
4076 case Builtin::BI__builtin_reduce_min: {
4077 auto GetIntrinsicID = [this](QualType QT) {
4078 if (auto *VecTy = QT->getAs<VectorType>())
4079 QT = VecTy->getElementType();
4080 else if (QT->isSizelessVectorType())
4081 QT = QT->getSizelessVectorEltType(CGM.getContext());
4082
4083 if (QT->isSignedIntegerType())
4084 return Intrinsic::vector_reduce_smin;
4085 if (QT->isUnsignedIntegerType())
4086 return Intrinsic::vector_reduce_umin;
4087 assert(QT->isFloatingType() && "must have a float here");
4088 return Intrinsic::vector_reduce_fmin;
4089 };
4090
4092 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4093 }
4094
4095 case Builtin::BI__builtin_reduce_add:
4096 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4097 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4098 case Builtin::BI__builtin_reduce_mul:
4099 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4100 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4101 case Builtin::BI__builtin_reduce_xor:
4102 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4103 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4104 case Builtin::BI__builtin_reduce_or:
4105 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4106 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4107 case Builtin::BI__builtin_reduce_and:
4108 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4109 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4110 case Builtin::BI__builtin_reduce_maximum:
4111 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4112 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4113 case Builtin::BI__builtin_reduce_minimum:
4114 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4115 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4116
4117 case Builtin::BI__builtin_matrix_transpose: {
4118 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4119 Value *MatValue = EmitScalarExpr(E->getArg(0));
4120 MatrixBuilder MB(Builder);
4121 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4122 MatrixTy->getNumColumns());
4123 return RValue::get(Result);
4124 }
4125
4126 case Builtin::BI__builtin_matrix_column_major_load: {
4127 MatrixBuilder MB(Builder);
4128 // Emit everything that isn't dependent on the first parameter type
4129 Value *Stride = EmitScalarExpr(E->getArg(3));
4130 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4131 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4132 assert(PtrTy && "arg0 must be of pointer type");
4133 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4134
4135 Address Src = EmitPointerWithAlignment(E->getArg(0));
4136 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4137 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4138 0);
4139 Value *Result = MB.CreateColumnMajorLoad(
4140 Src.getElementType(), Src.emitRawPointer(*this),
4141 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4142 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4143 return RValue::get(Result);
4144 }
4145
4146 case Builtin::BI__builtin_matrix_column_major_store: {
4147 MatrixBuilder MB(Builder);
4148 Value *Matrix = EmitScalarExpr(E->getArg(0));
4149 Address Dst = EmitPointerWithAlignment(E->getArg(1));
4150 Value *Stride = EmitScalarExpr(E->getArg(2));
4151
4152 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4153 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4154 assert(PtrTy && "arg1 must be of pointer type");
4155 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4156
4157 EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
4158 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4159 0);
4160 Value *Result = MB.CreateColumnMajorStore(
4161 Matrix, Dst.emitRawPointer(*this),
4162 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4163 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4165 return RValue::get(Result);
4166 }
4167
4168 case Builtin::BI__builtin_masked_load:
4169 case Builtin::BI__builtin_masked_expand_load: {
4170 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4171 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4172
4173 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4174 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4175 if (E->getNumArgs() > 2)
4176 PassThru = EmitScalarExpr(E->getArg(2));
4177
4178 CharUnits Align = CGM.getNaturalTypeAlignment(
4179 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4180
4181 llvm::Value *Result;
4182 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4183 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4184 PassThru, "masked_load");
4185 } else {
4186 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4187 Result =
4188 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4189 }
4190 return RValue::get(Result);
4191 };
4192 case Builtin::BI__builtin_masked_gather: {
4193 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4194 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4195 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4196
4197 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4198 CharUnits Align = CGM.getNaturalTypeAlignment(
4199 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4200
4201 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4202 if (E->getNumArgs() > 3)
4203 PassThru = EmitScalarExpr(E->getArg(3));
4204
4205 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4206 E->getType()->getAs<VectorType>()->getElementType());
4207 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4208
4209 llvm::Value *Result = Builder.CreateMaskedGather(
4210 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4211 return RValue::get(Result);
4212 }
4213 case Builtin::BI__builtin_masked_store:
4214 case Builtin::BI__builtin_masked_compress_store: {
4215 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4216 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4217 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4218
4219 QualType ValTy = E->getArg(1)->getType();
4220 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4221
4222 CharUnits Align = CGM.getNaturalTypeAlignment(
4223 E->getArg(1)->getType()->getAs<VectorType>()->getElementType(),
4224 nullptr);
4225
4226 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4227 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4228 } else {
4229 llvm::Function *F =
4230 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4231 Builder.CreateCall(F, {Val, Ptr, Mask});
4232 }
4233 return RValue::get(nullptr);
4234 }
4235 case Builtin::BI__builtin_masked_scatter: {
4236 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4237 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4238 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4239 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4240
4241 CharUnits Align = CGM.getNaturalTypeAlignment(
4242 E->getArg(2)->getType()->getAs<VectorType>()->getElementType(),
4243 nullptr);
4244
4245 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4246 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4247 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4248
4249 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4250 return RValue();
4251 }
4252 case Builtin::BI__builtin_isinf_sign: {
4253 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
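// Worked example (illustrative): isinf_sign(-INFINITY) == -1,
// isinf_sign(INFINITY) == 1, and any finite or NaN input yields 0.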
4254 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4255 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4256 Value *Arg = EmitScalarExpr(E->getArg(0));
4257 Value *AbsArg = EmitFAbs(*this, Arg);
4258 Value *IsInf = Builder.CreateFCmpOEQ(
4259 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4260 Value *IsNeg = EmitSignBit(*this, Arg);
4261
4262 llvm::Type *IntTy = ConvertType(E->getType());
4263 Value *Zero = Constant::getNullValue(IntTy);
4264 Value *One = ConstantInt::get(IntTy, 1);
4265 Value *NegativeOne = ConstantInt::get(IntTy, -1);
4266 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4267 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4268 return RValue::get(Result);
4269 }
4270
4271 case Builtin::BI__builtin_flt_rounds: {
4272 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4273
4274 llvm::Type *ResultType = ConvertType(E->getType());
4275 Value *Result = Builder.CreateCall(F);
4276 if (Result->getType() != ResultType)
4277 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4278 "cast");
4279 return RValue::get(Result);
4280 }
4281
4282 case Builtin::BI__builtin_set_flt_rounds: {
4283 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4284
4285 Value *V = EmitScalarExpr(E->getArg(0));
4286 Builder.CreateCall(F, V);
4287 return RValue::get(nullptr);
4288 }
4289
4290 case Builtin::BI__builtin_fpclassify: {
4291 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4292 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4293 Value *V = EmitScalarExpr(E->getArg(5));
4294 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
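// Note: the operands are __builtin_fpclassify(FP_NAN, FP_INFINITE,
// FP_NORMAL, FP_SUBNORMAL, FP_ZERO, x); args 0-4 are the caller-supplied
// class values and arg 5 is the value being classified, hence getArg(5).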
4295
4296 // Create Result
4297 BasicBlock *Begin = Builder.GetInsertBlock();
4298 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4299 Builder.SetInsertPoint(End);
4300 PHINode *Result =
4301 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4302 "fpclassify_result");
4303
4304 // if (V==0) return FP_ZERO
4305 Builder.SetInsertPoint(Begin);
4306 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4307 "iszero");
4308 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4309 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4310 Builder.CreateCondBr(IsZero, End, NotZero);
4311 Result->addIncoming(ZeroLiteral, Begin);
4312
4313 // if (V != V) return FP_NAN
4314 Builder.SetInsertPoint(NotZero);
4315 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4316 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4317 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4318 Builder.CreateCondBr(IsNan, End, NotNan);
4319 Result->addIncoming(NanLiteral, NotZero);
4320
4321 // if (fabs(V) == infinity) return FP_INFINITY
4322 Builder.SetInsertPoint(NotNan);
4323 Value *VAbs = EmitFAbs(*this, V);
4324 Value *IsInf =
4325 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4326 "isinf");
4327 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4328 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4329 Builder.CreateCondBr(IsInf, End, NotInf);
4330 Result->addIncoming(InfLiteral, NotNan);
4331
4332 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4333 Builder.SetInsertPoint(NotInf);
4334 APFloat Smallest = APFloat::getSmallestNormalized(
4335 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4336 Value *IsNormal =
4337 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4338 "isnormal");
4339 Value *NormalResult =
4340 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4341 EmitScalarExpr(E->getArg(3)));
4342 Builder.CreateBr(End);
4343 Result->addIncoming(NormalResult, NotInf);
4344
4345 // return Result
4346 Builder.SetInsertPoint(End);
4347 return RValue::get(Result);
4348 }
4349
4350 // An alloca will always return a pointer to the alloca (stack) address
4351 // space. This address space need not be the same as the AST / Language
4352 // default (e.g. in C / C++ auto vars are in the generic address space). At
4353 // the AST level this is handled within CreateTempAlloca et al., but for the
4354 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4355 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4356 case Builtin::BIalloca:
4357 case Builtin::BI_alloca:
4358 case Builtin::BI__builtin_alloca_uninitialized:
4359 case Builtin::BI__builtin_alloca: {
4360 Value *Size = EmitScalarExpr(E->getArg(0));
4361 const TargetInfo &TI = getContext().getTargetInfo();
4362 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4363 const Align SuitableAlignmentInBytes =
4364 CGM.getContext()
4365 .toCharUnitsFromBits(TI.getSuitableAlign())
4366 .getAsAlign();
4367 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4368 AI->setAlignment(SuitableAlignmentInBytes);
4369 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4370 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4371 LangAS AAS = getASTAllocaAddressSpace();
4372 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4373 if (AAS != EAS) {
4374 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4375 return RValue::get(
4376 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4377 }
4378 return RValue::get(AI);
4379 }
4380
4381 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4382 case Builtin::BI__builtin_alloca_with_align: {
4383 Value *Size = EmitScalarExpr(E->getArg(0));
4384 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4385 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4386 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4387 const Align AlignmentInBytes =
4388 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4389 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4390 AI->setAlignment(AlignmentInBytes);
4391 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4392 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4393 LangAS AAS = getASTAllocaAddressSpace();
4394 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4395 if (AAS != EAS) {
4396 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4397 return RValue::get(
4398 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4399 }
4400 return RValue::get(AI);
4401 }
4402
4403 case Builtin::BI__builtin_infer_alloc_token: {
4404 llvm::MDNode *MDN = buildAllocToken(E);
4405 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4406 llvm::Function *F =
4407 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4408 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4409 return RValue::get(TokenID);
4410 }
4411
4412 case Builtin::BIbzero:
4413 case Builtin::BI__builtin_bzero: {
4414 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4415 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4416 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4417 E->getArg(0)->getExprLoc(), FD, 0);
4418 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4419 addInstToNewSourceAtom(I, nullptr);
4420 return RValue::get(nullptr);
4421 }
4422
4423 case Builtin::BIbcopy:
4424 case Builtin::BI__builtin_bcopy: {
4425 Address Src = EmitPointerWithAlignment(E->getArg(0));
4426 Address Dest = EmitPointerWithAlignment(E->getArg(1));
4427 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4428 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4429 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4430 0);
4431 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4432 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4433 0);
4434 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4435 addInstToNewSourceAtom(I, nullptr);
4436 return RValue::get(nullptr);
4437 }
4438
4439 case Builtin::BImemcpy:
4440 case Builtin::BI__builtin_memcpy:
4441 case Builtin::BImempcpy:
4442 case Builtin::BI__builtin_mempcpy: {
4443 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4444 Address Src = EmitPointerWithAlignment(E->getArg(1));
4445 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4446 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4447 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4448 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4449 addInstToNewSourceAtom(I, nullptr);
4450 if (BuiltinID == Builtin::BImempcpy ||
4451 BuiltinID == Builtin::BI__builtin_mempcpy)
4452 return RValue::get(Builder.CreateInBoundsGEP(
4453 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4454 else
4455 return RValue::get(Dest, *this);
4456 }
4457
4458 case Builtin::BI__builtin_memcpy_inline: {
4459 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4460 Address Src = EmitPointerWithAlignment(E->getArg(1));
4461 uint64_t Size =
4462 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4463 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4464 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4465 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4466 addInstToNewSourceAtom(I, nullptr);
4467 return RValue::get(nullptr);
4468 }
4469
4470 case Builtin::BI__builtin_char_memchr:
4471 BuiltinID = Builtin::BI__builtin_memchr;
4472 break;
4473
4474 case Builtin::BI__builtin___memcpy_chk: {
4475 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
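// Worked example (illustrative): __builtin___memcpy_chk(d, s, 10, 32) folds
// to a plain memcpy since 10 <= 32; a non-constant or oversized size breaks
// out to the normal library-call path instead.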
4476 Expr::EvalResult SizeResult, DstSizeResult;
4477 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4478 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4479 break;
4480 llvm::APSInt Size = SizeResult.Val.getInt();
4481 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4482 if (Size.ugt(DstSize))
4483 break;
4484 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4485 Address Src = EmitPointerWithAlignment(E->getArg(1));
4486 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4487 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4488 addInstToNewSourceAtom(I, nullptr);
4489 return RValue::get(Dest, *this);
4490 }
4491
4492 case Builtin::BI__builtin_objc_memmove_collectable: {
4493 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4494 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4495 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4496 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4497 DestAddr, SrcAddr, SizeVal);
4498 return RValue::get(DestAddr, *this);
4499 }
4500
4501 case Builtin::BI__builtin___memmove_chk: {
4502 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4503 Expr::EvalResult SizeResult, DstSizeResult;
4504 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4505 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4506 break;
4507 llvm::APSInt Size = SizeResult.Val.getInt();
4508 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4509 if (Size.ugt(DstSize))
4510 break;
4511 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4512 Address Src = EmitPointerWithAlignment(E->getArg(1));
4513 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4514 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4515 addInstToNewSourceAtom(I, nullptr);
4516 return RValue::get(Dest, *this);
4517 }
4518
4519 case Builtin::BI__builtin_trivially_relocate:
4520 case Builtin::BImemmove:
4521 case Builtin::BI__builtin_memmove: {
4522 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4523 Address Src = EmitPointerWithAlignment(E->getArg(1));
4524 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4525 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4526 SizeVal = Builder.CreateMul(
4527 SizeVal,
4528 ConstantInt::get(
4529 SizeVal->getType(),
4530 getContext()
4531 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4532 .getQuantity()));
4533 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4534 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4535 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4536 addInstToNewSourceAtom(I, nullptr);
4537 return RValue::get(Dest, *this);
4538 }
4539 case Builtin::BImemset:
4540 case Builtin::BI__builtin_memset: {
4541 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4542 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4543 Builder.getInt8Ty());
4544 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4545 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4546 E->getArg(0)->getExprLoc(), FD, 0);
4547 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4548 addInstToNewSourceAtom(I, ByteVal);
4549 return RValue::get(Dest, *this);
4550 }
4551 case Builtin::BI__builtin_memset_inline: {
4552 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4553 Value *ByteVal =
4554 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4555 uint64_t Size =
4556 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4557 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4558 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4559 0);
4560 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4561 addInstToNewSourceAtom(I, nullptr);
4562 return RValue::get(nullptr);
4563 }
4564 case Builtin::BI__builtin___memset_chk: {
4565 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4566 Expr::EvalResult SizeResult, DstSizeResult;
4567 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4568 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4569 break;
4570 llvm::APSInt Size = SizeResult.Val.getInt();
4571 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4572 if (Size.ugt(DstSize))
4573 break;
4574 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4575 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4576 Builder.getInt8Ty());
4577 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4578 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4579 addInstToNewSourceAtom(I, nullptr);
4580 return RValue::get(Dest, *this);
4581 }
4582 case Builtin::BI__builtin_wmemchr: {
4583 // The MSVC runtime library does not provide a definition of wmemchr, so we
4584 // need an inline implementation.
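// Roughly equivalent C (illustrative sketch of the emitted blocks below):
//   for (; n; ++s, --n)
//     if (*s == c)
//       return (wchar_t *)s;
//   return 0;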
4585 if (!getTarget().getTriple().isOSMSVCRT())
4586 break;
4587
4588 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4589 Value *Str = EmitScalarExpr(E->getArg(0));
4590 Value *Chr = EmitScalarExpr(E->getArg(1));
4591 Value *Size = EmitScalarExpr(E->getArg(2));
4592
4593 BasicBlock *Entry = Builder.GetInsertBlock();
4594 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4595 BasicBlock *Next = createBasicBlock("wmemchr.next");
4596 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4597 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4598 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4599
4600 EmitBlock(CmpEq);
4601 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4602 StrPhi->addIncoming(Str, Entry);
4603 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4604 SizePhi->addIncoming(Size, Entry);
4605 CharUnits WCharAlign =
4606 getContext().getTypeAlignInChars(getContext().WCharTy);
4607 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4608 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4609 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4610 Builder.CreateCondBr(StrEqChr, Exit, Next);
4611
4612 EmitBlock(Next);
4613 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4614 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4615 Value *NextSizeEq0 =
4616 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4617 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4618 StrPhi->addIncoming(NextStr, Next);
4619 SizePhi->addIncoming(NextSize, Next);
4620
4621 EmitBlock(Exit);
4622 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4623 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4624 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4625 Ret->addIncoming(FoundChr, CmpEq);
4626 return RValue::get(Ret);
4627 }
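// A minimal C-level sketch of what the blocks above implement (MSVC
// targets, where wchar_t is 2 bytes):
//   wchar_t *wmemchr(const wchar_t *s, wchar_t c, size_t n) {
//     for (; n != 0; --n, ++s)
//       if (*s == c)
//         return (wchar_t *)s;
//     return 0;
//   }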
4628 case Builtin::BI__builtin_wmemcmp: {
4629 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4630 // need an inline implementation.
4631 if (!getTarget().getTriple().isOSMSVCRT())
4632 break;
4633
4634 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4635
4636 Value *Dst = EmitScalarExpr(E->getArg(0));
4637 Value *Src = EmitScalarExpr(E->getArg(1));
4638 Value *Size = EmitScalarExpr(E->getArg(2));
4639
4640 BasicBlock *Entry = Builder.GetInsertBlock();
4641 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4642 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4643 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4644 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4645 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4646 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4647
4648 EmitBlock(CmpGT);
4649 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4650 DstPhi->addIncoming(Dst, Entry);
4651 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4652 SrcPhi->addIncoming(Src, Entry);
4653 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4654 SizePhi->addIncoming(Size, Entry);
4655 CharUnits WCharAlign =
4656 getContext().getTypeAlignInChars(getContext().WCharTy);
4657 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4658 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4659 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4660 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4661
4662 EmitBlock(CmpLT);
4663 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4664 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4665
4666 EmitBlock(Next);
4667 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4668 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4669 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4670 Value *NextSizeEq0 =
4671 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4672 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4673 DstPhi->addIncoming(NextDst, Next);
4674 SrcPhi->addIncoming(NextSrc, Next);
4675 SizePhi->addIncoming(NextSize, Next);
4676
4677 EmitBlock(Exit);
4678 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4679 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4680 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4681 Ret->addIncoming(ConstantInt::get(IntTy, -1), CmpLT);
4682 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4683 return RValue::get(Ret);
4684 }
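// Equivalent C-level sketch of the expansion above; the element
// comparison is unsigned, matching MSVC's unsigned 16-bit wchar_t:
//   int wmemcmp(const wchar_t *a, const wchar_t *b, size_t n) {
//     for (; n != 0; --n, ++a, ++b) {
//       if (*a > *b) return 1;
//       if (*a < *b) return -1;
//     }
//     return 0;
//   }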
4685 case Builtin::BI__builtin_dwarf_cfa: {
4686 // The offset in bytes from the first argument to the CFA.
4687 //
4688 // Why on earth is this in the frontend? Is there any reason at
4689 // all that the backend can't reasonably determine this while
4690 // lowering llvm.eh.dwarf.cfa()?
4691 //
4692 // TODO: If there's a satisfactory reason, add a target hook for
4693 // this instead of hard-coding 0, which is correct for most targets.
4694 int32_t Offset = 0;
4695
4696 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4697 return RValue::get(Builder.CreateCall(F,
4698 llvm::ConstantInt::get(Int32Ty, Offset)));
4699 }
4700 case Builtin::BI__builtin_return_address: {
4701 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4702 getContext().UnsignedIntTy);
4703 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4704 return RValue::get(Builder.CreateCall(F, Depth));
4705 }
4706 case Builtin::BI_ReturnAddress: {
4707 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4708 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4709 }
4710 case Builtin::BI__builtin_frame_address: {
4711 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4712 getContext().UnsignedIntTy);
4713 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4714 return RValue::get(Builder.CreateCall(F, Depth));
4715 }
4716 case Builtin::BI__builtin_extract_return_addr: {
4717 Value *Address = EmitScalarExpr(E->getArg(0));
4718 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
4719 return RValue::get(Result);
4720 }
4721 case Builtin::BI__builtin_frob_return_addr: {
4722 Value *Address = EmitScalarExpr(E->getArg(0));
4723 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
4724 return RValue::get(Result);
4725 }
4726 case Builtin::BI__builtin_dwarf_sp_column: {
4727 llvm::IntegerType *Ty
4728 = cast<llvm::IntegerType>(ConvertType(E->getType()));
4729 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
4730 if (Column == -1) {
4731 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4732 return RValue::get(llvm::UndefValue::get(Ty));
4733 }
4734 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4735 }
4736 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4737 Value *Address = EmitScalarExpr(E->getArg(0));
4738 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4739 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4740 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4741 }
4742 case Builtin::BI__builtin_eh_return: {
4743 Value *Int = EmitScalarExpr(E->getArg(0));
4744 Value *Ptr = EmitScalarExpr(E->getArg(1));
4745
4746 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4747 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4748 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4749 Function *F =
4750 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4751 : Intrinsic::eh_return_i64);
4752 Builder.CreateCall(F, {Int, Ptr});
4753 Builder.CreateUnreachable();
4754
4755 // We do need to preserve an insertion point.
4756 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4757
4758 return RValue::get(nullptr);
4759 }
4760 case Builtin::BI__builtin_unwind_init: {
4761 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4762 Builder.CreateCall(F);
4763 return RValue::get(nullptr);
4764 }
4765 case Builtin::BI__builtin_extend_pointer: {
4766 // Extends a pointer to the size of an _Unwind_Word, which is
4767 // uint64_t on all platforms. Generally this gets poked into a
4768 // register and eventually used as an address, so if the
4769 // addressing registers are wider than pointers and the platform
4770 // doesn't implicitly ignore high-order bits when doing
4771 // addressing, we need to make sure we zext / sext based on
4772 // the platform's expectations.
4773 //
4774 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4775
4776 // Cast the pointer to intptr_t.
4777 Value *Ptr = EmitScalarExpr(E->getArg(0));
4778 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4779
4780 // If that's 64 bits, we're done.
4781 if (IntPtrTy->getBitWidth() == 64)
4782 return RValue::get(Result);
4783
4784 // Otherwise, ask the target hooks what to do.
4785 if (getTargetHooks().extendPointerWithSExt())
4786 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4787 else
4788 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4789 }
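// For example (illustrative), on a 32-bit target whose hooks request
// sign extension this emits:
//   %cast = ptrtoint ptr %p to i32
//   %ext  = sext i32 %cast to i64
// while a zero-extending target uses zext instead.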
4790 case Builtin::BI__builtin_setjmp: {
4791 // Buffer is a void**.
4792 Address Buf = EmitPointerWithAlignment(E->getArg(0));
4793
4794 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4795 // On this target, the back end fills in the context buffer completely.
4796 // It doesn't really matter whether the frontend stores to the buffer
4797 // before calling setjmp; the back end is going to overwrite it anyway.
4798 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4799 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4800 }
4801
4802 // Store the frame pointer to the setjmp buffer.
4803 Value *FrameAddr = Builder.CreateCall(
4804 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4805 ConstantInt::get(Int32Ty, 0));
4806 Builder.CreateStore(FrameAddr, Buf);
4807
4808 // Store the stack pointer to the setjmp buffer.
4809 Value *StackAddr = Builder.CreateStackSave();
4810 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4811
4812 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4813 Builder.CreateStore(StackAddr, StackSaveSlot);
4814
4815 // Call LLVM's EH setjmp, which is lightweight.
4816 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4817 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4818 }
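// Resulting buffer layout on the generic (non-SystemZ) path, as a
// sketch: slot 0 holds the frame address and slot 2 the saved stack
// pointer; the remaining slots are filled in when the backend lowers
// @llvm.eh.sjlj.setjmp (e.g. the resume address).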
4819 case Builtin::BI__builtin_longjmp: {
4820 Value *Buf = EmitScalarExpr(E->getArg(0));
4821
4822 // Call LLVM's EH longjmp, which is lightweight.
4823 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4824
4825 // longjmp doesn't return; mark this as unreachable.
4826 Builder.CreateUnreachable();
4827
4828 // We do need to preserve an insertion point.
4829 EmitBlock(createBasicBlock("longjmp.cont"));
4830
4831 return RValue::get(nullptr);
4832 }
4833 case Builtin::BI__builtin_launder: {
4834 const Expr *Arg = E->getArg(0);
4835 QualType ArgTy = Arg->getType()->getPointeeType();
4836 Value *Ptr = EmitScalarExpr(Arg);
4837 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4838 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4839
4840 return RValue::get(Ptr);
4841 }
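// i.e. under -fstrict-vtable-pointers, when the pointee may carry a
// vtable pointer this becomes (sketch):
//   %q = call ptr @llvm.launder.invariant.group.p0(ptr %p)
// otherwise __builtin_launder simply returns its argument.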
4842 case Builtin::BI__sync_fetch_and_add:
4843 case Builtin::BI__sync_fetch_and_sub:
4844 case Builtin::BI__sync_fetch_and_or:
4845 case Builtin::BI__sync_fetch_and_and:
4846 case Builtin::BI__sync_fetch_and_xor:
4847 case Builtin::BI__sync_fetch_and_nand:
4848 case Builtin::BI__sync_add_and_fetch:
4849 case Builtin::BI__sync_sub_and_fetch:
4850 case Builtin::BI__sync_and_and_fetch:
4851 case Builtin::BI__sync_or_and_fetch:
4852 case Builtin::BI__sync_xor_and_fetch:
4853 case Builtin::BI__sync_nand_and_fetch:
4854 case Builtin::BI__sync_val_compare_and_swap:
4855 case Builtin::BI__sync_bool_compare_and_swap:
4856 case Builtin::BI__sync_lock_test_and_set:
4857 case Builtin::BI__sync_lock_release:
4858 case Builtin::BI__sync_swap:
4859 llvm_unreachable("Shouldn't make it through sema");
4860 case Builtin::BI__sync_fetch_and_add_1:
4861 case Builtin::BI__sync_fetch_and_add_2:
4862 case Builtin::BI__sync_fetch_and_add_4:
4863 case Builtin::BI__sync_fetch_and_add_8:
4864 case Builtin::BI__sync_fetch_and_add_16:
4865 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4866 case Builtin::BI__sync_fetch_and_sub_1:
4867 case Builtin::BI__sync_fetch_and_sub_2:
4868 case Builtin::BI__sync_fetch_and_sub_4:
4869 case Builtin::BI__sync_fetch_and_sub_8:
4870 case Builtin::BI__sync_fetch_and_sub_16:
4871 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4872 case Builtin::BI__sync_fetch_and_or_1:
4873 case Builtin::BI__sync_fetch_and_or_2:
4874 case Builtin::BI__sync_fetch_and_or_4:
4875 case Builtin::BI__sync_fetch_and_or_8:
4876 case Builtin::BI__sync_fetch_and_or_16:
4877 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4878 case Builtin::BI__sync_fetch_and_and_1:
4879 case Builtin::BI__sync_fetch_and_and_2:
4880 case Builtin::BI__sync_fetch_and_and_4:
4881 case Builtin::BI__sync_fetch_and_and_8:
4882 case Builtin::BI__sync_fetch_and_and_16:
4883 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4884 case Builtin::BI__sync_fetch_and_xor_1:
4885 case Builtin::BI__sync_fetch_and_xor_2:
4886 case Builtin::BI__sync_fetch_and_xor_4:
4887 case Builtin::BI__sync_fetch_and_xor_8:
4888 case Builtin::BI__sync_fetch_and_xor_16:
4889 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4890 case Builtin::BI__sync_fetch_and_nand_1:
4891 case Builtin::BI__sync_fetch_and_nand_2:
4892 case Builtin::BI__sync_fetch_and_nand_4:
4893 case Builtin::BI__sync_fetch_and_nand_8:
4894 case Builtin::BI__sync_fetch_and_nand_16:
4895 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4896
4897 // Clang extensions: not overloaded yet.
4898 case Builtin::BI__sync_fetch_and_min:
4899 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4900 case Builtin::BI__sync_fetch_and_max:
4901 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4902 case Builtin::BI__sync_fetch_and_umin:
4903 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4904 case Builtin::BI__sync_fetch_and_umax:
4905 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4906
4907 case Builtin::BI__sync_add_and_fetch_1:
4908 case Builtin::BI__sync_add_and_fetch_2:
4909 case Builtin::BI__sync_add_and_fetch_4:
4910 case Builtin::BI__sync_add_and_fetch_8:
4911 case Builtin::BI__sync_add_and_fetch_16:
4912 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4913 llvm::Instruction::Add);
4914 case Builtin::BI__sync_sub_and_fetch_1:
4915 case Builtin::BI__sync_sub_and_fetch_2:
4916 case Builtin::BI__sync_sub_and_fetch_4:
4917 case Builtin::BI__sync_sub_and_fetch_8:
4918 case Builtin::BI__sync_sub_and_fetch_16:
4919 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4920 llvm::Instruction::Sub);
4921 case Builtin::BI__sync_and_and_fetch_1:
4922 case Builtin::BI__sync_and_and_fetch_2:
4923 case Builtin::BI__sync_and_and_fetch_4:
4924 case Builtin::BI__sync_and_and_fetch_8:
4925 case Builtin::BI__sync_and_and_fetch_16:
4926 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4927 llvm::Instruction::And);
4928 case Builtin::BI__sync_or_and_fetch_1:
4929 case Builtin::BI__sync_or_and_fetch_2:
4930 case Builtin::BI__sync_or_and_fetch_4:
4931 case Builtin::BI__sync_or_and_fetch_8:
4932 case Builtin::BI__sync_or_and_fetch_16:
4933 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
4934 llvm::Instruction::Or);
4935 case Builtin::BI__sync_xor_and_fetch_1:
4936 case Builtin::BI__sync_xor_and_fetch_2:
4937 case Builtin::BI__sync_xor_and_fetch_4:
4938 case Builtin::BI__sync_xor_and_fetch_8:
4939 case Builtin::BI__sync_xor_and_fetch_16:
4940 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
4941 llvm::Instruction::Xor);
4942 case Builtin::BI__sync_nand_and_fetch_1:
4943 case Builtin::BI__sync_nand_and_fetch_2:
4944 case Builtin::BI__sync_nand_and_fetch_4:
4945 case Builtin::BI__sync_nand_and_fetch_8:
4946 case Builtin::BI__sync_nand_and_fetch_16:
4947 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
4948 llvm::Instruction::And, true);
4949
4950 case Builtin::BI__sync_val_compare_and_swap_1:
4951 case Builtin::BI__sync_val_compare_and_swap_2:
4952 case Builtin::BI__sync_val_compare_and_swap_4:
4953 case Builtin::BI__sync_val_compare_and_swap_8:
4954 case Builtin::BI__sync_val_compare_and_swap_16:
4955 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
4956
4957 case Builtin::BI__sync_bool_compare_and_swap_1:
4958 case Builtin::BI__sync_bool_compare_and_swap_2:
4959 case Builtin::BI__sync_bool_compare_and_swap_4:
4960 case Builtin::BI__sync_bool_compare_and_swap_8:
4961 case Builtin::BI__sync_bool_compare_and_swap_16:
4962 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
4963
4964 case Builtin::BI__sync_swap_1:
4965 case Builtin::BI__sync_swap_2:
4966 case Builtin::BI__sync_swap_4:
4967 case Builtin::BI__sync_swap_8:
4968 case Builtin::BI__sync_swap_16:
4969 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4970
4971 case Builtin::BI__sync_lock_test_and_set_1:
4972 case Builtin::BI__sync_lock_test_and_set_2:
4973 case Builtin::BI__sync_lock_test_and_set_4:
4974 case Builtin::BI__sync_lock_test_and_set_8:
4975 case Builtin::BI__sync_lock_test_and_set_16:
4976 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
4977
4978 case Builtin::BI__sync_lock_release_1:
4979 case Builtin::BI__sync_lock_release_2:
4980 case Builtin::BI__sync_lock_release_4:
4981 case Builtin::BI__sync_lock_release_8:
4982 case Builtin::BI__sync_lock_release_16: {
4983 Address Ptr = CheckAtomicAlignment(*this, E);
4984 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
4985
4986 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
4987 getContext().getTypeSize(ElTy));
4988 llvm::StoreInst *Store =
4989 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
4990 Store->setAtomic(llvm::AtomicOrdering::Release);
4991 return RValue::get(nullptr);
4992 }
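// In other words, __sync_lock_release(p) is a release-ordered atomic
// store of zero; for the 4-byte variant, roughly:
//   store atomic i32 0, ptr %p release, align 4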
4993
4994 case Builtin::BI__sync_synchronize: {
4995 // We assume this is supposed to correspond to a C++0x-style
4996 // sequentially-consistent fence (i.e. this is only usable for
4997 // synchronization, not device I/O or anything like that). This intrinsic
4998 // is really badly designed in the sense that in theory, there isn't
4999 // any way to safely use it... but in practice, it mostly works
5000 // to use it with non-atomic loads and stores to get acquire/release
5001 // semantics.
5002 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5003 return RValue::get(nullptr);
5004 }
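// i.e. __sync_synchronize() lowers to a single instruction:
//   fence seq_cst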
5005
5006 case Builtin::BI__builtin_nontemporal_load:
5007 return RValue::get(EmitNontemporalLoad(*this, E));
5008 case Builtin::BI__builtin_nontemporal_store:
5009 return RValue::get(EmitNontemporalStore(*this, E));
5010 case Builtin::BI__c11_atomic_is_lock_free:
5011 case Builtin::BI__atomic_is_lock_free: {
5012 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5013 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5014 // _Atomic(T) is always properly-aligned.
5015 const char *LibCallName = "__atomic_is_lock_free";
5016 CallArgList Args;
5017 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5018 getContext().getSizeType());
5019 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5020 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5021 getContext().VoidPtrTy);
5022 else
5023 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5024 getContext().VoidPtrTy);
5025 const CGFunctionInfo &FuncInfo =
5026 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5027 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5028 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5029 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5030 ReturnValueSlot(), Args);
5031 }
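// For example (illustrative), __atomic_is_lock_free(sizeof(T), p)
// becomes a runtime call such as:
//   %r = call i1 @__atomic_is_lock_free(i64 %size, ptr %p)
// while the __c11 form passes a null pointer, since _Atomic(T) is
// always properly aligned.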
5032
5033 case Builtin::BI__atomic_thread_fence:
5034 case Builtin::BI__atomic_signal_fence:
5035 case Builtin::BI__c11_atomic_thread_fence:
5036 case Builtin::BI__c11_atomic_signal_fence: {
5037 llvm::SyncScope::ID SSID;
5038 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5039 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5040 SSID = llvm::SyncScope::SingleThread;
5041 else
5042 SSID = llvm::SyncScope::System;
5043 Value *Order = EmitScalarExpr(E->getArg(0));
5044 if (isa<llvm::ConstantInt>(Order)) {
5045 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5046 switch (ord) {
5047 case 0: // memory_order_relaxed
5048 default: // invalid order
5049 break;
5050 case 1: // memory_order_consume
5051 case 2: // memory_order_acquire
5052 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5053 break;
5054 case 3: // memory_order_release
5055 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5056 break;
5057 case 4: // memory_order_acq_rel
5058 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5059 break;
5060 case 5: // memory_order_seq_cst
5061 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5062 break;
5063 }
5064 return RValue::get(nullptr);
5065 }
5066
5067 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5068 AcquireBB = createBasicBlock("acquire", CurFn);
5069 ReleaseBB = createBasicBlock("release", CurFn);
5070 AcqRelBB = createBasicBlock("acqrel", CurFn);
5071 SeqCstBB = createBasicBlock("seqcst", CurFn);
5072 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5073
5074 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5075 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5076
5077 Builder.SetInsertPoint(AcquireBB);
5078 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5079 Builder.CreateBr(ContBB);
5080 SI->addCase(Builder.getInt32(1), AcquireBB);
5081 SI->addCase(Builder.getInt32(2), AcquireBB);
5082
5083 Builder.SetInsertPoint(ReleaseBB);
5084 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5085 Builder.CreateBr(ContBB);
5086 SI->addCase(Builder.getInt32(3), ReleaseBB);
5087
5088 Builder.SetInsertPoint(AcqRelBB);
5089 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5090 Builder.CreateBr(ContBB);
5091 SI->addCase(Builder.getInt32(4), AcqRelBB);
5092
5093 Builder.SetInsertPoint(SeqCstBB);
5094 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5095 Builder.CreateBr(ContBB);
5096 SI->addCase(Builder.getInt32(5), SeqCstBB);
5097
5098 Builder.SetInsertPoint(ContBB);
5099 return RValue::get(nullptr);
5100 }
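// When the order is not a compile-time constant, the expansion above is
// a switch over the memory_order values, roughly:
//   switch i32 %order, label %continue [ i32 1, label %acquire
//     i32 2, label %acquire   i32 3, label %release
//     i32 4, label %acqrel    i32 5, label %seqcst ]
// with one fence of the matching ordering in each destination block.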
5101 case Builtin::BI__scoped_atomic_thread_fence: {
5102 auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
5103
5104 Value *Order = EmitScalarExpr(E->getArg(0));
5105 Value *Scope = EmitScalarExpr(E->getArg(1));
5106 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5107 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5108 if (Ord && Scp) {
5109 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5110 ? ScopeModel->map(Scp->getZExtValue())
5111 : ScopeModel->map(ScopeModel->getFallBackValue());
5112 switch (Ord->getZExtValue()) {
5113 case 0: // memory_order_relaxed
5114 default: // invalid order
5115 break;
5116 case 1: // memory_order_consume
5117 case 2: // memory_order_acquire
5118 Builder.CreateFence(
5119 llvm::AtomicOrdering::Acquire,
5120 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5121 llvm::AtomicOrdering::Acquire,
5122 getLLVMContext()));
5123 break;
5124 case 3: // memory_order_release
5125 Builder.CreateFence(
5126 llvm::AtomicOrdering::Release,
5127 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5128 llvm::AtomicOrdering::Release,
5129 getLLVMContext()));
5130 break;
5131 case 4: // memory_order_acq_rel
5132 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5133 getTargetHooks().getLLVMSyncScopeID(
5134 getLangOpts(), SS,
5135 llvm::AtomicOrdering::AcquireRelease,
5136 getLLVMContext()));
5137 break;
5138 case 5: // memory_order_seq_cst
5139 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5140 getTargetHooks().getLLVMSyncScopeID(
5141 getLangOpts(), SS,
5142 llvm::AtomicOrdering::SequentiallyConsistent,
5143 getLLVMContext()));
5144 break;
5145 }
5146 return RValue::get(nullptr);
5147 }
5148
5149 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5150
5151 llvm::SmallVector<std::pair<llvm::BasicBlock *, llvm::AtomicOrdering>>
5152 OrderBBs;
5153 if (Ord) {
5154 switch (Ord->getZExtValue()) {
5155 case 0: // memory_order_relaxed
5156 default: // invalid order
5157 ContBB->eraseFromParent();
5158 return RValue::get(nullptr);
5159 case 1: // memory_order_consume
5160 case 2: // memory_order_acquire
5161 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5162 llvm::AtomicOrdering::Acquire);
5163 break;
5164 case 3: // memory_order_release
5165 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5166 llvm::AtomicOrdering::Release);
5167 break;
5168 case 4: // memory_order_acq_rel
5169 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5170 llvm::AtomicOrdering::AcquireRelease);
5171 break;
5172 case 5: // memory_order_seq_cst
5173 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5174 llvm::AtomicOrdering::SequentiallyConsistent);
5175 break;
5176 }
5177 } else {
5178 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5179 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5180 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5181 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5182
5183 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5184 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5185 SI->addCase(Builder.getInt32(1), AcquireBB);
5186 SI->addCase(Builder.getInt32(2), AcquireBB);
5187 SI->addCase(Builder.getInt32(3), ReleaseBB);
5188 SI->addCase(Builder.getInt32(4), AcqRelBB);
5189 SI->addCase(Builder.getInt32(5), SeqCstBB);
5190
5191 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5192 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5193 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5194 OrderBBs.emplace_back(SeqCstBB,
5195 llvm::AtomicOrdering::SequentiallyConsistent);
5196 }
5197
5198 for (auto &[OrderBB, Ordering] : OrderBBs) {
5199 Builder.SetInsertPoint(OrderBB);
5200 if (Scp) {
5201 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5202 ? ScopeModel->map(Scp->getZExtValue())
5203 : ScopeModel->map(ScopeModel->getFallBackValue());
5204 Builder.CreateFence(Ordering,
5205 getTargetHooks().getLLVMSyncScopeID(
5206 getLangOpts(), SS, Ordering, getLLVMContext()));
5207 Builder.CreateBr(ContBB);
5208 } else {
5209 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5210 for (unsigned Scp : ScopeModel->getRuntimeValues())
5211 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5212
5213 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5214 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5215 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5216 auto *B = BBs[Scp];
5217 SI->addCase(Builder.getInt32(Scp), B);
5218
5219 Builder.SetInsertPoint(B);
5220 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5221 getLangOpts(), ScopeModel->map(Scp),
5222 Ordering, getLLVMContext()));
5223 Builder.CreateBr(ContBB);
5224 }
5225 }
5226 }
5227
5228 Builder.SetInsertPoint(ContBB);
5229 return RValue::get(nullptr);
5230 }
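// Usage sketch: __scoped_atomic_thread_fence(__ATOMIC_SEQ_CST,
// __MEMORY_SCOPE_DEVICE) folds to one seq_cst fence tagged with the
// target's sync scope for device scope; non-constant order or scope
// operands fall back to the nested switch expansion above.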
5231
5232 case Builtin::BI__builtin_signbit:
5233 case Builtin::BI__builtin_signbitf:
5234 case Builtin::BI__builtin_signbitl: {
5235 return RValue::get(
5236 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5237 ConvertType(E->getType())));
5238 }
5239 case Builtin::BI__warn_memset_zero_len:
5240 return RValue::getIgnored();
5241 case Builtin::BI__annotation: {
5242 // Re-encode each wide string to UTF8 and make an MDString.
5243 SmallVector<Metadata *, 1> Strings;
5244 for (const Expr *Arg : E->arguments()) {
5245 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5246 assert(Str->getCharByteWidth() == 2);
5247 StringRef WideBytes = Str->getBytes();
5248 std::string StrUtf8;
5249 if (!convertUTF16ToUTF8String(
5250 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5251 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5252 continue;
5253 }
5254 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5255 }
5256
5257 // Build an MDTuple of MDStrings and emit the intrinsic call.
5258 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5259 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5260 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5261 return RValue::getIgnored();
5262 }
5263 case Builtin::BI__builtin_annotation: {
5264 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5265 llvm::Function *F = CGM.getIntrinsic(
5266 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5267
5268 // Get the annotation string, go through casts. Sema requires this to be a
5269 // non-wide string literal, potentially cast, so the cast<> is safe.
5270 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5271 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5272 return RValue::get(
5273 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5274 }
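// Usage sketch (illustrative):
//   int v = __builtin_annotation(x, "my.note");
// emits a call to the llvm.annotation intrinsic that carries "my.note"
// and returns x unchanged.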
5275 case Builtin::BI__builtin_addcb:
5276 case Builtin::BI__builtin_addcs:
5277 case Builtin::BI__builtin_addc:
5278 case Builtin::BI__builtin_addcl:
5279 case Builtin::BI__builtin_addcll:
5280 case Builtin::BI__builtin_subcb:
5281 case Builtin::BI__builtin_subcs:
5282 case Builtin::BI__builtin_subc:
5283 case Builtin::BI__builtin_subcl:
5284 case Builtin::BI__builtin_subcll: {
5285
5286 // We translate all of these builtins from expressions of the form:
5287 // int x = ..., y = ..., carryin = ..., carryout, result;
5288 // result = __builtin_addc(x, y, carryin, &carryout);
5289 //
5290 // to LLVM IR of the form:
5291 //
5292 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5293 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5294 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5295 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5296 // i32 %carryin)
5297 // %result = extractvalue {i32, i1} %tmp2, 0
5298 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5299 // %tmp3 = or i1 %carry1, %carry2
5300 // %tmp4 = zext i1 %tmp3 to i32
5301 // store i32 %tmp4, i32* %carryout
5302
5303 // Scalarize our inputs.
5304 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5305 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5306 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5307 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5308
5309 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5310 Intrinsic::ID IntrinsicId;
5311 switch (BuiltinID) {
5312 default: llvm_unreachable("Unknown multiprecision builtin id.");
5313 case Builtin::BI__builtin_addcb:
5314 case Builtin::BI__builtin_addcs:
5315 case Builtin::BI__builtin_addc:
5316 case Builtin::BI__builtin_addcl:
5317 case Builtin::BI__builtin_addcll:
5318 IntrinsicId = Intrinsic::uadd_with_overflow;
5319 break;
5320 case Builtin::BI__builtin_subcb:
5321 case Builtin::BI__builtin_subcs:
5322 case Builtin::BI__builtin_subc:
5323 case Builtin::BI__builtin_subcl:
5324 case Builtin::BI__builtin_subcll:
5325 IntrinsicId = Intrinsic::usub_with_overflow;
5326 break;
5327 }
5328
5329 // Construct our resulting LLVM IR expression.
5330 llvm::Value *Carry1;
5331 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5332 X, Y, Carry1);
5333 llvm::Value *Carry2;
5334 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5335 Sum1, Carryin, Carry2);
5336 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5337 X->getType());
5338 Builder.CreateStore(CarryOut, CarryOutPtr);
5339 return RValue::get(Sum2);
5340 }
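// Typical usage (illustrative): chaining the carry to add two-limb
// unsigned integers:
//   unsigned c, lo, hi;
//   lo = __builtin_addc(a0, b0, 0, &c);
//   hi = __builtin_addc(a1, b1, c, &c);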
5341
5342 case Builtin::BI__builtin_add_overflow:
5343 case Builtin::BI__builtin_sub_overflow:
5344 case Builtin::BI__builtin_mul_overflow: {
5345 const clang::Expr *LeftArg = E->getArg(0);
5346 const clang::Expr *RightArg = E->getArg(1);
5347 const clang::Expr *ResultArg = E->getArg(2);
5348
5349 clang::QualType ResultQTy =
5350 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5351
5352 WidthAndSignedness LeftInfo =
5353 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5354 WidthAndSignedness RightInfo =
5355 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5356 WidthAndSignedness ResultInfo =
5357 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5358
5359 // Handle mixed-sign multiplication as a special case, because adding
5360 // runtime or backend support for our generic irgen would be too expensive.
5361 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5362 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5363 RightInfo, ResultArg, ResultQTy,
5364 ResultInfo);
5365
5366 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5367 ResultInfo))
5368 return EmitCheckedUnsignedMultiplySignedResult(
5369 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5370 ResultInfo);
5371
5372 WidthAndSignedness EncompassingInfo =
5373 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5374
5375 llvm::Type *EncompassingLLVMTy =
5376 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5377
5378 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5379
5380 Intrinsic::ID IntrinsicId;
5381 switch (BuiltinID) {
5382 default:
5383 llvm_unreachable("Unknown overflow builtin id.");
5384 case Builtin::BI__builtin_add_overflow:
5385 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5386 : Intrinsic::uadd_with_overflow;
5387 break;
5388 case Builtin::BI__builtin_sub_overflow:
5389 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5390 : Intrinsic::usub_with_overflow;
5391 break;
5392 case Builtin::BI__builtin_mul_overflow:
5393 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5394 : Intrinsic::umul_with_overflow;
5395 break;
5396 }
5397
5398 llvm::Value *Left = EmitScalarExpr(LeftArg);
5399 llvm::Value *Right = EmitScalarExpr(RightArg);
5400 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5401
5402 // Extend each operand to the encompassing type.
5403 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5404 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5405
5406 // Perform the operation on the extended values.
5407 llvm::Value *Overflow, *Result;
5408 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5409
5410 if (EncompassingInfo.Width > ResultInfo.Width) {
5411 // The encompassing type is wider than the result type, so we need to
5412 // truncate it.
5413 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5414
5415 // To see if the truncation caused an overflow, we will extend
5416 // the result and then compare it to the original result.
5417 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5418 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5419 llvm::Value *TruncationOverflow =
5420 Builder.CreateICmpNE(Result, ResultTruncExt);
5421
5422 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5423 Result = ResultTrunc;
5424 }
5425
5426 // Finally, store the result using the pointer.
5427 bool isVolatile =
5428 ResultArg->getType()->getPointeeType().isVolatileQualified();
5429 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5430
5431 return RValue::get(Overflow);
5432 }
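// Example (illustrative) of the truncation check above: for
//   int r;
//   __builtin_add_overflow((long long)x, (long long)y, &r);
// the add is performed at 64 bits, the sum is truncated to 32 bits,
// re-extended, and compared with the wide sum; a mismatch is OR'd into
// the returned overflow flag.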
5433
5434 case Builtin::BI__builtin_uadd_overflow:
5435 case Builtin::BI__builtin_uaddl_overflow:
5436 case Builtin::BI__builtin_uaddll_overflow:
5437 case Builtin::BI__builtin_usub_overflow:
5438 case Builtin::BI__builtin_usubl_overflow:
5439 case Builtin::BI__builtin_usubll_overflow:
5440 case Builtin::BI__builtin_umul_overflow:
5441 case Builtin::BI__builtin_umull_overflow:
5442 case Builtin::BI__builtin_umulll_overflow:
5443 case Builtin::BI__builtin_sadd_overflow:
5444 case Builtin::BI__builtin_saddl_overflow:
5445 case Builtin::BI__builtin_saddll_overflow:
5446 case Builtin::BI__builtin_ssub_overflow:
5447 case Builtin::BI__builtin_ssubl_overflow:
5448 case Builtin::BI__builtin_ssubll_overflow:
5449 case Builtin::BI__builtin_smul_overflow:
5450 case Builtin::BI__builtin_smull_overflow:
5451 case Builtin::BI__builtin_smulll_overflow: {
5452
5453 // We translate all of these builtins directly to the relevant LLVM IR node.
5454
5455 // Scalarize our inputs.
5456 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5457 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5458 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5459
5460 // Decide which of the overflow intrinsics we are lowering to:
5461 Intrinsic::ID IntrinsicId;
5462 switch (BuiltinID) {
5463 default: llvm_unreachable("Unknown overflow builtin id.");
5464 case Builtin::BI__builtin_uadd_overflow:
5465 case Builtin::BI__builtin_uaddl_overflow:
5466 case Builtin::BI__builtin_uaddll_overflow:
5467 IntrinsicId = Intrinsic::uadd_with_overflow;
5468 break;
5469 case Builtin::BI__builtin_usub_overflow:
5470 case Builtin::BI__builtin_usubl_overflow:
5471 case Builtin::BI__builtin_usubll_overflow:
5472 IntrinsicId = Intrinsic::usub_with_overflow;
5473 break;
5474 case Builtin::BI__builtin_umul_overflow:
5475 case Builtin::BI__builtin_umull_overflow:
5476 case Builtin::BI__builtin_umulll_overflow:
5477 IntrinsicId = Intrinsic::umul_with_overflow;
5478 break;
5479 case Builtin::BI__builtin_sadd_overflow:
5480 case Builtin::BI__builtin_saddl_overflow:
5481 case Builtin::BI__builtin_saddll_overflow:
5482 IntrinsicId = Intrinsic::sadd_with_overflow;
5483 break;
5484 case Builtin::BI__builtin_ssub_overflow:
5485 case Builtin::BI__builtin_ssubl_overflow:
5486 case Builtin::BI__builtin_ssubll_overflow:
5487 IntrinsicId = Intrinsic::ssub_with_overflow;
5488 break;
5489 case Builtin::BI__builtin_smul_overflow:
5490 case Builtin::BI__builtin_smull_overflow:
5491 case Builtin::BI__builtin_smulll_overflow:
5492 IntrinsicId = Intrinsic::smul_with_overflow;
5493 break;
5494 }
5495
5496
5497 llvm::Value *Carry;
5498 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5499 Builder.CreateStore(Sum, SumOutPtr);
5500
5501 return RValue::get(Carry);
5502 }
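// Usage sketch: each of these maps directly onto one *.with.overflow
// intrinsic, e.g.
//   unsigned s;
//   if (__builtin_uadd_overflow(a, b, &s)) { /* wrapped */ }
// emits @llvm.uadd.with.overflow.i32 and returns the carry bit.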
5503 case Builtin::BIaddressof:
5504 case Builtin::BI__addressof:
5505 case Builtin::BI__builtin_addressof:
5506 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5507 case Builtin::BI__builtin_function_start:
5508 return RValue::get(CGM.GetFunctionStart(
5509 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5510 case Builtin::BI__builtin_operator_new:
5511 return EmitBuiltinNewDeleteCall(
5512 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5513 case Builtin::BI__builtin_operator_delete:
5514 EmitBuiltinNewDeleteCall(
5515 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5516 return RValue::get(nullptr);
5517
5518 case Builtin::BI__builtin_is_aligned:
5519 return EmitBuiltinIsAligned(E);
5520 case Builtin::BI__builtin_align_up:
5521 return EmitBuiltinAlignTo(E, true);
5522 case Builtin::BI__builtin_align_down:
5523 return EmitBuiltinAlignTo(E, false);
5524
5525 case Builtin::BI__noop:
5526 // __noop always evaluates to an integer literal zero.
5527 return RValue::get(ConstantInt::get(IntTy, 0));
5528 case Builtin::BI__builtin_call_with_static_chain: {
5529 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5530 const Expr *Chain = E->getArg(1);
5531 return EmitCall(Call->getCallee()->getType(),
5532 EmitCallee(Call->getCallee()), Call, ReturnValue,
5533 EmitScalarExpr(Chain));
5534 }
5535 case Builtin::BI_InterlockedExchange8:
5536 case Builtin::BI_InterlockedExchange16:
5537 case Builtin::BI_InterlockedExchange:
5538 case Builtin::BI_InterlockedExchangePointer:
5539 return RValue::get(
5540 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
5541 case Builtin::BI_InterlockedCompareExchangePointer:
5542 return RValue::get(
5543 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange, E));
5544 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5545 return RValue::get(
5546 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E));
5547 case Builtin::BI_InterlockedCompareExchange8:
5548 case Builtin::BI_InterlockedCompareExchange16:
5549 case Builtin::BI_InterlockedCompareExchange:
5550 case Builtin::BI_InterlockedCompareExchange64:
5551 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5552 case Builtin::BI_InterlockedIncrement16:
5553 case Builtin::BI_InterlockedIncrement:
5554 return RValue::get(
5555 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
5556 case Builtin::BI_InterlockedDecrement16:
5557 case Builtin::BI_InterlockedDecrement:
5558 return RValue::get(
5559 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
5560 case Builtin::BI_InterlockedAnd8:
5561 case Builtin::BI_InterlockedAnd16:
5562 case Builtin::BI_InterlockedAnd:
5563 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
5564 case Builtin::BI_InterlockedExchangeAdd8:
5565 case Builtin::BI_InterlockedExchangeAdd16:
5566 case Builtin::BI_InterlockedExchangeAdd:
5567 return RValue::get(
5568 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
5569 case Builtin::BI_InterlockedExchangeSub8:
5570 case Builtin::BI_InterlockedExchangeSub16:
5571 case Builtin::BI_InterlockedExchangeSub:
5572 return RValue::get(
5573 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
5574 case Builtin::BI_InterlockedOr8:
5575 case Builtin::BI_InterlockedOr16:
5576 case Builtin::BI_InterlockedOr:
5577 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
5578 case Builtin::BI_InterlockedXor8:
5579 case Builtin::BI_InterlockedXor16:
5580 case Builtin::BI_InterlockedXor:
5581 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
5582
5583 case Builtin::BI_bittest64:
5584 case Builtin::BI_bittest:
5585 case Builtin::BI_bittestandcomplement64:
5586 case Builtin::BI_bittestandcomplement:
5587 case Builtin::BI_bittestandreset64:
5588 case Builtin::BI_bittestandreset:
5589 case Builtin::BI_bittestandset64:
5590 case Builtin::BI_bittestandset:
5591 case Builtin::BI_interlockedbittestandreset:
5592 case Builtin::BI_interlockedbittestandreset64:
5593 case Builtin::BI_interlockedbittestandreset64_acq:
5594 case Builtin::BI_interlockedbittestandreset64_rel:
5595 case Builtin::BI_interlockedbittestandreset64_nf:
5596 case Builtin::BI_interlockedbittestandset64:
5597 case Builtin::BI_interlockedbittestandset64_acq:
5598 case Builtin::BI_interlockedbittestandset64_rel:
5599 case Builtin::BI_interlockedbittestandset64_nf:
5600 case Builtin::BI_interlockedbittestandset:
5601 case Builtin::BI_interlockedbittestandset_acq:
5602 case Builtin::BI_interlockedbittestandset_rel:
5603 case Builtin::BI_interlockedbittestandset_nf:
5604 case Builtin::BI_interlockedbittestandreset_acq:
5605 case Builtin::BI_interlockedbittestandreset_rel:
5606 case Builtin::BI_interlockedbittestandreset_nf:
5607 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5608
5609 // These builtins exist to emit regular volatile loads and stores not
5610 // affected by the -fms-volatile setting.
5611 case Builtin::BI__iso_volatile_load8:
5612 case Builtin::BI__iso_volatile_load16:
5613 case Builtin::BI__iso_volatile_load32:
5614 case Builtin::BI__iso_volatile_load64:
5615 return RValue::get(EmitISOVolatileLoad(*this, E));
5616 case Builtin::BI__iso_volatile_store8:
5617 case Builtin::BI__iso_volatile_store16:
5618 case Builtin::BI__iso_volatile_store32:
5619 case Builtin::BI__iso_volatile_store64:
5620 return RValue::get(EmitISOVolatileStore(*this, E));
5621
5622 case Builtin::BI__builtin_ptrauth_sign_constant:
5623 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5624
5625 case Builtin::BI__builtin_ptrauth_auth:
5626 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5627 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5628 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5629 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5630 case Builtin::BI__builtin_ptrauth_strip: {
5631 // Emit the arguments.
5632 SmallVector<llvm::Value *, 5> Args;
5633 for (auto argExpr : E->arguments())
5634 Args.push_back(EmitScalarExpr(argExpr));
5635
5636 // Cast the value to intptr_t, saving its original type.
5637 llvm::Type *OrigValueType = Args[0]->getType();
5638 if (OrigValueType->isPointerTy())
5639 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5640
5641 switch (BuiltinID) {
5642 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5643 if (Args[4]->getType()->isPointerTy())
5644 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5645 [[fallthrough]];
5646
5647 case Builtin::BI__builtin_ptrauth_auth:
5648 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5649 if (Args[2]->getType()->isPointerTy())
5650 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5651 break;
5652
5653 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5654 if (Args[1]->getType()->isPointerTy())
5655 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5656 break;
5657
5658 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5659 case Builtin::BI__builtin_ptrauth_strip:
5660 break;
5661 }
5662
5663 // Call the intrinsic.
5664 auto IntrinsicID = [&]() -> unsigned {
5665 switch (BuiltinID) {
5666 case Builtin::BI__builtin_ptrauth_auth:
5667 return Intrinsic::ptrauth_auth;
5668 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5669 return Intrinsic::ptrauth_resign;
5670 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5671 return Intrinsic::ptrauth_blend;
5672 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5673 return Intrinsic::ptrauth_sign_generic;
5674 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5675 return Intrinsic::ptrauth_sign;
5676 case Builtin::BI__builtin_ptrauth_strip:
5677 return Intrinsic::ptrauth_strip;
5678 }
5679 llvm_unreachable("bad ptrauth intrinsic");
5680 }();
5681 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5682 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5683
5684 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5685 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5686 OrigValueType->isPointerTy()) {
5687 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5688 }
5689 return RValue::get(Result);
5690 }
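// Illustrative round trip (key and discriminator shown symbolically):
//   void *s  = __builtin_ptrauth_sign_unauthenticated(p, k, d);
//   void *p2 = __builtin_ptrauth_auth(s, k, d); // p2 == p if untampered
// Pointer operands are bridged through intptr_t around the intrinsic
// call, as done above.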
5691
5692 case Builtin::BI__builtin_get_vtable_pointer: {
5693 const Expr *Target = E->getArg(0);
5694 QualType TargetType = Target->getType();
5695 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5696 assert(Decl);
5697 auto ThisAddress = EmitPointerWithAlignment(Target);
5698 assert(ThisAddress.isValid());
5699 llvm::Value *VTablePointer =
5701 return RValue::get(VTablePointer);
5702 }
5703
5704 case Builtin::BI__exception_code:
5705 case Builtin::BI_exception_code:
5706 return RValue::get(EmitSEHExceptionCode());
5707 case Builtin::BI__exception_info:
5708 case Builtin::BI_exception_info:
5709 return RValue::get(EmitSEHExceptionInfo());
5710 case Builtin::BI__abnormal_termination:
5711 case Builtin::BI_abnormal_termination:
5712 return RValue::get(EmitSEHAbnormalTermination());
5713 case Builtin::BI_setjmpex:
5714 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5715 E->getArg(0)->getType()->isPointerType())
5716 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5717 break;
5718 case Builtin::BI_setjmp:
5719 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5720 E->getArg(0)->getType()->isPointerType()) {
5721 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5722 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5723 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5724 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5725 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5726 }
5727 break;
5728
5729 // C++ std:: builtins.
5730 case Builtin::BImove:
5731 case Builtin::BImove_if_noexcept:
5732 case Builtin::BIforward:
5733 case Builtin::BIforward_like:
5734 case Builtin::BIas_const:
5735 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5736 case Builtin::BI__GetExceptionInfo: {
5737 if (llvm::GlobalVariable *GV =
5738 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5739 return RValue::get(GV);
5740 break;
5741 }
5742
5743 case Builtin::BI__fastfail:
5744 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
5745
5746 case Builtin::BI__builtin_coro_id:
5747 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5748 case Builtin::BI__builtin_coro_promise:
5749 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5750 case Builtin::BI__builtin_coro_resume:
5751 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5752 return RValue::get(nullptr);
5753 case Builtin::BI__builtin_coro_frame:
5754 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5755 case Builtin::BI__builtin_coro_noop:
5756 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5757 case Builtin::BI__builtin_coro_free:
5758 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5759 case Builtin::BI__builtin_coro_destroy:
5760 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5761 return RValue::get(nullptr);
5762 case Builtin::BI__builtin_coro_done:
5763 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5764 case Builtin::BI__builtin_coro_alloc:
5765 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5766 case Builtin::BI__builtin_coro_begin:
5767 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5768 case Builtin::BI__builtin_coro_end:
5769 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5770 case Builtin::BI__builtin_coro_suspend:
5771 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5772 case Builtin::BI__builtin_coro_size:
5773 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5774 case Builtin::BI__builtin_coro_align:
5775 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5776
5777 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5778 case Builtin::BIread_pipe:
5779 case Builtin::BIwrite_pipe: {
5780 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5781 *Arg1 = EmitScalarExpr(E->getArg(1));
5782 CGOpenCLRuntime OpenCLRT(CGM);
5783 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5784 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5785
5786 // Type of the generic packet parameter.
5787 unsigned GenericAS =
5788 getContext().getTargetAddressSpace(LangAS::opencl_generic);
5789 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5790
5791 // Determine which overloaded version to generate the call for.
5792 if (2U == E->getNumArgs()) {
5793 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5794 : "__write_pipe_2";
5795 // Create a generic function type so the call can be made with any builtin
5796 // or user-defined type.
5797 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5798 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5799 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5800 return RValue::get(
5801 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5802 {Arg0, ACast, PacketSize, PacketAlign}));
5803 } else {
5804 assert(4 == E->getNumArgs() &&
5805 "Illegal number of parameters to pipe function");
5806 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5807 : "__write_pipe_4";
5808
5809 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5810 Int32Ty, Int32Ty};
5811 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5812 *Arg3 = EmitScalarExpr(E->getArg(3));
5813 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5814 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5815 // We know the third argument is an integer type, but we may need to cast
5816 // it to i32.
5817 if (Arg2->getType() != Int32Ty)
5818 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5819 return RValue::get(
5820 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5821 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5822 }
5823 }
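// For example (illustrative), the two-argument form read_pipe(p, &v)
// lowers to a runtime call like:
//   %r = call i32 @__read_pipe_2(ptr %p, ptr addrspace(4) %v,
//                                i32 <elem size>, i32 <elem align>)
// (generic AS shown as 4, as on SPIR targets).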
5824 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
5825 // functions
5826 case Builtin::BIreserve_read_pipe:
5827 case Builtin::BIreserve_write_pipe:
5828 case Builtin::BIwork_group_reserve_read_pipe:
5829 case Builtin::BIwork_group_reserve_write_pipe:
5830 case Builtin::BIsub_group_reserve_read_pipe:
5831 case Builtin::BIsub_group_reserve_write_pipe: {
5832 // Composing the mangled name for the function.
5833 const char *Name;
5834 if (BuiltinID == Builtin::BIreserve_read_pipe)
5835 Name = "__reserve_read_pipe";
5836 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5837 Name = "__reserve_write_pipe";
5838 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5839 Name = "__work_group_reserve_read_pipe";
5840 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5841 Name = "__work_group_reserve_write_pipe";
5842 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5843 Name = "__sub_group_reserve_read_pipe";
5844 else
5845 Name = "__sub_group_reserve_write_pipe";
5846
5847 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5848 *Arg1 = EmitScalarExpr(E->getArg(1));
5849 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5850 CGOpenCLRuntime OpenCLRT(CGM);
5851 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5852 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5853
5854 // Building the generic function prototype.
5855 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5856 llvm::FunctionType *FTy =
5857 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5858 // We know the second argument is an integer type, but we may need to cast
5859 // it to i32.
5860 if (Arg1->getType() != Int32Ty)
5861 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5862 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5863 {Arg0, Arg1, PacketSize, PacketAlign}));
5864 }
5865 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5866 // functions
5867 case Builtin::BIcommit_read_pipe:
5868 case Builtin::BIcommit_write_pipe:
5869 case Builtin::BIwork_group_commit_read_pipe:
5870 case Builtin::BIwork_group_commit_write_pipe:
5871 case Builtin::BIsub_group_commit_read_pipe:
5872 case Builtin::BIsub_group_commit_write_pipe: {
5873 const char *Name;
5874 if (BuiltinID == Builtin::BIcommit_read_pipe)
5875 Name = "__commit_read_pipe";
5876 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5877 Name = "__commit_write_pipe";
5878 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5879 Name = "__work_group_commit_read_pipe";
5880 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5881 Name = "__work_group_commit_write_pipe";
5882 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5883 Name = "__sub_group_commit_read_pipe";
5884 else
5885 Name = "__sub_group_commit_write_pipe";
5886
5887 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5888 *Arg1 = EmitScalarExpr(E->getArg(1));
5889 CGOpenCLRuntime OpenCLRT(CGM);
5890 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5891 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5892
5893 // Building the generic function prototype.
5894 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5895 llvm::FunctionType *FTy = llvm::FunctionType::get(
5896 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
5897
5898 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5899 {Arg0, Arg1, PacketSize, PacketAlign}));
5900 }
5901 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5902 case Builtin::BIget_pipe_num_packets:
5903 case Builtin::BIget_pipe_max_packets: {
5904 const char *BaseName;
5905 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5906 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5907 BaseName = "__get_pipe_num_packets";
5908 else
5909 BaseName = "__get_pipe_max_packets";
5910 std::string Name = std::string(BaseName) +
5911 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5912
5913 // Building the generic function prototype.
5914 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5915 CGOpenCLRuntime OpenCLRT(CGM);
5916 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5917 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5918 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5919 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5920
5921 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5922 {Arg0, PacketSize, PacketAlign}));
5923 }
5924
5925 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
5926 case Builtin::BIto_global:
5927 case Builtin::BIto_local:
5928 case Builtin::BIto_private: {
5929 auto Arg0 = EmitScalarExpr(E->getArg(0));
5930 auto NewArgT = llvm::PointerType::get(
5931 getLLVMContext(),
5932 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5933 auto NewRetT = llvm::PointerType::get(
5934 getLLVMContext(),
5935 CGM.getContext().getTargetAddressSpace(
5936 E->getType()->getPointeeType().getAddressSpace()));
5937 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
5938 llvm::Value *NewArg;
5939 if (Arg0->getType()->getPointerAddressSpace() !=
5940 NewArgT->getPointerAddressSpace())
5941 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
5942 else
5943 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
5944 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
5945 auto NewCall =
5946 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
5947 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
5948 ConvertType(E->getType())));
5949 }
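// For example (illustrative), to_global(p) becomes a call to __to_global
// with the operand cast to the generic address space; on SPIR-like
// targets roughly:
//   %g = call ptr addrspace(1) @__to_global(ptr addrspace(4) %p)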
5950
5951 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
5952 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
5953 // The code below expands the builtin call to a call to one of the following
5954 // functions that an OpenCL runtime library will have to provide:
5955 // __enqueue_kernel_basic
5956 // __enqueue_kernel_varargs
5957 // __enqueue_kernel_basic_events
5958 // __enqueue_kernel_events_varargs
5959 case Builtin::BIenqueue_kernel: {
5960 StringRef Name; // Generated function call name
5961 unsigned NumArgs = E->getNumArgs();
5962
5963 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
5964 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
5965 getContext().getTargetAddressSpace(LangAS::opencl_generic));
5966
5967 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
5968 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
5969 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
5970 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
5971
5972 // FIXME: Look through the addrspacecast which may exist to the stack
5973 // temporary as a hack.
5974 //
5975 // This is hardcoding the assumed ABI of the target function. This assumes
5976 // direct passing for every argument except NDRange, which is assumed to be
5977 // byval or byref indirect passed.
5978 //
5979 // This should be fixed to query a signature from CGOpenCLRuntime, and go
5980 // through EmitCallArgs to get the correct target ABI.
5981 Range = Range->stripPointerCasts();
5982
5983 llvm::Type *RangePtrTy = Range->getType();
5984
5985 if (NumArgs == 4) {
5986 // The most basic form of the call with parameters:
5987 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
5988 Name = "__enqueue_kernel_basic";
5989 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
5990 GenericVoidPtrTy};
5991 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5992
5993 auto Info =
5994 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
5995 llvm::Value *Kernel =
5996 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
5997 llvm::Value *Block =
5998 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
5999
6000 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6001 {Queue, Flags, Range, Kernel, Block});
6002 return RValue::get(RTCall);
6003 }
6004 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6005
6006 // Create a temporary array to hold the sizes of local pointer arguments
6007 // for the block. \p First is the position of the first size argument.
6008 auto CreateArrayForSizeVar =
6009 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6010 llvm::APInt ArraySize(32, NumArgs - First);
6011 QualType SizeArrayTy = getContext().getConstantArrayType(
6012 getContext().getSizeType(), ArraySize, nullptr,
6013 ArraySizeModifier::Normal,
6014 /*IndexTypeQuals=*/0);
6015 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6016 llvm::Value *TmpPtr = Tmp.getPointer();
6017 // The EmitLifetime* pair expects a naked Alloca as its last argument;
6018 // however, for cases where the default AS is not the Alloca AS, Tmp is
6019 // actually the Alloca addrspacecast to the default AS, hence the
6020 // stripPointerCasts().
6021 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6022 llvm::Value *ElemPtr;
6023 EmitLifetimeStart(Alloca);
6024 // Each of the following arguments specifies the size of the corresponding
6025 // argument passed to the enqueued block.
6026 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6027 for (unsigned I = First; I < NumArgs; ++I) {
6028 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6029 auto *GEP =
6030 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6031 if (I == First)
6032 ElemPtr = GEP;
6033 auto *V =
6034 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6035 Builder.CreateAlignedStore(
6036 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6037 }
6038 // Return the Alloca itself rather than a potential ascast as this is only
6039 // used by the paired EmitLifetimeEnd.
6040 return {ElemPtr, Alloca};
6041 };
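// For example, for enqueue_kernel(q, f, nd, ^(local int *a, local short *b){},
// 64, 128) the lambda above materializes a stack array holding {64, 128} as
// size_t values and returns a pointer to its first element together with the
// underlying alloca (the latter is only used for the paired lifetime end).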
6042
6043 // Could have events and/or varargs.
6044 if (E->getArg(3)->getType()->isBlockPointerType()) {
6045 // No events passed, but has variadic arguments.
6046 Name = "__enqueue_kernel_varargs";
6047 auto Info =
6048 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6049 llvm::Value *Kernel =
6050 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6051 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6052 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6053
6054 // Create a vector of the arguments, as well as a constant value to
6055 // express to the runtime the number of variadic arguments.
6056 llvm::Value *const Args[] = {Queue, Flags,
6057 Range, Kernel,
6058 Block, ConstantInt::get(IntTy, NumArgs - 4),
6059 ElemPtr};
6060 llvm::Type *const ArgTys[] = {
6061 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6062 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6063
6064 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6065 auto Call = RValue::get(
6066 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6067 EmitLifetimeEnd(TmpPtr);
6068 return Call;
6069 }
6070 // Any calls now have event arguments passed.
6071 if (NumArgs >= 7) {
6072 llvm::PointerType *PtrTy = llvm::PointerType::get(
6073 CGM.getLLVMContext(),
6074 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6075
6076 llvm::Value *NumEvents =
6077 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6078
6079 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
6080 // arguments to be a null pointer constant (including a `0` literal), we
6081 // can take this into account and emit a null pointer directly.
6082 llvm::Value *EventWaitList = nullptr;
6083 if (E->getArg(4)->isNullPointerConstant(
6084 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6085 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6086 } else {
6087 EventWaitList =
6088 E->getArg(4)->getType()->isArrayType()
6089 ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
6090 : EmitScalarExpr(E->getArg(4));
6091 // Convert to generic address space.
6092 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6093 }
6094 llvm::Value *EventRet = nullptr;
6095 if (E->getArg(5)->isNullPointerConstant(
6096 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6097 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6098 } else {
6099 EventRet =
6100 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6101 }
6102
6103 auto Info =
6104 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6105 llvm::Value *Kernel =
6106 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6107 llvm::Value *Block =
6108 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6109
6110 std::vector<llvm::Type *> ArgTys = {
6111 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6112 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6113
6114 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6115 NumEvents, EventWaitList, EventRet,
6116 Kernel, Block};
6117
6118 if (NumArgs == 7) {
6119 // Has events but no variadics.
6120 Name = "__enqueue_kernel_basic_events";
6121 llvm::FunctionType *FTy =
6122 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6123 return RValue::get(
6124 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6125 }
6126 // Has event info and variadics
6127 // Pass the number of variadics to the runtime function too.
6128 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6129 ArgTys.push_back(Int32Ty);
6130 Name = "__enqueue_kernel_events_varargs";
6131
6132 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6133 Args.push_back(ElemPtr);
6134 ArgTys.push_back(ElemPtr->getType());
6135
6136 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6137 auto Call = RValue::get(
6138 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6139 EmitLifetimeEnd(TmpPtr);
6140 return Call;
6141 }
6142 llvm_unreachable("Unexpected enqueue_kernel signature");
6143 }
6144 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6145 // parameter.
6146 case Builtin::BIget_kernel_work_group_size: {
6147 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6148 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6149 auto Info =
6150 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6151 Value *Kernel =
6152 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6153 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6154 return RValue::get(EmitRuntimeCall(
6155 CGM.CreateRuntimeFunction(
6156 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6157 false),
6158 "__get_kernel_work_group_size_impl"),
6159 {Kernel, Arg}));
6160 }
6161 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6162 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6163 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6164 auto Info =
6165 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6166 Value *Kernel =
6167 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6168 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6169 return RValue::get(EmitRuntimeCall(
6170 CGM.CreateRuntimeFunction(
6171 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6172 false),
6173 "__get_kernel_preferred_work_group_size_multiple_impl"),
6174 {Kernel, Arg}));
6175 }
6176 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6177 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6178 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6179 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6180 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6181 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6182 auto Info =
6183 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6184 Value *Kernel =
6185 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6186 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6187 const char *Name =
6188 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6189 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6190 : "__get_kernel_sub_group_count_for_ndrange_impl";
6191 return RValue::get(EmitRuntimeCall(
6192 CGM.CreateRuntimeFunction(
6193 llvm::FunctionType::get(
6194 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6195 false),
6196 Name),
6197 {NDRange, Kernel, Block}));
6198 }
6199 case Builtin::BI__builtin_store_half:
6200 case Builtin::BI__builtin_store_halff: {
6201 Value *Val = EmitScalarExpr(E->getArg(0));
6202 Address Address = EmitPointerWithAlignment(E->getArg(1));
6203 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6204 Builder.CreateStore(HalfVal, Address);
6205 return RValue::get(nullptr);
6206 }
6207 case Builtin::BI__builtin_load_half: {
6208 Address Address = EmitPointerWithAlignment(E->getArg(0));
6209 Value *HalfVal = Builder.CreateLoad(Address);
6210 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6211 }
6212 case Builtin::BI__builtin_load_halff: {
6213 Address Address = EmitPointerWithAlignment(E->getArg(0));
6214 Value *HalfVal = Builder.CreateLoad(Address);
6215 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6216 }
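// Example (schematic): __builtin_store_halff(f, p) truncates the float to
// half and stores it through p; __builtin_load_halff(p) loads a half and
// extends it back to float:
//   %h = fptrunc float %f to half
//   store half %h, ptr %p
// The _half variants do the same with double as the source/result type.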
6217 case Builtin::BI__builtin_printf:
6218 case Builtin::BIprintf:
6219 if (getTarget().getTriple().isNVPTX() ||
6220 getTarget().getTriple().isAMDGCN() ||
6221 (getTarget().getTriple().isSPIRV() &&
6222 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6223 if (getTarget().getTriple().isNVPTX())
6224 return EmitNVPTXDevicePrintfCallExpr(E);
6225 if ((getTarget().getTriple().isAMDGCN() ||
6226 getTarget().getTriple().isSPIRV()) &&
6227 getLangOpts().HIP)
6228 return EmitAMDGPUDevicePrintfCallExpr(E);
6229 }
6230
6231 break;
6232 case Builtin::BI__builtin_canonicalize:
6233 case Builtin::BI__builtin_canonicalizef:
6234 case Builtin::BI__builtin_canonicalizef16:
6235 case Builtin::BI__builtin_canonicalizel:
6236 return RValue::get(
6237 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6238
6239 case Builtin::BI__builtin_thread_pointer: {
6240 if (!getContext().getTargetInfo().isTLSSupported())
6241 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6242
6243 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6244 {GlobalsInt8PtrTy}, {}));
6245 }
6246 case Builtin::BI__builtin_os_log_format:
6247 return emitBuiltinOSLogFormat(*E);
6248
6249 case Builtin::BI__xray_customevent: {
6250 if (!ShouldXRayInstrumentFunction())
6251 return RValue::getIgnored();
6252
6253 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6254 XRayInstrKind::Custom))
6255 return RValue::getIgnored();
6256
6257 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6258 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6259 return RValue::getIgnored();
6260
6261 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6262 auto FTy = F->getFunctionType();
6263 auto Arg0 = E->getArg(0);
6264 auto Arg0Val = EmitScalarExpr(Arg0);
6265 auto Arg0Ty = Arg0->getType();
6266 auto PTy0 = FTy->getParamType(0);
6267 if (PTy0 != Arg0Val->getType()) {
6268 if (Arg0Ty->isArrayType())
6269 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6270 else
6271 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6272 }
6273 auto Arg1 = EmitScalarExpr(E->getArg(1));
6274 auto PTy1 = FTy->getParamType(1);
6275 if (PTy1 != Arg1->getType())
6276 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6277 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6278 }
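// Example (schematic): __xray_customevent(buf, len) is emitted as
//   call void @llvm.xray.customevent(ptr %buf, i64 %len)
// after decaying an array argument to a pointer and truncating or bitcasting
// the length to the intrinsic's parameter type.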
6279
6280 case Builtin::BI__xray_typedevent: {
6281 // TODO: There should be a way to always emit events even if the current
6282 // function is not instrumented. Losing events in a stream can cripple
6283 // a trace.
6284 if (!ShouldXRayInstrumentFunction())
6285 return RValue::getIgnored();
6286
6287 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6288 XRayInstrKind::Typed))
6289 return RValue::getIgnored();
6290
6291 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6292 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6293 return RValue::getIgnored();
6294
6295 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6296 auto FTy = F->getFunctionType();
6297 auto Arg0 = EmitScalarExpr(E->getArg(0));
6298 auto PTy0 = FTy->getParamType(0);
6299 if (PTy0 != Arg0->getType())
6300 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6301 auto Arg1 = E->getArg(1);
6302 auto Arg1Val = EmitScalarExpr(Arg1);
6303 auto Arg1Ty = Arg1->getType();
6304 auto PTy1 = FTy->getParamType(1);
6305 if (PTy1 != Arg1Val->getType()) {
6306 if (Arg1Ty->isArrayType())
6307 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6308 else
6309 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6310 }
6311 auto Arg2 = EmitScalarExpr(E->getArg(2));
6312 auto PTy2 = FTy->getParamType(2);
6313 if (PTy2 != Arg2->getType())
6314 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6315 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6316 }
6317
6318 case Builtin::BI__builtin_ms_va_start:
6319 case Builtin::BI__builtin_ms_va_end:
6320 return RValue::get(
6321 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
6322 BuiltinID == Builtin::BI__builtin_ms_va_start));
6323
6324 case Builtin::BI__builtin_ms_va_copy: {
6325 // Lower this manually. We can't reliably determine whether or not any
6326 // given va_copy() is for a Win64 va_list from the calling convention
6327 // alone, because it's legal to do this from a System V ABI function.
6328 // With opaque pointer types, we won't have enough information in LLVM
6329 // IR to determine this from the argument types, either. Best to do it
6330 // now, while we have enough information.
6331 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6332 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6333
6334 DestAddr = DestAddr.withElementType(Int8PtrTy);
6335 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6336
6337 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6338 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6339 }
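// Example (schematic): __builtin_ms_va_copy(dst, src) reduces to copying the
// current argument pointer:
//   %ap.val = load ptr, ptr %src
//   store ptr %ap.val, ptr %dst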
6340
6341 case Builtin::BI__builtin_get_device_side_mangled_name: {
6342 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6343 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6344 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6345 return RValue::get(Str.getPointer());
6346 }
6347 }
6348
6349 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6350 // the call using the normal call path, but using the unmangled
6351 // version of the function name.
6352 const auto &BI = getContext().BuiltinInfo;
6353 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6354 BI.isLibFunction(BuiltinID))
6355 return emitLibraryCall(*this, FD, E,
6356 CGM.getBuiltinLibFunction(FD, BuiltinID));
6357
6358 // If this is a predefined lib function (e.g. malloc), emit the call
6359 // using exactly the normal call path.
6360 if (BI.isPredefinedLibFunction(BuiltinID))
6361 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
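// For instance, __builtin_sin(x) is emitted here as a plain call to sin(),
// and malloc() is called through its regular declaration, so both follow the
// usual call lowering rather than intrinsic emission.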
6362
6363 // Check that a call to a target specific builtin has the correct target
6364 // features.
6365 // This is done down here to avoid running it for non-target-specific
6366 // builtins; however, if generic builtins start to require generic target
6367 // features, we can move this up to the beginning of the function.
6368 checkTargetFeatures(E, FD);
6369
6370 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6371 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6372
6373 // See if we have a target specific intrinsic.
6374 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6375 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6376 StringRef Prefix =
6377 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6378 if (!Prefix.empty()) {
6379 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6380 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6381 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6382 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6383 // NOTE: we don't need to perform a compatibility flag check here since the
6384 // intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters
6385 // the MS builtins via ALL_MS_LANGUAGES, so they are filtered out earlier.
6386 if (IntrinsicID == Intrinsic::not_intrinsic)
6387 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6388 }
6389
6390 if (IntrinsicID != Intrinsic::not_intrinsic) {
6391 SmallVector<Value *, 16> Args;
6392
6393 // Find out if any arguments are required to be integer constant
6394 // expressions.
6395 unsigned ICEArguments = 0;
6396 ASTContext::GetBuiltinTypeError Error;
6397 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6398 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6399
6400 Function *F = CGM.getIntrinsic(IntrinsicID);
6401 llvm::FunctionType *FTy = F->getFunctionType();
6402
6403 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6404 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6405 // If the intrinsic arg type is different from the builtin arg type
6406 // we need to do a bit cast.
6407 llvm::Type *PTy = FTy->getParamType(i);
6408 if (PTy != ArgValue->getType()) {
6409 // XXX - vector of pointers?
6410 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6411 if (PtrTy->getAddressSpace() !=
6412 ArgValue->getType()->getPointerAddressSpace()) {
6413 ArgValue = Builder.CreateAddrSpaceCast(
6414 ArgValue, llvm::PointerType::get(getLLVMContext(),
6415 PtrTy->getAddressSpace()));
6416 }
6417 }
6418
6419 // Cast vector type (e.g., v256i32) to x86_amx; this only happens
6420 // in AMX intrinsics.
6421 if (PTy->isX86_AMXTy())
6422 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6423 {ArgValue->getType()}, {ArgValue});
6424 else
6425 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6426 }
6427
6428 Args.push_back(ArgValue);
6429 }
6430
6431 Value *V = Builder.CreateCall(F, Args);
6432 QualType BuiltinRetType = E->getType();
6433
6434 llvm::Type *RetTy = VoidTy;
6435 if (!BuiltinRetType->isVoidType())
6436 RetTy = ConvertType(BuiltinRetType);
6437
6438 if (RetTy != V->getType()) {
6439 // XXX - vector of pointers?
6440 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6441 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6442 V = Builder.CreateAddrSpaceCast(
6443 V, llvm::PointerType::get(getLLVMContext(),
6444 PtrTy->getAddressSpace()));
6445 }
6446 }
6447
6448 // Cast x86_amx to vector type (e.g., v256i32); this only happens
6449 // in AMX intrinsics.
6450 if (V->getType()->isX86_AMXTy())
6451 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6452 {V});
6453 else
6454 V = Builder.CreateBitCast(V, RetTy);
6455 }
6456
6457 if (RetTy->isVoidTy())
6458 return RValue::get(nullptr);
6459
6460 return RValue::get(V);
6461 }
6462
6463 // Some target-specific builtins can have aggregate return values, e.g.
6464 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6465 // ReturnValue to be non-null, so that the target-specific emission code can
6466 // always just emit into it.
6467 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
6468 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6469 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6470 ReturnValue = ReturnValueSlot(DestPtr, false);
6471 }
6472
6473 // Now see if we can emit a target-specific builtin.
6474 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6475 switch (EvalKind) {
6476 case TEK_Scalar:
6477 if (V->getType()->isVoidTy())
6478 return RValue::get(nullptr);
6479 return RValue::get(V);
6480 case TEK_Aggregate:
6481 return RValue::getAggregate(ReturnValue.getAddress(),
6482 ReturnValue.isVolatile());
6483 case TEK_Complex:
6484 llvm_unreachable("No current target builtin returns complex");
6485 }
6486 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6487 }
6488
6489 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6490 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6491 switch (EvalKind) {
6492 case TEK_Scalar:
6493 if (V->getType()->isVoidTy())
6494 return RValue::get(nullptr);
6495 return RValue::get(V);
6496 case TEK_Aggregate:
6497 return RValue::getAggregate(ReturnValue.getAddress(),
6498 ReturnValue.isVolatile());
6499 case TEK_Complex:
6500 llvm_unreachable("No current hlsl builtin returns complex");
6501 }
6502 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6503 }
6504
6505 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6506 return EmitHipStdParUnsupportedBuiltin(this, FD);
6507
6508 ErrorUnsupported(E, "builtin function");
6509
6510 // Unknown builtin, for now just dump it out and return undef.
6511 return GetUndefRValue(E->getType());
6512}
6513
6514namespace {
6515struct BuiltinAlignArgs {
6516 llvm::Value *Src = nullptr;
6517 llvm::Type *SrcType = nullptr;
6518 llvm::Value *Alignment = nullptr;
6519 llvm::Value *Mask = nullptr;
6520 llvm::IntegerType *IntType = nullptr;
6521
6522 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6523 QualType AstType = E->getArg(0)->getType();
6524 if (AstType->isArrayType())
6525 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6526 else
6527 Src = CGF.EmitScalarExpr(E->getArg(0));
6528 SrcType = Src->getType();
6529 if (SrcType->isPointerTy()) {
6530 IntType = IntegerType::get(
6531 CGF.getLLVMContext(),
6532 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6533 } else {
6534 assert(SrcType->isIntegerTy());
6535 IntType = cast<llvm::IntegerType>(SrcType);
6536 }
6537 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6538 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6539 auto *One = llvm::ConstantInt::get(IntType, 1);
6540 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6541 }
6542};
6543} // namespace
6544
6545 /// Generate (x & (y-1)) == 0.
6546 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
6547 BuiltinAlignArgs Args(E, *this);
6548 llvm::Value *SrcAddress = Args.Src;
6549 if (Args.SrcType->isPointerTy())
6550 SrcAddress =
6551 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6552 return RValue::get(Builder.CreateICmpEQ(
6553 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6554 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6555}
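// Example (schematic): __builtin_is_aligned(p, 16) is emitted as
//   %src_addr = ptrtoint ptr %p to iN     ; N = index width
//   %set_bits = and iN %src_addr, 15      ; mask = alignment - 1
//   %is_aligned = icmp eq iN %set_bits, 0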
6556
6557/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6558/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6559 /// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6560 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
6561 BuiltinAlignArgs Args(E, *this);
6562 llvm::Value *SrcForMask = Args.Src;
6563 if (AlignUp) {
6564 // When aligning up we have to first add the mask to ensure we go over the
6565 // next alignment value and then align down to the next valid multiple.
6566 // By adding the mask, we ensure that align_up on an already aligned
6567 // value will not change the value.
6568 if (Args.Src->getType()->isPointerTy()) {
6569 if (getLangOpts().PointerOverflowDefined)
6570 SrcForMask =
6571 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6572 else
6573 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6574 /*SignedIndices=*/true,
6575 /*isSubtraction=*/false,
6576 E->getExprLoc(), "over_boundary");
6577 } else {
6578 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6579 }
6580 }
6581 // Invert the mask to only clear the lower bits.
6582 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6583 llvm::Value *Result = nullptr;
6584 if (Args.Src->getType()->isPointerTy()) {
6585 Result = Builder.CreateIntrinsic(
6586 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6587 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6588 } else {
6589 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6590 }
6591 assert(Result->getType() == Args.SrcType);
6592 return RValue::get(Result);
6593}
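// Example (schematic): for integers, __builtin_align_down(x, 16) becomes
// x & ~15, and __builtin_align_up(x, 16) becomes (x + 15) & ~15. For
// pointers, the same masking is performed via llvm.ptrmask (preceded by a
// GEP of mask bytes in the align_up case) so no ptrtoint/inttoptr pair is
// needed.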
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > Types)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static Value * emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &context, const clang::QualType Type)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool)
Utility to insert an atomic cmpxchg instruction.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:784
Builtin::Context & BuiltinInfo
Definition ASTContext.h:786
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:903
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3734
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4075
Expr * getRHS() const
Definition Expr.h:4024
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:229
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:401
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
bool hasStoredFPFeatures() const
Definition Expr.h:3036
SourceLocation getBeginLoc() const
Definition Expr.h:3211
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
Expr * getCallee()
Definition Expr.h:3024
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3176
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
arg_range arguments()
Definition Expr.h:3129
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:147
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:184
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:173
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:132
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:350
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
virtual llvm::Value * getPipeElemAlign(const Expr *PipeArg)
virtual llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2845
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
Definition AMDGPU.cpp:346
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1187
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5079
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:420
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3689
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6711
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3579
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4578
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2710
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6112
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7825
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:225
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:3829
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1277
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2190
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5235
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:5020
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4269
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2331
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1569
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:737
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1552
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:188
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4254
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4181
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2220
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1230
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:420
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4169
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1668
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1702
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:182
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:361
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:93
static RValue get(llvm::Value *V)
Definition CGValue.h:98
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:125
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:108
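Which RValue factory applies follows the evaluation kind of the result type. An illustrative sketch of the three endings a builtin emitter typically has (V, Real, Imag, and DestAddr are assumed to have been emitted above; only one return would appear in real code):

    return RValue::get(V);                  // scalar result
    return RValue::getComplex(Real, Imag);  // _Complex result
    return RValue::getAggregate(DestAddr);  // aggregate result in memory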
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query whether the target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4373
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3436
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3466
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:459
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
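A sketch of the common fold-if-constant pattern built on EvaluateAsInt (names are illustrative):

    Expr::EvalResult ER;
    if (E->getArg(0)->EvaluateAsInt(ER, CGM.getContext())) {
      llvm::APSInt V = ER.Val.getInt();
      // The argument is a known constant; emit it directly.
      return RValue::get(llvm::ConstantInt::get(ConvertType(E->getType()), V));
    }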
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3115
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3093
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3088
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:835
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3668
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3068
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4045
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
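This is the workhorse behind __builtin_object_size-style lowering; the Type argument selects the object-size mode (0-3). A sketch:

    uint64_t ObjSize;
    if (E->getArg(0)->tryEvaluateObjectSize(ObjSize, getContext(), /*Type=*/0))
      return RValue::get(llvm::ConstantInt::get(Int64Ty, ObjSize));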
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:225
Represents the difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3160
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4844
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3751
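A sketch of the usual dispatch test (getDirectCallee is the standard CallExpr accessor):

    if (const FunctionDecl *FD = E->getDirectCallee())
      if (unsigned BID = FD->getBuiltinID())
        if (BID == Builtin::BI__builtin_fabsf) {
          // ...emit the intrinsic form...
        }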
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition TypeBase.h:5254
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5522
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2199
PipeType - OpenCL 2.0 pipe type.
Definition TypeBase.h:8096
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8362
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
Represents a struct/union/class.
Definition Decl.h:4312
field_range fields() const
Definition Decl.h:4515
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:745
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8535
bool isVoidType() const
Definition TypeBase.h:8871
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2205
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8614
bool isCountAttributedType() const
Definition Type.cpp:741
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1909
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9091
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4175
QualType getElementType() const
Definition TypeBase.h:4189
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:154
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:145
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
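A sketch of how a caller consumes the computed layout (the loop body is illustrative):

    analyze_os_log::OSLogBufferLayout Layout;
    if (analyze_os_log::computeOSLogBufferLayout(getContext(), E, Layout)) {
      unsigned char Summary = Layout.getSummaryByte();
      unsigned char NumArgs = Layout.getNumArgsByte();
      for (const auto &Item : Layout.Items) {
        // ...serialize each item into the os_log buffer...
      }
    }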
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:350
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
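A sketch of the enable/disable pattern (the specific SanitizerKind values are illustrative):

    SanitizerSet Checks;
    Checks.set(SanitizerKind::Null, true);   // enable a single check
    Checks.set(SanitizerKind::Alignment, true);
    Checks.clear(SanitizerKind::Alignment);  // disable one again
    Checks.clear();                          // default mask disables all of them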
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742
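The tgmath.h macros above dispatch on argument type in C. In C++, the <cmath> overload set plays the same role; a small illustration (not part of this file, shown only to make the dispatch concrete):

    #include <cmath>
    // Each call resolves to the overload matching its argument type,
    // mirroring tgmath.h's generic macros in C.
    float  f = std::logb(8.0f);  // float overload, yields 3.0f
    double d = std::logb(8.0);   // double overload, yields 3.0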