//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBuiltin.h"
#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/OSLog.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include <optional>
#include <utility>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Some builtins do not have a library implementation on some targets and
/// are instead emitted as LLVM IR by some target builtin emitters.
/// FIXME: Remove this when library support is added.
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
                                  const Builtin::Context &BI,
                                  const CodeGenFunction &CGF) {
  if (!CGF.CGM.getLangOpts().MathErrno &&
      CGF.CurFPFeatures.getExceptionMode() ==
          LangOptions::FPExceptionModeKind::FPE_Ignore &&
      !CGF.CGM.getTargetCodeGenInfo().supportsLibCall()) {
    switch (BuiltinID) {
    default:
      return false;
    case Builtin::BIlogbf:
    case Builtin::BI__builtin_logbf:
    case Builtin::BIlogb:
    case Builtin::BI__builtin_logb:
    case Builtin::BIscalbnf:
    case Builtin::BI__builtin_scalbnf:
    case Builtin::BIscalbn:
    case Builtin::BI__builtin_scalbn:
      return true;
    }
  }
  return false;
}
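
// For illustration: with -fno-math-errno on a target lacking these libcalls,
// a call such as __builtin_logbf(x) is expanded inline to LLVM IR by the
// target builtin emitter instead of being lowered to a call to logbf.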

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue,
                                        llvm::Triple::ArchType Arch) {
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  // Returning nullptr here leads to the builtin being handled in
  // EmitStdParUnsupportedBuiltin.
  if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
      Arch != CGF->getTarget().getTriple().getArch())
    return nullptr;

  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
  case llvm::Triple::dxil:
    return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
    if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
      return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
    [[fallthrough]];
  case llvm::Triple::spirv:
    return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

llvm::Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                                    const CallExpr *E,
                                                    ReturnValueSlot ReturnValue) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
                                   getTarget().getTriple().getArch());
}
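
// For illustration: in CUDA/HIP device compilation the aux target is the
// host, so a host-only builtin that appears in device code is dispatched
// here with the aux target's architecture rather than the current one.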

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}
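
// For illustration: under -ftrivial-auto-var-init=zero a __builtin_alloca
// buffer is memset with 0x00, under =pattern with the target's repeating
// pattern byte; either memset is tagged with "auto-init" annotation metadata
// so later passes can recognize it.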

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary). We may have
  // to build this up so provide a small stack buffer to handle the vast
  // majority of names.
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to targets other than
    // PPC, after the backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.contains(BuiltinID))
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.contains(BuiltinID))
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}
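
// For illustration: on a PPC64 target whose 'long double' is IEEE quad,
// __builtin_printf maps to "__printfieee128"; elsewhere the "__builtin_"
// prefix (10 characters, hence substr(10)) is simply dropped, yielding
// "printf".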

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                 QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                   QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

static Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? Ctx.getTypeSizeInChars(Ctx.VoidPtrTy).getQuantity()
                       : DL.getTypeStoreSize(Ptr.getElementType());
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
    // Force address to be at least naturally-aligned.
    return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
  }
  return Ptr;
}
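
// For illustration: a seq_cst atomic builtin on an i32 living at a 2-byte
// aligned address emits the warn_sync_op_misaligned diagnostic and then
// proceeds as if the address were naturally (4-byte) aligned.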

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}
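
// For illustration: a fetch-and-add style builtin reaches this helper as
// MakeBinaryAtomicValue(CGF, llvm::AtomicRMWInst::Add, E, Ordering) and
// emits a single 'atomicrmw add' whose result is the value held before the
// addition.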

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0));

  LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Val);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///        cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
/// invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Cmp->getType();
  Cmp = EmitToInt(CGF, Cmp, T, IntType);
  Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
  auto *RTy = Exchange->getType();

  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));

  if (RTy->isPointerTy()) {
    Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
    Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
  }

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  CmpXchg->setVolatile(true);

  auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
  if (RTy->isPointerTy()) {
    Result = CGF.Builder.CreateIntToPtr(Result, RTy);
  }

  return Result;
}
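
// For illustration, with 32-bit operands:
//   _InterlockedCompareExchange(Dest, Exchange, Comparand)
// lowers roughly to
//   %pair = cmpxchg volatile ptr %Dest, i32 %Comparand, i32 %Exchange
//           seq_cst seq_cst
//   %old  = extractvalue { i32, i1 } %pair, 0   ; the returned value
// Note the swap of Exchange and Comparand relative to the builtin's
// argument order.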

// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
//
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed int64.

static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));

  assert(DestPtr->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values. Alignment is also overridden for
  // destination pointer.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address DestAddr(DestPtr, Int128Ty,
                   CGF.getContext().toCharUnitsFromBits(128));
  ComparandAddr = ComparandAddr.withElementType(Int128Ty);

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandAddr);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}

static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}
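
// Note: 'atomicrmw' yields the value held *before* the operation, while the
// _InterlockedIncrement/_InterlockedDecrement family returns the *new*
// value, hence the extra add/sub of 1 after each RMW above.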

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setAtomic(llvm::AtomicOrdering::Monotonic);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setAtomic(llvm::AtomicOrdering::Monotonic);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                 const CallExpr *E, unsigned IntrinsicID,
                                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                  const CallExpr *E, unsigned IntrinsicID,
                                                  unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Has second type mangled argument.
static Value *
emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                       Intrinsic::ID IntrinsicID,
                                       Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                                   const CallExpr *E, unsigned IntrinsicID,
                                                   unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}

static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                              Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));

  llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Val);

  llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
  LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);

  llvm::StoreInst *StoreSin =
      CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
  llvm::StoreInst *StoreCos =
      CGF.Builder.CreateStore(CosResult, CosLV.getAddress());

  // Mark the two stores as non-aliasing with each other. The order of stores
  // emitted by this builtin is arbitrary; enforcing a particular order would
  // prevent optimizations later on.
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
  MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
  MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
  StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
  StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
}

static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                    Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));

  llvm::Value *Call =
      CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);

  llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
  CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);

  return FractionalResult;
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-Endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}
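
// For illustration: for a plain 'double' this reduces to
//   %bits = bitcast double %v to i64
//   %sign = icmp slt i64 %bits, 0
// i.e. the sign bit is set exactly when the bit pattern is negative when
// read as a signed integer.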

/// Checks that no arguments or results are passed indirectly in the ABI (i.e.
/// via a hidden pointer). This is used to check that annotating FP libcalls
/// (that could set `errno`) with "int" TBAA metadata is safe. If any
/// floating-point arguments are passed indirectly, setup for the call could
/// be incorrectly optimized out.
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) {
  auto IsIndirect = [&](ABIArgInfo const &info) {
    return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
  };
  return !IsIndirect(FnInfo.getReturnInfo()) &&
         llvm::none_of(FnInfo.arguments(),
                       [&](CGFunctionInfoArgInfo const &ArgInfo) {
                         return IsIndirect(ArgInfo.info);
                       });
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  llvm::CallBase *callOrInvoke = nullptr;
  CGFunctionInfo const *FnInfo = nullptr;
  RValue Call =
      CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
                   /*Chain=*/nullptr, &callOrInvoke, &FnInfo);

  if (unsigned BuiltinID = FD->getBuiltinID()) {
    // Check whether this is an FP math builtin function, such as
    // BI__builtin_expf.
    ASTContext &Context = CGF.getContext();
    bool ConstWithoutErrnoAndExceptions =
        Context.BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
    // Restrict to targets with errno; for example, MacOS doesn't set errno.
    // TODO: Support builtin functions with complex type returned, e.g. cacosh.
    if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
        !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
        HasNoIndirectArgumentsOrResults(*FnInfo)) {
      // Emit "int" TBAA metadata on FP math libcalls.
      clang::QualType IntTy = Context.IntTy;
      TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
      CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
    }
  }
  return Call;
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
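
// For illustration: EmitOverflowIntrinsic(CGF, Intrinsic::uadd_with_overflow,
// X, Y, Carry) on i32 operands emits
//   %s = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %X, i32 %Y)
// and returns the i32 sum while storing the i1 overflow flag in Carry.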

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = context.getIntWidth(Type);
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}
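
// Worked example: encompassing {32-bit signed, 32-bit unsigned} yields
// {Width = 33, Signed = true}: the result must be signed, and a signed type
// needs one bit beyond 32 to represent every 32-bit unsigned value.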

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
                            ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

namespace {

/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
class StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  bool AddrOfSeen = false;

public:
  const Expr *ArrayIndex = nullptr;
  QualType ArrayElementTy;

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (AddrOfSeen && E->getType()->isArrayType())
      // Avoid forms like '&ptr->array'.
      return nullptr;
    return E;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (ArrayIndex)
      // We don't support multiple subscripts.
      return nullptr;

    AddrOfSeen = false; // '&ptr->array[idx]' is okay.
    ArrayIndex = E->getIdx();
    ArrayElementTy = E->getBase()->getType();
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
    AddrOfSeen = true;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
    AddrOfSeen = false;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
    return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
  }
};

} // end anonymous namespace
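
// For illustration: visiting '&p->fam[i]' returns the MemberExpr for 'fam'
// while recording ArrayIndex = 'i' and the array's element type, whereas a
// bare '&p->fam' (the address of the array itself) yields nullptr.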

/// Find a struct's flexible array member. It may be embedded inside multiple
/// sub-structs, but must still be the last field.
static const FieldDecl *FindFlexibleArrayMemberField(CodeGenFunction &CGF,
                                                     ASTContext &Ctx,
                                                     const RecordDecl *RD) {
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      CGF.getLangOpts().getStrictFlexArraysLevel();

  if (RD->isImplicit())
    return nullptr;

  for (const FieldDecl *FD : RD->fields()) {
    if (Decl::isFlexibleArrayMemberLike(
            Ctx, FD, FD->getType(), StrictFlexArraysLevel,
            /*IgnoreTemplateOrMacroSubstitution=*/true))
      return FD;

    if (const auto *RD = FD->getType()->getAsRecordDecl())
      if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
        return FD;
  }

  return nullptr;
}

/// Calculate the offset of a struct field. It may be embedded inside multiple
/// sub-structs.
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
                           const FieldDecl *FD, int64_t &Offset) {
  if (RD->isImplicit())
    return false;

  // Keep track of the field number ourselves, because the other methods
  // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
  // is laid out.
  uint32_t FieldNo = 0;
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);

  for (const FieldDecl *Field : RD->fields()) {
    if (Field == FD) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    if (const auto *RD = Field->getType()->getAsRecordDecl()) {
      if (GetFieldOffset(Ctx, RD, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }
    }

    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}

static std::optional<int64_t>
GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
  int64_t Offset = 0;

  if (GetFieldOffset(Ctx, RD, FD, Offset))
    return std::optional<int64_t>(Offset);

  return std::nullopt;
}

llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
                                                llvm::Value *EmittedE,
                                                unsigned Type,
                                                llvm::IntegerType *ResType) {
  // Note: If the whole struct is specified in the __bdos (i.e. the Visitor
  // returns a DeclRefExpr), the calculation of the whole size of the structure
  // with a flexible array member can be done in two ways:
  //
  //   1) sizeof(struct S) + count * sizeof(typeof(fam))
  //   2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
  //
  // The first will add additional padding after the end of the array
  // allocation while the second method is more precise, but not quite expected
  // from programmers. See
  // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
  // of the topic.
  //
  // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
  // structure. Therefore, because of the above issue, we choose to match what
  // GCC does for consistency's sake.

  StructFieldAccess Visitor;
  E = Visitor.Visit(E);
  if (!E)
    return nullptr;

  const Expr *Idx = Visitor.ArrayIndex;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
      int64_t Val = IL->getValue().getSExtValue();
      if (Val < 0)
        return getDefaultBuiltinObjectSizeResult(Type, ResType);

      // The index is 0, so we don't need to take it into account.
      if (Val == 0)
        Idx = nullptr;
    }
  }

  // __counted_by on either a flexible array member or a pointer into a struct
  // with a flexible array member.
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
                                   Type, ResType);

  // __counted_by on a pointer in a struct.
  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
      ICE && ICE->getCastKind() == CK_LValueToRValue)
    return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
                                    Type, ResType);

  return nullptr;
}

static llvm::Value *EmitPositiveResultOrZero(CodeGenFunction &CGF,
                                             llvm::Value *Res,
                                             llvm::Value *Index,
                                             llvm::IntegerType *ResType,
                                             bool IsSigned) {
  // cmp = (array_size >= 0)
  Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
  if (Index)
    // cmp = (cmp && index >= 0)
    Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);

  // return cmp ? result : 0
  return CGF.Builder.CreateSelect(Cmp, Res,
                                  ConstantInt::get(ResType, 0, IsSigned));
}

static std::pair<llvm::Value *, llvm::Value *>
GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME,
                      const FieldDecl *ArrayFD, const FieldDecl *CountFD,
                      const Expr *Idx, llvm::IntegerType *ResType,
                      bool IsSigned) {
  // count = ptr->count;
  Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
  if (!Count)
    return std::make_pair<Value *>(nullptr, nullptr);
  Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");

  // index = ptr->index;
  Value *Index = nullptr;
  if (Idx) {
    bool IdxSigned = Idx->getType()->isSignedIntegerType();
    Index = CGF.EmitScalarExpr(Idx);
    Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
  }

  return std::make_pair(Count, Index);
}

llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  //      struct p;
  //      struct s {
  //          /* ... */
  //          struct p **array __attribute__((counted_by(count)));
  //          int count;
  //      };
  //
  // 1) 'ptr->array':
  //
  //      count = ptr->count;
  //
  //      array_element_size = sizeof (*ptr->array);
  //      array_size = count * array_element_size;
  //
  //      result = array_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //
  //      array_element_size = sizeof (*ptr->array);
  //      array_size = count * array_element_size;
  //
  //      casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //      index_size = index * casted_array_element_size;
  //      result = array_size - index_size;
  //
  //      cmp = (result >= 0)
  //      if (index)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;

  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have a CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}

llvm::Value *CodeGenFunction::emitCountedByMemberSize(
    const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD)
    return nullptr;

  // Find the flexible array member and check that it has the __counted_by
  // attribute.
  ASTContext &Ctx = getContext();
  const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
  const FieldDecl *FlexibleArrayMemberFD = nullptr;

  if (Decl::isFlexibleArrayMemberLike(
          Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
          /*IgnoreTemplateOrMacroSubstitution=*/true))
    FlexibleArrayMemberFD = FD;
  else
    FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);

  if (!FlexibleArrayMemberFD ||
      !FlexibleArrayMemberFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the flexible array member's object size using these formulae.
  // (Note: if the calculation is negative, we return 0.):
  //
  //      struct p;
  //      struct s {
  //          /* ... */
  //          int count;
  //          struct p *array[] __attribute__((counted_by(count)));
  //      };
  //
  // 1) 'ptr->array':
  //
  //      count = ptr->count;
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      result = flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      casted_flexible_array_member_element_size =
  //          sizeof (*((cast) ptr->array));
  //      index_size = index * casted_flexible_array_member_element_size;
  //
  //      result = flexible_array_member_size - index_size;
  //
  //      cmp = (result >= 0)
  //      if (index != 0)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;
  //
  // 3) '&ptr->field':
  //
  //      count = ptr->count;
  //      sizeof_struct = sizeof (struct s);
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      field_offset = offsetof (struct s, field);
  //      offset_diff = sizeof_struct - field_offset;
  //
  //      result = offset_diff + flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 4) '&((cast) ptr->field_array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //      sizeof_struct = sizeof (struct s);
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      casted_field_element_size = sizeof (*((cast) ptr->field_array));
  //      field_offset = offsetof (struct s, field)
  //      field_offset += index * casted_field_element_size;
  //
  //      offset_diff = sizeof_struct - field_offset;
  //
  //      result = offset_diff + flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      if (index != 0)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();

  // Explicit cast because otherwise the CharWidth will promote an i32's into
  // u64's leading to overflows.
  int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());

  // field_offset = offsetof (struct s, field);
  Value *FieldOffset = nullptr;
  if (FlexibleArrayMemberFD != FD) {
    std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
    if (!Offset)
      return nullptr;
    FieldOffset =
        llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
  }

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // flexible_array_member_element_size = sizeof (*ptr->array);
  const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
  CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
  auto *FlexibleArrayMemberElementSize =
      llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

  // flexible_array_member_size = count * flexible_array_member_element_size;
  Value *FlexibleArrayMemberSize =
      Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
                        "flexible_array_member_size", !IsSigned, IsSigned);

  Value *Result = nullptr;
  if (FlexibleArrayMemberFD == FD) {
    if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
      // casted_flexible_array_member_element_size =
      //     sizeof (*((cast) ptr->array));
      llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
          FlexibleArrayMemberElementSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        CharUnits BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
        CastedFlexibleArrayMemberElementSize =
            llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
      }

      // index_size = index * casted_flexible_array_member_element_size;
      Value *IndexSize =
          Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
                            "index_size", !IsSigned, IsSigned);

      // result = flexible_array_member_size - index_size;
      Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
                                 !IsSigned, IsSigned);
    } else { // Option (1) 'ptr->array'
      // result = flexible_array_member_size;
      Result = FlexibleArrayMemberSize;
    }
  } else {
    // sizeof_struct = sizeof (struct s);
    llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
    const llvm::DataLayout &Layout = CGM.getDataLayout();
    TypeSize Size = Layout.getTypeSizeInBits(StructTy);
    Value *SizeofStruct =
        llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);

    if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
      // casted_field_element_size = sizeof (*((cast) ptr->field_array));
      CharUnits BaseSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
      } else {
        const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
        BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
      }

      llvm::ConstantInt *CastedFieldElementSize =
          llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

      // field_offset += index * casted_field_element_size;
      Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
                                     "field_offset", !IsSigned, IsSigned);
      FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
    }
    // Option (3) '&ptr->field', and Option (4) continuation.
    // offset_diff = sizeof_struct - field_offset;
    Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
                                          "offset_diff", !IsSigned, IsSigned);

    // result = offset_diff + flexible_array_member_size;
    Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1496
1497/// Returns a Value corresponding to the size of the given expression.
1498/// This Value may be either of the following:
1499/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1500/// it)
1501/// - A call to the @llvm.objectsize intrinsic
1502///
1503/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1504/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1505/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1506llvm::Value *
1507CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1508 llvm::IntegerType *ResType,
1509 llvm::Value *EmittedE, bool IsDynamic) {
1510 // We need to reference an argument if the pointer is a parameter with the
1511 // pass_object_size attribute.
1512 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1513 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1514 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1515 if (Param != nullptr && PS != nullptr &&
1516 areBOSTypesCompatible(PS->getType(), Type)) {
1517 auto Iter = SizeArguments.find(Param);
1518 assert(Iter != SizeArguments.end());
1519
1520 const ImplicitParamDecl *D = Iter->second;
1521 auto DIter = LocalDeclMap.find(D);
1522 assert(DIter != LocalDeclMap.end());
1523
1524 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1525 getContext().getSizeType(), E->getBeginLoc());
1526 }
1527 }
1528
1529 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1530 // evaluate E for side-effects. In either case, we shouldn't lower to
1531 // @llvm.objectsize.
1532 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1533 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1534
1535 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1536 assert(Ptr->getType()->isPointerTy() &&
1537 "Non-pointer passed to __builtin_object_size?");
1538
1539 if (IsDynamic)
1540 // Emit special code for a flexible array member with the "counted_by"
1541 // attribute.
1542 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1543 return V;
1544
1545 Function *F =
1546 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1547
1548 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
1549 Value *Min = Builder.getInt1((Type & 2) != 0);
1550 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
1551 Value *NullIsUnknown = Builder.getTrue();
1552 Value *Dynamic = Builder.getInt1(IsDynamic);
1553 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1554}
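// Editorial note: a minimal illustration of the two lowering paths above,
// using hypothetical user code. The hidden-size path only applies when the
// argument names a parameter carrying the pass_object_size attribute.
//
//   // Size is loaded from the implicit parameter set up by the caller:
//   int f(char *p __attribute__((pass_object_size(0)))) {
//     return __builtin_object_size(p, 0);
//   }
//
//   // Otherwise the call becomes an intrinsic call such as
//   //   call i64 @llvm.objectsize.i64.p0(ptr %p, i1 %min, i1 true, i1 %dyn)
//   // where %min is (Type & 2) != 0 and %dyn reflects IsDynamic.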
1555
1556namespace {
1557/// A struct to generically describe a bit test intrinsic.
1558struct BitTest {
1559 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
1560 enum InterlockingKind : uint8_t {
1561 Unlocked,
1562 Sequential,
1563 Acquire,
1564 Release,
1565 NoFence
1566 };
1567
1568 ActionKind Action;
1569 InterlockingKind Interlocking;
1570 bool Is64Bit;
1571
1572 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1573};
1574
1575} // namespace
1576
1577BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1578 switch (BuiltinID) {
1579 // Main portable variants.
1580 case Builtin::BI_bittest:
1581 return {TestOnly, Unlocked, false};
1582 case Builtin::BI_bittestandcomplement:
1583 return {Complement, Unlocked, false};
1584 case Builtin::BI_bittestandreset:
1585 return {Reset, Unlocked, false};
1586 case Builtin::BI_bittestandset:
1587 return {Set, Unlocked, false};
1588 case Builtin::BI_interlockedbittestandreset:
1589 return {Reset, Sequential, false};
1590 case Builtin::BI_interlockedbittestandset:
1591 return {Set, Sequential, false};
1592
1593 // 64-bit variants.
1594 case Builtin::BI_bittest64:
1595 return {TestOnly, Unlocked, true};
1596 case Builtin::BI_bittestandcomplement64:
1597 return {Complement, Unlocked, true};
1598 case Builtin::BI_bittestandreset64:
1599 return {Reset, Unlocked, true};
1600 case Builtin::BI_bittestandset64:
1601 return {Set, Unlocked, true};
1602 case Builtin::BI_interlockedbittestandreset64:
1603 return {Reset, Sequential, true};
1604 case Builtin::BI_interlockedbittestandset64:
1605 return {Set, Sequential, true};
1606
1607 // ARM/AArch64-specific ordering variants.
1608 case Builtin::BI_interlockedbittestandset_acq:
1609 return {Set, Acquire, false};
1610 case Builtin::BI_interlockedbittestandset_rel:
1611 return {Set, Release, false};
1612 case Builtin::BI_interlockedbittestandset_nf:
1613 return {Set, NoFence, false};
1614 case Builtin::BI_interlockedbittestandreset_acq:
1615 return {Reset, Acquire, false};
1616 case Builtin::BI_interlockedbittestandreset_rel:
1617 return {Reset, Release, false};
1618 case Builtin::BI_interlockedbittestandreset_nf:
1619 return {Reset, NoFence, false};
1620 case Builtin::BI_interlockedbittestandreset64_acq:
1621 return {Reset, Acquire, true};
1622 case Builtin::BI_interlockedbittestandreset64_rel:
1623 return {Reset, Release, true};
1624 case Builtin::BI_interlockedbittestandreset64_nf:
1625 return {Reset, NoFence, true};
1626 case Builtin::BI_interlockedbittestandset64_acq:
1627 return {Set, Acquire, true};
1628 case Builtin::BI_interlockedbittestandset64_rel:
1629 return {Set, Release, true};
1630 case Builtin::BI_interlockedbittestandset64_nf:
1631 return {Set, NoFence, true};
1632 }
1633 llvm_unreachable("expected only bittest intrinsics");
1634}
1635
1636static char bitActionToX86BTCode(BitTest::ActionKind A) {
1637 switch (A) {
1638 case BitTest::TestOnly: return '\0';
1639 case BitTest::Complement: return 'c';
1640 case BitTest::Reset: return 'r';
1641 case BitTest::Set: return 's';
1642 }
1643 llvm_unreachable("invalid action");
1644}
1645
1646static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
1647 BitTest BT,
1648 const CallExpr *E, Value *BitBase,
1649 Value *BitPos) {
1650 char Action = bitActionToX86BTCode(BT.Action);
1651 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1652
1653 // Build the assembly.
1654 SmallString<64> Asm;
1655 raw_svector_ostream AsmOS(Asm);
1656 if (BT.Interlocking != BitTest::Unlocked)
1657 AsmOS << "lock ";
1658 AsmOS << "bt";
1659 if (Action)
1660 AsmOS << Action;
1661 AsmOS << SizeSuffix << " $2, ($1)";
1662
1663 // Build the constraints. FIXME: We should support immediates when possible.
1664 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1665 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1666 if (!MachineClobbers.empty()) {
1667 Constraints += ',';
1668 Constraints += MachineClobbers;
1669 }
1670 llvm::IntegerType *IntType = llvm::IntegerType::get(
1671 CGF.getLLVMContext(),
1672 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1673 llvm::FunctionType *FTy =
1674 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1675
1676 llvm::InlineAsm *IA =
1677 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1678 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1679}
1680
1681static llvm::AtomicOrdering
1682getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1683 switch (I) {
1684 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1685 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1686 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1687 case BitTest::Release: return llvm::AtomicOrdering::Release;
1688 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1689 }
1690 llvm_unreachable("invalid interlocking");
1691}
1692
1693static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1694 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1695 llvm::Type *ArgType = ArgValue->getType();
1696
1697 // Boolean vectors can be cast directly to their bitfield representation. We
1698 // intentionally do not round up to the next power of two size and let LLVM
1699 // handle the trailing bits.
1700 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1701 VT && VT->getElementType()->isIntegerTy(1)) {
1702 llvm::Type *StorageType =
1703 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1704 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1705 }
1706
1707 return ArgValue;
1708}
1709
1710/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1711/// bits and a bit position and read and optionally modify the bit at that
1712/// position. The position index can be arbitrarily large, i.e. it can be larger
1713/// than 31 or 63, so we need an indexed load in the general case.
1714static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1715 unsigned BuiltinID,
1716 const CallExpr *E) {
1717 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1718 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1719
1720 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1721
1722 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1723 // indexing operation internally. Use them if possible.
1724 if (CGF.getTarget().getTriple().isX86())
1725 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1726
1727 // Otherwise, use generic code to load one byte and test the bit. Use all but
1728 // the bottom three bits as the array index, and the bottom three bits to form
1729 // a mask.
1730 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1731 Value *ByteIndex = CGF.Builder.CreateAShr(
1732 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1733 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1734 "bittest.byteaddr"),
1735 CGF.Int8Ty, CharUnits::One());
1736 Value *PosLow =
1737 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1738 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1739
1740 // The updating instructions will need a mask.
1741 Value *Mask = nullptr;
1742 if (BT.Action != BitTest::TestOnly) {
1743 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1744 "bittest.mask");
1745 }
1746
1747 // Check the action and ordering of the interlocked intrinsics.
1748 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1749
1750 Value *OldByte = nullptr;
1751 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1752 // Emit a combined atomicrmw load/store operation for the interlocked
1753 // intrinsics.
1754 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1755 if (BT.Action == BitTest::Reset) {
1756 Mask = CGF.Builder.CreateNot(Mask);
1757 RMWOp = llvm::AtomicRMWInst::And;
1758 }
1759 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1760 } else {
1761 // Emit a plain load for the non-interlocked intrinsics.
1762 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1763 Value *NewByte = nullptr;
1764 switch (BT.Action) {
1765 case BitTest::TestOnly:
1766 // Don't store anything.
1767 break;
1768 case BitTest::Complement:
1769 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1770 break;
1771 case BitTest::Reset:
1772 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1773 break;
1774 case BitTest::Set:
1775 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1776 break;
1777 }
1778 if (NewByte)
1779 CGF.Builder.CreateStore(NewByte, ByteAddr);
1780 }
1781
1782 // However we loaded the old byte, either by plain load or atomicrmw, shift
1783 // the bit into the low position and mask it to 0 or 1.
1784 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1785 return CGF.Builder.CreateAnd(
1786 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1787}
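// Editorial note: outside x86, the sequence above behaves like this C sketch
// (hypothetical helper, test-only variant):
//
//   unsigned char bittest_generic(const unsigned char *base, long pos) {
//     unsigned char byte = base[pos >> 3];     // indexed byte load
//     return (byte >> (pos & 0x7)) & 1;        // tested bit as 0 or 1
//   }
//
// The modifying variants build the mask 1 << (pos & 0x7) and either store the
// updated byte back or, for the interlocked forms, apply it with an atomicrmw
// or/and and test the returned old byte.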
1788
1789namespace {
1790enum class MSVCSetJmpKind {
1791 _setjmpex,
1792 _setjmp3,
1793 _setjmp
1794};
1795}
1796
1797/// MSVC handles setjmp a bit differently on different platforms. On every
1798/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1799/// parameters can be passed as variadic arguments, but we always pass none.
1800static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1801 const CallExpr *E) {
1802 llvm::Value *Arg1 = nullptr;
1803 llvm::Type *Arg1Ty = nullptr;
1804 StringRef Name;
1805 bool IsVarArg = false;
1806 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1807 Name = "_setjmp3";
1808 Arg1Ty = CGF.Int32Ty;
1809 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1810 IsVarArg = true;
1811 } else {
1812 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1813 Arg1Ty = CGF.Int8PtrTy;
1814 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1815 Arg1 = CGF.Builder.CreateCall(
1816 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1817 } else
1818 Arg1 = CGF.Builder.CreateCall(
1819 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1820 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1821 }
1822
1823 // Mark the call site and declaration with ReturnsTwice.
1824 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1825 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1826 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1827 llvm::Attribute::ReturnsTwice);
1828 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1829 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1830 ReturnsTwiceAttr, /*Local=*/true);
1831
1832 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1833 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1834 llvm::Value *Args[] = {Buf, Arg1};
1835 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1836 CB->setAttributes(ReturnsTwiceAttr);
1837 return RValue::get(CB);
1838}
1839
1840// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1841Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1842 const CallExpr *E) {
1843 switch (BuiltinID) {
1844 case MSVCIntrin::_BitScanForward:
1845 case MSVCIntrin::_BitScanReverse: {
1846 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1847 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1848
1849 llvm::Type *ArgType = ArgValue->getType();
1850 llvm::Type *IndexType = IndexAddress.getElementType();
1851 llvm::Type *ResultType = ConvertType(E->getType());
1852
1853 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1854 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1855 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1856
1857 BasicBlock *Begin = Builder.GetInsertBlock();
1858 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1859 Builder.SetInsertPoint(End);
1860 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1861
1862 Builder.SetInsertPoint(Begin);
1863 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1864 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1865 Builder.CreateCondBr(IsZero, End, NotZero);
1866 Result->addIncoming(ResZero, Begin);
1867
1868 Builder.SetInsertPoint(NotZero);
1869
1870 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1871 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1872 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1873 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1874 Builder.CreateStore(ZeroCount, IndexAddress, false);
1875 } else {
1876 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1877 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1878
1879 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1880 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1881 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1882 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1883 Builder.CreateStore(Index, IndexAddress, false);
1884 }
1885 Builder.CreateBr(End);
1886 Result->addIncoming(ResOne, NotZero);
1887
1888 Builder.SetInsertPoint(End);
1889 return Result;
1890 }
1891 case MSVCIntrin::_InterlockedAnd:
1892 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1893 case MSVCIntrin::_InterlockedExchange:
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1895 case MSVCIntrin::_InterlockedExchangeAdd:
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1897 case MSVCIntrin::_InterlockedExchangeSub:
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1899 case MSVCIntrin::_InterlockedOr:
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1901 case MSVCIntrin::_InterlockedXor:
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1903 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1904 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1905 AtomicOrdering::Acquire);
1906 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1907 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1908 AtomicOrdering::Release);
1909 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1910 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1911 AtomicOrdering::Monotonic);
1912 case MSVCIntrin::_InterlockedExchange_acq:
1913 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1914 AtomicOrdering::Acquire);
1915 case MSVCIntrin::_InterlockedExchange_rel:
1916 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1917 AtomicOrdering::Release);
1918 case MSVCIntrin::_InterlockedExchange_nf:
1919 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1920 AtomicOrdering::Monotonic);
1921 case MSVCIntrin::_InterlockedCompareExchange:
1922 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1923 case MSVCIntrin::_InterlockedCompareExchange_acq:
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1925 case MSVCIntrin::_InterlockedCompareExchange_rel:
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1927 case MSVCIntrin::_InterlockedCompareExchange_nf:
1928 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1929 case MSVCIntrin::_InterlockedCompareExchange128:
1930 return EmitAtomicCmpXchg128ForMSIntrin(
1931 *this, E, AtomicOrdering::SequentiallyConsistent);
1932 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1933 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1934 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1936 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1937 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1938 case MSVCIntrin::_InterlockedOr_acq:
1939 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1940 AtomicOrdering::Acquire);
1941 case MSVCIntrin::_InterlockedOr_rel:
1942 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1943 AtomicOrdering::Release);
1944 case MSVCIntrin::_InterlockedOr_nf:
1945 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1946 AtomicOrdering::Monotonic);
1947 case MSVCIntrin::_InterlockedXor_acq:
1948 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1949 AtomicOrdering::Acquire);
1950 case MSVCIntrin::_InterlockedXor_rel:
1951 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1952 AtomicOrdering::Release);
1953 case MSVCIntrin::_InterlockedXor_nf:
1954 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1955 AtomicOrdering::Monotonic);
1956 case MSVCIntrin::_InterlockedAnd_acq:
1957 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1958 AtomicOrdering::Acquire);
1959 case MSVCIntrin::_InterlockedAnd_rel:
1960 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1961 AtomicOrdering::Release);
1962 case MSVCIntrin::_InterlockedAnd_nf:
1963 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1964 AtomicOrdering::Monotonic);
1965 case MSVCIntrin::_InterlockedIncrement_acq:
1966 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1967 case MSVCIntrin::_InterlockedIncrement_rel:
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1969 case MSVCIntrin::_InterlockedIncrement_nf:
1970 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1971 case MSVCIntrin::_InterlockedDecrement_acq:
1972 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1973 case MSVCIntrin::_InterlockedDecrement_rel:
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1975 case MSVCIntrin::_InterlockedDecrement_nf:
1976 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1977
1978 case MSVCIntrin::_InterlockedDecrement:
1979 return EmitAtomicDecrementValue(*this, E);
1980 case MSVCIntrin::_InterlockedIncrement:
1981 return EmitAtomicIncrementValue(*this, E);
1982
1983 case MSVCIntrin::__fastfail: {
1984 // Request immediate process termination from the kernel. The instruction
1985 // sequences to do this are documented on MSDN:
1986 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1987 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1988 StringRef Asm, Constraints;
1989 switch (ISA) {
1990 default:
1991 ErrorUnsupported(E, "__fastfail call for this architecture");
1992 break;
1993 case llvm::Triple::x86:
1994 case llvm::Triple::x86_64:
1995 Asm = "int $$0x29";
1996 Constraints = "{cx}";
1997 break;
1998 case llvm::Triple::thumb:
1999 Asm = "udf #251";
2000 Constraints = "{r0}";
2001 break;
2002 case llvm::Triple::aarch64:
2003 Asm = "brk #0xF003";
2004 Constraints = "{w0}";
2005 }
2006 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2007 llvm::InlineAsm *IA =
2008 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2009 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2010 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2011 llvm::Attribute::NoReturn);
2012 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2013 CI->setAttributes(NoReturnAttr);
2014 return CI;
2015 }
2016 }
2017 llvm_unreachable("Incorrect MSVC intrinsic!");
2018}
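// Editorial note: the _BitScan lowering above matches the documented MSVC
// behavior. For example (hypothetical user code):
//
//   unsigned long idx;
//   unsigned char ok = _BitScanForward(&idx, v);
//   // v == 0: returns 0 and idx is left unmodified (the store is skipped);
//   // v != 0: returns 1 and idx receives cttz(v). _BitScanReverse instead
//   // stores bitwidth-1 - ctlz(v), the index of the highest set bit.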
2019
2020namespace {
2021// ARC cleanup for __builtin_os_log_format
2022struct CallObjCArcUse final : EHScopeStack::Cleanup {
2023 CallObjCArcUse(llvm::Value *object) : object(object) {}
2024 llvm::Value *object;
2025
2026 void Emit(CodeGenFunction &CGF, Flags flags) override {
2027 CGF.EmitARCIntrinsicUse(object);
2028 }
2029};
2030}
2031
2032Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
2033 BuiltinCheckKind Kind) {
2034 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2035 "Unsupported builtin check kind");
2036
2037 Value *ArgValue = EmitBitCountExpr(*this, E);
2038 if (!SanOpts.has(SanitizerKind::Builtin))
2039 return ArgValue;
2040
2041 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2042 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2043 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2044 Value *Cond = Builder.CreateICmpNE(
2045 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2046 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2047 {EmitCheckSourceLocation(E->getExprLoc()),
2048 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2049 {});
2050 return ArgValue;
2051}
2052
2053Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) {
2054 Value *ArgValue = EvaluateExprAsBool(E);
2055 if (!SanOpts.has(SanitizerKind::Builtin))
2056 return ArgValue;
2057
2058 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2059 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2060 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2061 EmitCheck(
2062 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2063 {EmitCheckSourceLocation(E->getExprLoc()),
2064 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2065 {});
2066 return ArgValue;
2067}
2068
2069static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2070 return CGF.Builder.CreateBinaryIntrinsic(
2071 Intrinsic::abs, ArgValue,
2072 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2073}
2074
2075static Value *EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E,
2076 bool SanitizeOverflow) {
2077 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2078
2079 // Try to eliminate overflow check.
2080 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2081 if (!VCI->isMinSignedValue())
2082 return EmitAbs(CGF, ArgValue, true);
2083 }
2084
2085 SmallVector<SanitizerKind::SanitizerOrdinal, 1> Ordinals;
2086 SanitizerHandler CheckHandler;
2087 if (SanitizeOverflow) {
2088 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2089 CheckHandler = SanitizerHandler::NegateOverflow;
2090 } else
2091 CheckHandler = SanitizerHandler::SubOverflow;
2092
2093 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2094
2095 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2096 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2097 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2098 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2099 Value *NotOverflow = CGF.Builder.CreateNot(
2100 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2101
2102 // TODO: support -ftrapv-handler.
2103 if (SanitizeOverflow) {
2104 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2105 CheckHandler,
2106 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
2107 CGF.EmitCheckTypeDescriptor(E->getType())},
2108 {ArgValue});
2109 } else
2110 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2111
2112 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2113 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2114}
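// Editorial note: the ssub_with_overflow above can only overflow for the
// minimum signed value, since 0 - INT_MIN is unrepresentable. So for 32-bit
// int, __builtin_abs(-2147483648) is the one input that reaches the trap or
// the ubsan NegateOverflow handler; every other input selects either the
// original value or its negation.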
2115
2116/// Get the argument type for arguments to os_log_helper.
2117static CanQualType getOSLogArgType(ASTContext &C, int Size) {
2118 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2119 return C.getCanonicalType(UnsignedTy);
2120}
2121
2122llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
2123 const analyze_os_log::OSLogBufferLayout &Layout,
2124 CharUnits BufferAlignment) {
2125 ASTContext &Ctx = getContext();
2126
2127 SmallString<64> Name;
2128 {
2129 raw_svector_ostream OS(Name);
2130 OS << "__os_log_helper";
2131 OS << "_" << BufferAlignment.getQuantity();
2132 OS << "_" << int(Layout.getSummaryByte());
2133 OS << "_" << int(Layout.getNumArgsByte());
2134 for (const auto &Item : Layout.Items)
2135 OS << "_" << int(Item.getSizeByte()) << "_"
2136 << int(Item.getDescriptorByte());
2137 }
2138
2139 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2140 return F;
2141
2142 llvm::SmallVector<QualType, 4> ArgTys;
2143 FunctionArgList Args;
2144 Args.push_back(ImplicitParamDecl::Create(
2145 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2146 ImplicitParamKind::Other));
2147 ArgTys.emplace_back(Ctx.VoidPtrTy);
2148
2149 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2150 char Size = Layout.Items[I].getSizeByte();
2151 if (!Size)
2152 continue;
2153
2154 QualType ArgTy = getOSLogArgType(Ctx, Size);
2155 Args.push_back(ImplicitParamDecl::Create(
2156 Ctx, nullptr, SourceLocation(),
2157 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2158 ImplicitParamKind::Other));
2159 ArgTys.emplace_back(ArgTy);
2160 }
2161
2162 QualType ReturnTy = Ctx.VoidTy;
2163
2164 // The helper function has linkonce_odr linkage to enable the linker to merge
2165 // identical functions. To ensure the merging always happens, 'noinline' is
2166 // attached to the function when compiling with -Oz.
2167 const CGFunctionInfo &FI =
2168 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2169 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2170 llvm::Function *Fn = llvm::Function::Create(
2171 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2172 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2173 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2174 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2175 Fn->setDoesNotThrow();
2176
2177 // Attach 'noinline' at -Oz.
2178 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2179 Fn->addFnAttr(llvm::Attribute::NoInline);
2180
2181 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2182 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2183
2184 // Create a scope with an artificial location for the body of this function.
2185 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2186
2187 CharUnits Offset;
2188 Address BufAddr = makeNaturalAddressForPointer(
2189 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2190 BufferAlignment);
2191 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2192 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2193 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2194 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2195
2196 unsigned I = 1;
2197 for (const auto &Item : Layout.Items) {
2198 Builder.CreateStore(
2199 Builder.getInt8(Item.getDescriptorByte()),
2200 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2201 Builder.CreateStore(
2202 Builder.getInt8(Item.getSizeByte()),
2203 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2204
2205 CharUnits Size = Item.size();
2206 if (!Size.getQuantity())
2207 continue;
2208
2209 Address Arg = GetAddrOfLocalVar(Args[I]);
2210 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2211 Addr = Addr.withElementType(Arg.getElementType());
2212 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2213 Offset += Size;
2214 ++I;
2215 }
2216
2217 FinishFunction();
2218
2219 return Fn;
2220}
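// Editorial sketch of the buffer the helper fills in, for a call such as
// __builtin_os_log_format(buf, "%d: %s", 42, s) (descriptor values are
// illustrative; the real ones come from OSLogBufferLayout):
//
//   buf[0]      summary byte
//   buf[1]      number of arguments (2)
//   buf[2..3]   descriptor and size (4) of arg0
//   buf[4..7]   value of arg0
//   buf[8..9]   descriptor and size (8) of arg1
//   buf[10..17] pointer value of arg1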
2221
2222RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
2223 assert(E.getNumArgs() >= 2 &&
2224 "__builtin_os_log_format takes at least 2 arguments");
2225 ASTContext &Ctx = getContext();
2226 analyze_os_log::OSLogBufferLayout Layout;
2227 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
2228 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2229
2230 // Ignore argument 1, the format string. It is not currently used.
2231 CallArgList Args;
2232 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2233
2234 for (const auto &Item : Layout.Items) {
2235 int Size = Item.getSizeByte();
2236 if (!Size)
2237 continue;
2238
2239 llvm::Value *ArgVal;
2240
2241 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2242 uint64_t Val = 0;
2243 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2244 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2245 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2246 } else if (const Expr *TheExpr = Item.getExpr()) {
2247 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2248
2249 // If a temporary object that requires destruction after the full
2250 // expression is passed, push a lifetime-extended cleanup to extend its
2251 // lifetime to the end of the enclosing block scope.
2252 auto LifetimeExtendObject = [&](const Expr *E) {
2253 E = E->IgnoreParenCasts();
2254 // Extend lifetimes of objects returned by function calls and message
2255 // sends.
2256
2257 // FIXME: We should do this in other cases in which temporaries are
2258 // created including arguments of non-ARC types (e.g., C++
2259 // temporaries).
2260 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
2261 return true;
2262 return false;
2263 };
2264
2265 if (TheExpr->getType()->isObjCRetainableType() &&
2266 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2267 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2268 "Only scalar can be a ObjC retainable type");
2269 if (!isa<Constant>(ArgVal)) {
2270 CleanupKind Cleanup = getARCCleanupKind();
2271 QualType Ty = TheExpr->getType();
2272 RawAddress Alloca = RawAddress::invalid();
2273 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2274 ArgVal = EmitARCRetain(Ty, ArgVal);
2275 Builder.CreateStore(ArgVal, Addr);
2276 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2277 destroyARCStrongPrecise,
2278 Cleanup & EHCleanup);
2279
2280 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2281 // argument has to be alive.
2282 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2283 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
2284 }
2285 }
2286 } else {
2287 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2288 }
2289
2290 unsigned ArgValSize =
2291 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2292 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2293 ArgValSize);
2294 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2295 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2296 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2297 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2298 Args.add(RValue::get(ArgVal), ArgTy);
2299 }
2300
2301 const CGFunctionInfo &FI =
2302 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2303 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2304 Layout, BufAddr.getAlignment());
2305 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2306 return RValue::get(BufAddr, *this);
2307}
2308
2309static bool isSpecialUnsignedMultiplySignedResult(
2310 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2311 WidthAndSignedness ResultInfo) {
2312 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2313 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2314 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2315}
2316
2317static RValue EmitCheckedUnsignedMultiplySignedResult(
2318 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2319 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2320 const clang::Expr *ResultArg, QualType ResultQTy,
2321 WidthAndSignedness ResultInfo) {
2322 assert(isSpecialUnsignedMultiplySignedResult(
2323 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2324 "Cannot specialize this multiply");
2325
2326 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2327 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2328
2329 llvm::Value *HasOverflow;
2330 llvm::Value *Result = EmitOverflowIntrinsic(
2331 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2332
2333 // The intrinsic call will detect overflow when the value is > UINT_MAX,
2334 // however, since the original builtin had a signed result, we need to report
2335 // an overflow when the result is greater than INT_MAX.
2336 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2337 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2338
2339 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2340 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2341
2342 bool isVolatile =
2343 ResultArg->getType()->getPointeeType().isVolatileQualified();
2344 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2345 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2346 isVolatile);
2347 return RValue::get(HasOverflow);
2348}
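// Editorial example: for 32-bit operands, __builtin_mul_overflow(3000000000u,
// 1u, &signed_result) does not overflow the unsigned multiply, but the
// product exceeds INT_MAX (2147483647), so the extra unsigned comparison
// above is what reports the overflow.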
2349
2350/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2351static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2352 WidthAndSignedness Op1Info,
2353 WidthAndSignedness Op2Info,
2354 WidthAndSignedness ResultInfo) {
2355 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2356 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2357 Op1Info.Signed != Op2Info.Signed;
2358}
2359
2360/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2361/// the generic checked-binop irgen.
2362static RValue
2363EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2364 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2365 WidthAndSignedness Op2Info,
2366 const clang::Expr *ResultArg, QualType ResultQTy,
2367 WidthAndSignedness ResultInfo) {
2368 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2369 Op2Info, ResultInfo) &&
2370 "Not a mixed-sign multipliction we can specialize");
2371
2372 // Emit the signed and unsigned operands.
2373 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2374 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2375 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2376 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2377 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2378 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2379
2380 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2381 if (SignedOpWidth < UnsignedOpWidth)
2382 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2383 if (UnsignedOpWidth < SignedOpWidth)
2384 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2385
2386 llvm::Type *OpTy = Signed->getType();
2387 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2388 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2389 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2390 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2391
2392 // Take the absolute value of the signed operand.
2393 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2394 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2395 llvm::Value *AbsSigned =
2396 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2397
2398 // Perform a checked unsigned multiplication.
2399 llvm::Value *UnsignedOverflow;
2400 llvm::Value *UnsignedResult =
2401 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2402 Unsigned, UnsignedOverflow);
2403
2404 llvm::Value *Overflow, *Result;
2405 if (ResultInfo.Signed) {
2406 // Signed overflow occurs if the result is greater than INT_MAX or less
2407 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2408 auto IntMax =
2409 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2410 llvm::Value *MaxResult =
2411 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2412 CGF.Builder.CreateZExt(IsNegative, OpTy));
2413 llvm::Value *SignedOverflow =
2414 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2415 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2416
2417 // Prepare the signed result (possibly by negating it).
2418 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2419 llvm::Value *SignedResult =
2420 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2421 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2422 } else {
2423 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2424 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2425 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2426 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2427 if (ResultInfo.Width < OpWidth) {
2428 auto IntMax =
2429 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2430 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2431 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2432 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2433 }
2434
2435 // Negate the product if it would be negative in infinite precision.
2436 Result = CGF.Builder.CreateSelect(
2437 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2438
2439 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2440 }
2441 assert(Overflow && Result && "Missing overflow or result");
2442
2443 bool isVolatile =
2444 ResultArg->getType()->getPointeeType().isVolatileQualified();
2445 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2446 isVolatile);
2447 return RValue::get(Overflow);
2448}
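// Editorial example of the signed-result bound used above: with a 32-bit
// signed result the allowed magnitude is INT_MAX + IsNegative, so
// (-65536) * 32768u yields INT_MIN (-2147483648) without overflow, while
// 65536 * 32768u (= 2147483648) is one past INT_MAX and reports overflow.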
2449
2450static bool
2451TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2452 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2453 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2454 Ty = Ctx.getBaseElementType(Arr);
2455
2456 const auto *Record = Ty->getAsCXXRecordDecl();
2457 if (!Record)
2458 return false;
2459
2460 // We've already checked this type, or are in the process of checking it.
2461 if (!Seen.insert(Record).second)
2462 return false;
2463
2464 assert(Record->hasDefinition() &&
2465 "Incomplete types should already be diagnosed");
2466
2467 if (Record->isDynamicClass())
2468 return true;
2469
2470 for (FieldDecl *F : Record->fields()) {
2471 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2472 return true;
2473 }
2474 return false;
2475}
2476
2477/// Determine if the specified type requires laundering by checking if it is a
2478/// dynamic class type or contains a subobject which is a dynamic class type.
2479static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2480 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2481 return false;
2482 llvm::SmallPtrSet<const Decl *, 16> Seen;
2483 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2484}
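// Editorial illustration: under -fstrict-vtable-pointers the check above
// makes __builtin_launder a real barrier only where vptrs may be involved:
//
//   struct Poly { virtual ~Poly(); };  // dynamic class: requires laundering
//   struct Holder { Poly p; };         // subobject is dynamic: requires it
//   struct Plain { int x; };           // neither: launder folds to a no-op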
2485
2486RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2487 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2488 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2489
2490 // The builtin's shift arg may have a different type than the source arg and
2491 // result, but the LLVM intrinsic uses the same type for all values.
2492 llvm::Type *Ty = Src->getType();
2493 llvm::Type *ShiftTy = ShiftAmt->getType();
2494
2495 unsigned BitWidth = Ty->getIntegerBitWidth();
2496
2497 // Normalize shift amount to [0, BitWidth) range to match runtime behavior.
2498 // This matches the algorithm in ExprConstant.cpp for constant evaluation.
2499 if (BitWidth == 1) {
2500 // Rotating a 1-bit value is always a no-op
2501 ShiftAmt = ConstantInt::get(ShiftTy, 0);
2502 } else if (BitWidth == 2) {
2503 // For 2-bit values: rotation amount is 0 or 1 based on
2504 // whether the amount is even or odd. We can't use srem here because
2505 // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
2506 llvm::Value *One = ConstantInt::get(ShiftTy, 1);
2507 ShiftAmt = Builder.CreateAnd(ShiftAmt, One);
2508 } else {
2509 unsigned ShiftAmtBitWidth = ShiftTy->getIntegerBitWidth();
2510 bool ShiftAmtIsSigned = E->getArg(1)->getType()->isSignedIntegerType();
2511
2512 // Choose the wider type for the divisor to avoid truncation
2513 llvm::Type *DivisorTy = ShiftAmtBitWidth > BitWidth ? ShiftTy : Ty;
2514 llvm::Value *Divisor = ConstantInt::get(DivisorTy, BitWidth);
2515
2516 // Extend ShiftAmt to match Divisor width if needed
2517 if (ShiftAmtBitWidth < DivisorTy->getIntegerBitWidth()) {
2518 ShiftAmt = Builder.CreateIntCast(ShiftAmt, DivisorTy, ShiftAmtIsSigned);
2519 }
2520
2521 // Normalize to [0, BitWidth)
2522 llvm::Value *RemResult;
2523 if (ShiftAmtIsSigned) {
2524 RemResult = Builder.CreateSRem(ShiftAmt, Divisor);
2525 // Signed remainder can be negative, convert to positive equivalent
2526 llvm::Value *Zero = ConstantInt::get(DivisorTy, 0);
2527 llvm::Value *IsNegative = Builder.CreateICmpSLT(RemResult, Zero);
2528 llvm::Value *PositiveShift = Builder.CreateAdd(RemResult, Divisor);
2529 ShiftAmt = Builder.CreateSelect(IsNegative, PositiveShift, RemResult);
2530 } else {
2531 ShiftAmt = Builder.CreateURem(ShiftAmt, Divisor);
2532 }
2533 }
2534
2535 // Convert to the source type if needed
2536 if (ShiftAmt->getType() != Ty) {
2537 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2538 }
2539
2540 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2541 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2542 Function *F = CGM.getIntrinsic(IID, Ty);
2543 return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
2544}
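// Editorial example: the normalization above gives out-of-range and negative
// shift amounts the same meaning they have in constant evaluation, e.g.
// __builtin_rotateleft32(x, 37) == __builtin_rotateleft32(x, 5), and a
// negative amount reaching the signed-remainder path is shifted back into
// [0, BitWidth) by the select, so rotating by -1 rotates by 31 for 32 bits.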
2545
2546// Map math builtins for long-double to f128 version.
2547static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2548 switch (BuiltinID) {
2549#define MUTATE_LDBL(func) \
2550 case Builtin::BI__builtin_##func##l: \
2551 return Builtin::BI__builtin_##func##f128;
2582 MUTATE_LDBL(nans)
2583 MUTATE_LDBL(inf)
2602 MUTATE_LDBL(huge_val)
2612#undef MUTATE_LDBL
2613 default:
2614 return BuiltinID;
2615 }
2616}
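// Editorial note: each MUTATE_LDBL entry expands to a case such as
//
//   case Builtin::BI__builtin_huge_vall:
//     return Builtin::BI__builtin_huge_valf128;
//
// so on PPC64 with IEEE-quad long double, the long-double builtin is
// re-dispatched to its f128 counterpart before any further lowering.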
2617
2618static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2619 Value *V) {
2620 if (CGF.Builder.getIsFPConstrained() &&
2621 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2622 if (Value *Result =
2623 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2624 return Result;
2625 }
2626 return nullptr;
2627}
2628
2629static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
2630 const FunctionDecl *FD) {
2631 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2632 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2633 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2634
2635 SmallVector<Value *, 16> Args;
2636 for (auto &&FormalTy : FnTy->params())
2637 Args.push_back(llvm::PoisonValue::get(FormalTy));
2638
2639 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2640}
2641
2642RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2643 const CallExpr *E,
2644 ReturnValueSlot ReturnValue) {
2645 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2646 "Should not codegen for consteval builtins");
2647
2648 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2649 // See if we can constant fold this builtin. If so, don't emit it at all.
2650 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2651 Expr::EvalResult Result;
2652 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2653 !Result.hasSideEffects()) {
2654 if (Result.Val.isInt())
2655 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2656 Result.Val.getInt()));
2657 if (Result.Val.isFloat())
2658 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2659 Result.Val.getFloat()));
2660 }
2661
2662 // If current long-double semantics is IEEE 128-bit, replace math builtins
2663 // of long-double with f128 equivalent.
2664 // TODO: This mutation should also be applied to targets other than PPC,
2665 // after backend supports IEEE 128-bit style libcalls.
2666 if (getTarget().getTriple().isPPC64() &&
2667 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2668 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2669
2670 // If the builtin has been declared explicitly with an assembler label,
2671 // disable the specialized emitting below. Ideally we should communicate the
2672 // rename in IR, or at least avoid generating the intrinsic calls that are
2673 // likely to get lowered to the renamed library functions.
2674 const unsigned BuiltinIDIfNoAsmLabel =
2675 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2676
2677 std::optional<bool> ErrnoOverriden;
2678 // ErrnoOverriden is true if math-errno is overridden via the
2679 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2680 // which implies math-errno.
2681 if (E->hasStoredFPFeatures()) {
2682 FPOptionsOverride OP = E->getFPFeatures();
2683 if (OP.hasMathErrnoOverride())
2684 ErrnoOverriden = OP.getMathErrnoOverride();
2685 }
2686 // True if '__attribute__((optnone))' is used. This attribute overrides
2687 // fast-math, which implies math-errno.
2688 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2689
2690 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2691
2692 bool GenerateFPMathIntrinsics =
2694 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2695 OptNone, IsOptimizationEnabled);
2696
2697 if (GenerateFPMathIntrinsics) {
2698 switch (BuiltinIDIfNoAsmLabel) {
2699 case Builtin::BIacos:
2700 case Builtin::BIacosf:
2701 case Builtin::BIacosl:
2702 case Builtin::BI__builtin_acos:
2703 case Builtin::BI__builtin_acosf:
2704 case Builtin::BI__builtin_acosf16:
2705 case Builtin::BI__builtin_acosl:
2706 case Builtin::BI__builtin_acosf128:
2707 case Builtin::BI__builtin_elementwise_acos:
2708 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2709 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2710
2711 case Builtin::BIasin:
2712 case Builtin::BIasinf:
2713 case Builtin::BIasinl:
2714 case Builtin::BI__builtin_asin:
2715 case Builtin::BI__builtin_asinf:
2716 case Builtin::BI__builtin_asinf16:
2717 case Builtin::BI__builtin_asinl:
2718 case Builtin::BI__builtin_asinf128:
2719 case Builtin::BI__builtin_elementwise_asin:
2720 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2721 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2722
2723 case Builtin::BIatan:
2724 case Builtin::BIatanf:
2725 case Builtin::BIatanl:
2726 case Builtin::BI__builtin_atan:
2727 case Builtin::BI__builtin_atanf:
2728 case Builtin::BI__builtin_atanf16:
2729 case Builtin::BI__builtin_atanl:
2730 case Builtin::BI__builtin_atanf128:
2731 case Builtin::BI__builtin_elementwise_atan:
2732 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2733 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2734
2735 case Builtin::BIatan2:
2736 case Builtin::BIatan2f:
2737 case Builtin::BIatan2l:
2738 case Builtin::BI__builtin_atan2:
2739 case Builtin::BI__builtin_atan2f:
2740 case Builtin::BI__builtin_atan2f16:
2741 case Builtin::BI__builtin_atan2l:
2742 case Builtin::BI__builtin_atan2f128:
2743 case Builtin::BI__builtin_elementwise_atan2:
2744 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2745 *this, E, Intrinsic::atan2,
2746 Intrinsic::experimental_constrained_atan2));
2747
2748 case Builtin::BIceil:
2749 case Builtin::BIceilf:
2750 case Builtin::BIceill:
2751 case Builtin::BI__builtin_ceil:
2752 case Builtin::BI__builtin_ceilf:
2753 case Builtin::BI__builtin_ceilf16:
2754 case Builtin::BI__builtin_ceill:
2755 case Builtin::BI__builtin_ceilf128:
2756 case Builtin::BI__builtin_elementwise_ceil:
2757 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2758 Intrinsic::ceil,
2759 Intrinsic::experimental_constrained_ceil));
2760
2761 case Builtin::BIcopysign:
2762 case Builtin::BIcopysignf:
2763 case Builtin::BIcopysignl:
2764 case Builtin::BI__builtin_copysign:
2765 case Builtin::BI__builtin_copysignf:
2766 case Builtin::BI__builtin_copysignf16:
2767 case Builtin::BI__builtin_copysignl:
2768 case Builtin::BI__builtin_copysignf128:
2769 return RValue::get(
2770 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2771
2772 case Builtin::BIcos:
2773 case Builtin::BIcosf:
2774 case Builtin::BIcosl:
2775 case Builtin::BI__builtin_cos:
2776 case Builtin::BI__builtin_cosf:
2777 case Builtin::BI__builtin_cosf16:
2778 case Builtin::BI__builtin_cosl:
2779 case Builtin::BI__builtin_cosf128:
2780 case Builtin::BI__builtin_elementwise_cos:
2781 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2782 Intrinsic::cos,
2783 Intrinsic::experimental_constrained_cos));
2784
2785 case Builtin::BIcosh:
2786 case Builtin::BIcoshf:
2787 case Builtin::BIcoshl:
2788 case Builtin::BI__builtin_cosh:
2789 case Builtin::BI__builtin_coshf:
2790 case Builtin::BI__builtin_coshf16:
2791 case Builtin::BI__builtin_coshl:
2792 case Builtin::BI__builtin_coshf128:
2793 case Builtin::BI__builtin_elementwise_cosh:
2794 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2795 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2796
2797 case Builtin::BIexp:
2798 case Builtin::BIexpf:
2799 case Builtin::BIexpl:
2800 case Builtin::BI__builtin_exp:
2801 case Builtin::BI__builtin_expf:
2802 case Builtin::BI__builtin_expf16:
2803 case Builtin::BI__builtin_expl:
2804 case Builtin::BI__builtin_expf128:
2805 case Builtin::BI__builtin_elementwise_exp:
2806 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2807 Intrinsic::exp,
2808 Intrinsic::experimental_constrained_exp));
2809
2810 case Builtin::BIexp2:
2811 case Builtin::BIexp2f:
2812 case Builtin::BIexp2l:
2813 case Builtin::BI__builtin_exp2:
2814 case Builtin::BI__builtin_exp2f:
2815 case Builtin::BI__builtin_exp2f16:
2816 case Builtin::BI__builtin_exp2l:
2817 case Builtin::BI__builtin_exp2f128:
2818 case Builtin::BI__builtin_elementwise_exp2:
2819 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2820 Intrinsic::exp2,
2821 Intrinsic::experimental_constrained_exp2));
2822 case Builtin::BI__builtin_exp10:
2823 case Builtin::BI__builtin_exp10f:
2824 case Builtin::BI__builtin_exp10f16:
2825 case Builtin::BI__builtin_exp10l:
2826 case Builtin::BI__builtin_exp10f128:
2827 case Builtin::BI__builtin_elementwise_exp10: {
2828 // TODO: strictfp support
2829 if (Builder.getIsFPConstrained())
2830 break;
2831 return RValue::get(
2832 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2833 }
2834 case Builtin::BIfabs:
2835 case Builtin::BIfabsf:
2836 case Builtin::BIfabsl:
2837 case Builtin::BI__builtin_fabs:
2838 case Builtin::BI__builtin_fabsf:
2839 case Builtin::BI__builtin_fabsf16:
2840 case Builtin::BI__builtin_fabsl:
2841 case Builtin::BI__builtin_fabsf128:
2842 return RValue::get(
2843 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2844
2845 case Builtin::BIfloor:
2846 case Builtin::BIfloorf:
2847 case Builtin::BIfloorl:
2848 case Builtin::BI__builtin_floor:
2849 case Builtin::BI__builtin_floorf:
2850 case Builtin::BI__builtin_floorf16:
2851 case Builtin::BI__builtin_floorl:
2852 case Builtin::BI__builtin_floorf128:
2853 case Builtin::BI__builtin_elementwise_floor:
2854 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2855 Intrinsic::floor,
2856 Intrinsic::experimental_constrained_floor));
2857
2858 case Builtin::BIfma:
2859 case Builtin::BIfmaf:
2860 case Builtin::BIfmal:
2861 case Builtin::BI__builtin_fma:
2862 case Builtin::BI__builtin_fmaf:
2863 case Builtin::BI__builtin_fmaf16:
2864 case Builtin::BI__builtin_fmal:
2865 case Builtin::BI__builtin_fmaf128:
2866 case Builtin::BI__builtin_elementwise_fma:
2867 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2868 Intrinsic::fma,
2869 Intrinsic::experimental_constrained_fma));
2870
2871 case Builtin::BIfmax:
2872 case Builtin::BIfmaxf:
2873 case Builtin::BIfmaxl:
2874 case Builtin::BI__builtin_fmax:
2875 case Builtin::BI__builtin_fmaxf:
2876 case Builtin::BI__builtin_fmaxf16:
2877 case Builtin::BI__builtin_fmaxl:
2878 case Builtin::BI__builtin_fmaxf128:
2879 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2880 Intrinsic::maxnum,
2881 Intrinsic::experimental_constrained_maxnum));
2882
2883 case Builtin::BIfmin:
2884 case Builtin::BIfminf:
2885 case Builtin::BIfminl:
2886 case Builtin::BI__builtin_fmin:
2887 case Builtin::BI__builtin_fminf:
2888 case Builtin::BI__builtin_fminf16:
2889 case Builtin::BI__builtin_fminl:
2890 case Builtin::BI__builtin_fminf128:
2891 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2892 Intrinsic::minnum,
2893 Intrinsic::experimental_constrained_minnum));
2894
2895 case Builtin::BIfmaximum_num:
2896 case Builtin::BIfmaximum_numf:
2897 case Builtin::BIfmaximum_numl:
2898 case Builtin::BI__builtin_fmaximum_num:
2899 case Builtin::BI__builtin_fmaximum_numf:
2900 case Builtin::BI__builtin_fmaximum_numf16:
2901 case Builtin::BI__builtin_fmaximum_numl:
2902 case Builtin::BI__builtin_fmaximum_numf128:
2903 return RValue::get(
2904 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2905
2906 case Builtin::BIfminimum_num:
2907 case Builtin::BIfminimum_numf:
2908 case Builtin::BIfminimum_numl:
2909 case Builtin::BI__builtin_fminimum_num:
2910 case Builtin::BI__builtin_fminimum_numf:
2911 case Builtin::BI__builtin_fminimum_numf16:
2912 case Builtin::BI__builtin_fminimum_numl:
2913 case Builtin::BI__builtin_fminimum_numf128:
2914 return RValue::get(
2915 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2916
2917 // fmod() is a special-case. It maps to the frem instruction rather than an
2918 // LLVM intrinsic.
2919 case Builtin::BIfmod:
2920 case Builtin::BIfmodf:
2921 case Builtin::BIfmodl:
2922 case Builtin::BI__builtin_fmod:
2923 case Builtin::BI__builtin_fmodf:
2924 case Builtin::BI__builtin_fmodf16:
2925 case Builtin::BI__builtin_fmodl:
2926 case Builtin::BI__builtin_fmodf128:
2927 case Builtin::BI__builtin_elementwise_fmod: {
2928 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2929 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2930 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2931 if (Builder.getIsFPConstrained()) {
2932 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2933 Arg1->getType());
2934 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2935 } else {
2936 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2937 }
2938 }
2939
2940 case Builtin::BIlog:
2941 case Builtin::BIlogf:
2942 case Builtin::BIlogl:
2943 case Builtin::BI__builtin_log:
2944 case Builtin::BI__builtin_logf:
2945 case Builtin::BI__builtin_logf16:
2946 case Builtin::BI__builtin_logl:
2947 case Builtin::BI__builtin_logf128:
2948 case Builtin::BI__builtin_elementwise_log:
2949 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2950 Intrinsic::log,
2951 Intrinsic::experimental_constrained_log));
2952
2953 case Builtin::BIlog10:
2954 case Builtin::BIlog10f:
2955 case Builtin::BIlog10l:
2956 case Builtin::BI__builtin_log10:
2957 case Builtin::BI__builtin_log10f:
2958 case Builtin::BI__builtin_log10f16:
2959 case Builtin::BI__builtin_log10l:
2960 case Builtin::BI__builtin_log10f128:
2961 case Builtin::BI__builtin_elementwise_log10:
2962 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2963 Intrinsic::log10,
2964 Intrinsic::experimental_constrained_log10));
2965
2966 case Builtin::BIlog2:
2967 case Builtin::BIlog2f:
2968 case Builtin::BIlog2l:
2969 case Builtin::BI__builtin_log2:
2970 case Builtin::BI__builtin_log2f:
2971 case Builtin::BI__builtin_log2f16:
2972 case Builtin::BI__builtin_log2l:
2973 case Builtin::BI__builtin_log2f128:
2974 case Builtin::BI__builtin_elementwise_log2:
2975 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2976 Intrinsic::log2,
2977 Intrinsic::experimental_constrained_log2));
2978
2979 case Builtin::BInearbyint:
2980 case Builtin::BInearbyintf:
2981 case Builtin::BInearbyintl:
2982 case Builtin::BI__builtin_nearbyint:
2983 case Builtin::BI__builtin_nearbyintf:
2984 case Builtin::BI__builtin_nearbyintl:
2985 case Builtin::BI__builtin_nearbyintf128:
2986 case Builtin::BI__builtin_elementwise_nearbyint:
2987 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2988 Intrinsic::nearbyint,
2989 Intrinsic::experimental_constrained_nearbyint));
2990
2991 case Builtin::BIpow:
2992 case Builtin::BIpowf:
2993 case Builtin::BIpowl:
2994 case Builtin::BI__builtin_pow:
2995 case Builtin::BI__builtin_powf:
2996 case Builtin::BI__builtin_powf16:
2997 case Builtin::BI__builtin_powl:
2998 case Builtin::BI__builtin_powf128:
2999 case Builtin::BI__builtin_elementwise_pow:
3000 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
3001 Intrinsic::pow,
3002 Intrinsic::experimental_constrained_pow));
3003
3004 case Builtin::BIrint:
3005 case Builtin::BIrintf:
3006 case Builtin::BIrintl:
3007 case Builtin::BI__builtin_rint:
3008 case Builtin::BI__builtin_rintf:
3009 case Builtin::BI__builtin_rintf16:
3010 case Builtin::BI__builtin_rintl:
3011 case Builtin::BI__builtin_rintf128:
3012 case Builtin::BI__builtin_elementwise_rint:
3013 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3014 Intrinsic::rint,
3015 Intrinsic::experimental_constrained_rint));
3016
3017 case Builtin::BIround:
3018 case Builtin::BIroundf:
3019 case Builtin::BIroundl:
3020 case Builtin::BI__builtin_round:
3021 case Builtin::BI__builtin_roundf:
3022 case Builtin::BI__builtin_roundf16:
3023 case Builtin::BI__builtin_roundl:
3024 case Builtin::BI__builtin_roundf128:
3025 case Builtin::BI__builtin_elementwise_round:
3026 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3027 Intrinsic::round,
3028 Intrinsic::experimental_constrained_round));
3029
3030 case Builtin::BIroundeven:
3031 case Builtin::BIroundevenf:
3032 case Builtin::BIroundevenl:
3033 case Builtin::BI__builtin_roundeven:
3034 case Builtin::BI__builtin_roundevenf:
3035 case Builtin::BI__builtin_roundevenf16:
3036 case Builtin::BI__builtin_roundevenl:
3037 case Builtin::BI__builtin_roundevenf128:
3038 case Builtin::BI__builtin_elementwise_roundeven:
3039 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3040 Intrinsic::roundeven,
3041 Intrinsic::experimental_constrained_roundeven));
3042
3043 case Builtin::BIsin:
3044 case Builtin::BIsinf:
3045 case Builtin::BIsinl:
3046 case Builtin::BI__builtin_sin:
3047 case Builtin::BI__builtin_sinf:
3048 case Builtin::BI__builtin_sinf16:
3049 case Builtin::BI__builtin_sinl:
3050 case Builtin::BI__builtin_sinf128:
3051 case Builtin::BI__builtin_elementwise_sin:
3052 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3053 Intrinsic::sin,
3054 Intrinsic::experimental_constrained_sin));
3055
3056 case Builtin::BIsinh:
3057 case Builtin::BIsinhf:
3058 case Builtin::BIsinhl:
3059 case Builtin::BI__builtin_sinh:
3060 case Builtin::BI__builtin_sinhf:
3061 case Builtin::BI__builtin_sinhf16:
3062 case Builtin::BI__builtin_sinhl:
3063 case Builtin::BI__builtin_sinhf128:
3064 case Builtin::BI__builtin_elementwise_sinh:
3065 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3066 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3067
3068 case Builtin::BI__builtin_sincospi:
3069 case Builtin::BI__builtin_sincospif:
3070 case Builtin::BI__builtin_sincospil:
3071 if (Builder.getIsFPConstrained())
3072 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3073 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3074 return RValue::get(nullptr);
3075
3076 case Builtin::BIsincos:
3077 case Builtin::BIsincosf:
3078 case Builtin::BIsincosl:
3079 case Builtin::BI__builtin_sincos:
3080 case Builtin::BI__builtin_sincosf:
3081 case Builtin::BI__builtin_sincosf16:
3082 case Builtin::BI__builtin_sincosl:
3083 case Builtin::BI__builtin_sincosf128:
3084 if (Builder.getIsFPConstrained())
3085 break; // TODO: Emit constrained sincos intrinsic once one exists.
3086 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3087 return RValue::get(nullptr);
3088
3089 case Builtin::BIsqrt:
3090 case Builtin::BIsqrtf:
3091 case Builtin::BIsqrtl:
3092 case Builtin::BI__builtin_sqrt:
3093 case Builtin::BI__builtin_sqrtf:
3094 case Builtin::BI__builtin_sqrtf16:
3095 case Builtin::BI__builtin_sqrtl:
3096 case Builtin::BI__builtin_sqrtf128:
3097 case Builtin::BI__builtin_elementwise_sqrt: {
3098 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
3099 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3100 SetSqrtFPAccuracy(Call);
3101 return RValue::get(Call);
3102 }
3103
3104 case Builtin::BItan:
3105 case Builtin::BItanf:
3106 case Builtin::BItanl:
3107 case Builtin::BI__builtin_tan:
3108 case Builtin::BI__builtin_tanf:
3109 case Builtin::BI__builtin_tanf16:
3110 case Builtin::BI__builtin_tanl:
3111 case Builtin::BI__builtin_tanf128:
3112 case Builtin::BI__builtin_elementwise_tan:
3113 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3114 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3115
3116 case Builtin::BItanh:
3117 case Builtin::BItanhf:
3118 case Builtin::BItanhl:
3119 case Builtin::BI__builtin_tanh:
3120 case Builtin::BI__builtin_tanhf:
3121 case Builtin::BI__builtin_tanhf16:
3122 case Builtin::BI__builtin_tanhl:
3123 case Builtin::BI__builtin_tanhf128:
3124 case Builtin::BI__builtin_elementwise_tanh:
3125 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3126 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3127
3128 case Builtin::BItrunc:
3129 case Builtin::BItruncf:
3130 case Builtin::BItruncl:
3131 case Builtin::BI__builtin_trunc:
3132 case Builtin::BI__builtin_truncf:
3133 case Builtin::BI__builtin_truncf16:
3134 case Builtin::BI__builtin_truncl:
3135 case Builtin::BI__builtin_truncf128:
3136 case Builtin::BI__builtin_elementwise_trunc:
3137 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3138 Intrinsic::trunc,
3139 Intrinsic::experimental_constrained_trunc));
3140
3141 case Builtin::BIlround:
3142 case Builtin::BIlroundf:
3143 case Builtin::BIlroundl:
3144 case Builtin::BI__builtin_lround:
3145 case Builtin::BI__builtin_lroundf:
3146 case Builtin::BI__builtin_lroundl:
3147 case Builtin::BI__builtin_lroundf128:
3148 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3149 *this, E, Intrinsic::lround,
3150 Intrinsic::experimental_constrained_lround));
3151
3152 case Builtin::BIllround:
3153 case Builtin::BIllroundf:
3154 case Builtin::BIllroundl:
3155 case Builtin::BI__builtin_llround:
3156 case Builtin::BI__builtin_llroundf:
3157 case Builtin::BI__builtin_llroundl:
3158 case Builtin::BI__builtin_llroundf128:
3159 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3160 *this, E, Intrinsic::llround,
3161 Intrinsic::experimental_constrained_llround));
3162
3163 case Builtin::BIlrint:
3164 case Builtin::BIlrintf:
3165 case Builtin::BIlrintl:
3166 case Builtin::BI__builtin_lrint:
3167 case Builtin::BI__builtin_lrintf:
3168 case Builtin::BI__builtin_lrintl:
3169 case Builtin::BI__builtin_lrintf128:
3170 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3171 *this, E, Intrinsic::lrint,
3172 Intrinsic::experimental_constrained_lrint));
3173
3174 case Builtin::BIllrint:
3175 case Builtin::BIllrintf:
3176 case Builtin::BIllrintl:
3177 case Builtin::BI__builtin_llrint:
3178 case Builtin::BI__builtin_llrintf:
3179 case Builtin::BI__builtin_llrintl:
3180 case Builtin::BI__builtin_llrintf128:
3181 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3182 *this, E, Intrinsic::llrint,
3183 Intrinsic::experimental_constrained_llrint));
3184 case Builtin::BI__builtin_ldexp:
3185 case Builtin::BI__builtin_ldexpf:
3186 case Builtin::BI__builtin_ldexpl:
3187 case Builtin::BI__builtin_ldexpf16:
3188 case Builtin::BI__builtin_ldexpf128:
3189 case Builtin::BI__builtin_elementwise_ldexp:
3190 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
3191 *this, E, Intrinsic::ldexp,
3192 Intrinsic::experimental_constrained_ldexp));
3193 default:
3194 break;
3195 }
3196 }
3197
3198 // Check NonnullAttribute/NullabilityArg and Alignment.
3199 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3200 unsigned ParmNum) {
3201 Value *Val = A.emitRawPointer(*this);
3202 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3203 ParmNum);
3204
3205 if (SanOpts.has(SanitizerKind::Alignment)) {
3206 SanitizerSet SkippedChecks;
3207 SkippedChecks.set(SanitizerKind::All);
3208 SkippedChecks.clear(SanitizerKind::Alignment);
3209 SourceLocation Loc = Arg->getExprLoc();
3210 // Strip an implicit cast.
3211 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3212 if (CE->getCastKind() == CK_BitCast)
3213 Arg = CE->getSubExpr();
3214 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3215 SkippedChecks);
3216 }
3217 };
3218
3219 switch (BuiltinIDIfNoAsmLabel) {
3220 default: break;
3221 case Builtin::BI__builtin___CFStringMakeConstantString:
3222 case Builtin::BI__builtin___NSStringMakeConstantString:
3223 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3224 case Builtin::BI__builtin_stdarg_start:
3225 case Builtin::BI__builtin_va_start:
3226 case Builtin::BI__va_start:
3227 case Builtin::BI__builtin_c23_va_start:
3228 case Builtin::BI__builtin_va_end:
3229 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3230 ? EmitScalarExpr(E->getArg(0))
3231 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3232 BuiltinID != Builtin::BI__builtin_va_end);
3233 return RValue::get(nullptr);
3234 case Builtin::BI__builtin_va_copy: {
3235 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3236 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3237 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3238 {DstPtr, SrcPtr});
3239 return RValue::get(nullptr);
3240 }
3241 case Builtin::BIabs:
3242 case Builtin::BIlabs:
3243 case Builtin::BIllabs:
3244 case Builtin::BI__builtin_abs:
3245 case Builtin::BI__builtin_labs:
3246 case Builtin::BI__builtin_llabs: {
3247 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3248
3249 Value *Result;
3250 switch (getLangOpts().getSignedOverflowBehavior()) {
3251 case LangOptions::SOB_Defined:
3252 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3253 break;
3254 case LangOptions::SOB_Undefined:
3255 if (!SanitizeOverflow) {
3256 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3257 break;
3258 }
3259 [[fallthrough]];
3260 case LangOptions::SOB_Trapping:
3261 // TODO: Somehow handle the corner case when the address of abs is taken.
3262 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3263 break;
3264 }
3265 return RValue::get(Result);
3266 }
3267 case Builtin::BI__builtin_complex: {
3268 Value *Real = EmitScalarExpr(E->getArg(0));
3269 Value *Imag = EmitScalarExpr(E->getArg(1));
3270 return RValue::getComplex({Real, Imag});
3271 }
3272 case Builtin::BI__builtin_conj:
3273 case Builtin::BI__builtin_conjf:
3274 case Builtin::BI__builtin_conjl:
3275 case Builtin::BIconj:
3276 case Builtin::BIconjf:
3277 case Builtin::BIconjl: {
3278 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3279 Value *Real = ComplexVal.first;
3280 Value *Imag = ComplexVal.second;
3281 Imag = Builder.CreateFNeg(Imag, "neg");
3282 return RValue::getComplex(std::make_pair(Real, Imag));
3283 }
3284 case Builtin::BI__builtin_creal:
3285 case Builtin::BI__builtin_crealf:
3286 case Builtin::BI__builtin_creall:
3287 case Builtin::BIcreal:
3288 case Builtin::BIcrealf:
3289 case Builtin::BIcreall: {
3290 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3291 return RValue::get(ComplexVal.first);
3292 }
3293
3294 case Builtin::BI__builtin_preserve_access_index: {
3295 // Only enable the preserved access index region when debuginfo
3296 // is available, as debuginfo is needed to preserve the user-level
3297 // access pattern.
3298 if (!getDebugInfo()) {
3299 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3300 return RValue::get(EmitScalarExpr(E->getArg(0)));
3301 }
3302
3303 // Nested builtin_preserve_access_index() not supported
3304 if (IsInPreservedAIRegion) {
3305 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3306 return RValue::get(EmitScalarExpr(E->getArg(0)));
3307 }
3308
3309 IsInPreservedAIRegion = true;
3310 Value *Res = EmitScalarExpr(E->getArg(0));
3311 IsInPreservedAIRegion = false;
3312 return RValue::get(Res);
3313 }
3314
3315 case Builtin::BI__builtin_cimag:
3316 case Builtin::BI__builtin_cimagf:
3317 case Builtin::BI__builtin_cimagl:
3318 case Builtin::BIcimag:
3319 case Builtin::BIcimagf:
3320 case Builtin::BIcimagl: {
3321 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3322 return RValue::get(ComplexVal.second);
3323 }
3324
3325 case Builtin::BI__builtin_clrsb:
3326 case Builtin::BI__builtin_clrsbl:
3327 case Builtin::BI__builtin_clrsbll: {
3328 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
3329 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3330
3331 llvm::Type *ArgType = ArgValue->getType();
3332 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3333
3334 llvm::Type *ResultType = ConvertType(E->getType());
3335 Value *Zero = llvm::Constant::getNullValue(ArgType);
3336 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3337 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3338 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3339 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3340 Value *Result =
3341 Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3342 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3343 "cast");
3344 return RValue::get(Result);
3345 }
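// Illustrative sketch of the clrsb lowering above, as plain C (assumes a
// 32-bit int; the helper name clrsb32 is hypothetical):
//
//   int clrsb32(int x) {
//     unsigned u = (x < 0) ? ~(unsigned)x : (unsigned)x;
//     return (u == 0) ? 31 : __builtin_clz(u) - 1; // IR side: ctlz(0) == 32
//   }
//
// e.g. clrsb32(0) == 31, clrsb32(-1) == 31, clrsb32(5) == 28.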
3346 case Builtin::BI__builtin_ctzs:
3347 case Builtin::BI__builtin_ctz:
3348 case Builtin::BI__builtin_ctzl:
3349 case Builtin::BI__builtin_ctzll:
3350 case Builtin::BI__builtin_ctzg:
3351 case Builtin::BI__builtin_elementwise_ctzg: {
3352 bool HasFallback =
3353 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3354 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3355 E->getNumArgs() > 1;
3356
3357 Value *ArgValue =
3358 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3359 : EmitScalarExpr(E->getArg(0));
3360
3361 llvm::Type *ArgType = ArgValue->getType();
3362 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3363
3364 llvm::Type *ResultType = ConvertType(E->getType());
3365 // The elementwise builtins always exhibit zero-is-undef behaviour
3366 Value *ZeroUndef = Builder.getInt1(
3367 HasFallback || getTarget().isCLZForZeroUndef() ||
3368 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3369 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3370 if (Result->getType() != ResultType)
3371 Result =
3372 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3373 if (!HasFallback)
3374 return RValue::get(Result);
3375
3376 Value *Zero = Constant::getNullValue(ArgType);
3377 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3378 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3379 Value *ResultOrFallback =
3380 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3381 return RValue::get(ResultOrFallback);
3382 }
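// Illustrative usage of the fallback handling above (cttz is emitted with
// zero-is-poison, then the select supplies the fallback):
//
//   int a = __builtin_ctzg(8u);     // a == 3
//   int b = __builtin_ctzg(0u, 32); // b == 32, taken from the fallback arg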
3383 case Builtin::BI__builtin_clzs:
3384 case Builtin::BI__builtin_clz:
3385 case Builtin::BI__builtin_clzl:
3386 case Builtin::BI__builtin_clzll:
3387 case Builtin::BI__builtin_clzg:
3388 case Builtin::BI__builtin_elementwise_clzg: {
3389 bool HasFallback =
3390 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3391 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3392 E->getNumArgs() > 1;
3393
3394 Value *ArgValue =
3395 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3396 : EmitScalarExpr(E->getArg(0));
3397
3398 llvm::Type *ArgType = ArgValue->getType();
3399 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3400
3401 llvm::Type *ResultType = ConvertType(E->getType());
3402 // The elementwise builtins always exhibit zero-is-undef behaviour
3403 Value *ZeroUndef = Builder.getInt1(
3404 HasFallback || getTarget().isCLZForZeroUndef() ||
3405 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3406 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3407 if (Result->getType() != ResultType)
3408 Result =
3409 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3410 if (!HasFallback)
3411 return RValue::get(Result);
3412
3413 Value *Zero = Constant::getNullValue(ArgType);
3414 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3415 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3416 Value *ResultOrFallback =
3417 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3418 return RValue::get(ResultOrFallback);
3419 }
3420 case Builtin::BI__builtin_ffs:
3421 case Builtin::BI__builtin_ffsl:
3422 case Builtin::BI__builtin_ffsll: {
3423 // ffs(x) -> x ? cttz(x) + 1 : 0
3424 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3425
3426 llvm::Type *ArgType = ArgValue->getType();
3427 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3428
3429 llvm::Type *ResultType = ConvertType(E->getType());
3430 Value *Tmp =
3431 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3432 llvm::ConstantInt::get(ArgType, 1));
3433 Value *Zero = llvm::Constant::getNullValue(ArgType);
3434 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3435 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3436 if (Result->getType() != ResultType)
3437 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3438 "cast");
3439 return RValue::get(Result);
3440 }
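// Illustrative values for the mapping above:
//
//   __builtin_ffs(0)  == 0  // no bit set
//   __builtin_ffs(12) == 3  // 12 == 0b1100; cttz(12) + 1 == 3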
3441 case Builtin::BI__builtin_parity:
3442 case Builtin::BI__builtin_parityl:
3443 case Builtin::BI__builtin_parityll: {
3444 // parity(x) -> ctpop(x) & 1
3445 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3446
3447 llvm::Type *ArgType = ArgValue->getType();
3448 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3449
3450 llvm::Type *ResultType = ConvertType(E->getType());
3451 Value *Tmp = Builder.CreateCall(F, ArgValue);
3452 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3453 if (Result->getType() != ResultType)
3454 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3455 "cast");
3456 return RValue::get(Result);
3457 }
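// Illustrative values for the mapping above:
//
//   __builtin_parity(7) == 1 // 0b111, three set bits
//   __builtin_parity(6) == 0 // 0b110, two set bits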
3458 case Builtin::BI__lzcnt16:
3459 case Builtin::BI__lzcnt:
3460 case Builtin::BI__lzcnt64: {
3461 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3462
3463 llvm::Type *ArgType = ArgValue->getType();
3464 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3465
3466 llvm::Type *ResultType = ConvertType(E->getType());
3467 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3468 if (Result->getType() != ResultType)
3469 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3470 "cast");
3471 return RValue::get(Result);
3472 }
3473 case Builtin::BI__popcnt16:
3474 case Builtin::BI__popcnt:
3475 case Builtin::BI__popcnt64:
3476 case Builtin::BI__builtin_popcount:
3477 case Builtin::BI__builtin_popcountl:
3478 case Builtin::BI__builtin_popcountll:
3479 case Builtin::BI__builtin_popcountg: {
3480 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3481
3482 llvm::Type *ArgType = ArgValue->getType();
3483 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3484
3485 llvm::Type *ResultType = ConvertType(E->getType());
3486 Value *Result = Builder.CreateCall(F, ArgValue);
3487 if (Result->getType() != ResultType)
3488 Result =
3489 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3490 return RValue::get(Result);
3491 }
3492 case Builtin::BI__builtin_unpredictable: {
3493 // Always return the argument of __builtin_unpredictable. LLVM does not
3494 // handle this builtin. Metadata for this builtin should be added directly
3495 // to instructions such as branches or switches that use it.
3496 return RValue::get(EmitScalarExpr(E->getArg(0)));
3497 }
3498 case Builtin::BI__builtin_expect: {
3499 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3500 llvm::Type *ArgType = ArgValue->getType();
3501
3502 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3503 // Don't generate llvm.expect on -O0 as the backend won't use it for
3504 // anything.
3505 // Note, we still IRGen ExpectedValue because it could have side-effects.
3506 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3507 return RValue::get(ArgValue);
3508
3509 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3510 Value *Result =
3511 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3512 return RValue::get(Result);
3513 }
3514 case Builtin::BI__builtin_expect_with_probability: {
3515 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3516 llvm::Type *ArgType = ArgValue->getType();
3517
3518 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3519 llvm::APFloat Probability(0.0);
3520 const Expr *ProbArg = E->getArg(2);
3521 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3522 assert(EvalSucceed && "probability should be able to evaluate as float");
3523 (void)EvalSucceed;
3524 bool LoseInfo = false;
3525 Probability.convert(llvm::APFloat::IEEEdouble(),
3526 llvm::RoundingMode::Dynamic, &LoseInfo);
3527 llvm::Type *Ty = ConvertType(ProbArg->getType());
3528 Constant *Confidence = ConstantFP::get(Ty, Probability);
3529 // Don't generate llvm.expect.with.probability on -O0 as the backend
3530 // won't use it for anything.
3531 // Note, we still IRGen ExpectedValue because it could have side-effects.
3532 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3533 return RValue::get(ArgValue);
3534
3535 Function *FnExpect =
3536 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3537 Value *Result = Builder.CreateCall(
3538 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3539 return RValue::get(Result);
3540 }
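// Illustrative usage sketch (fast_path is a hypothetical callee; the third
// argument must evaluate to a constant in [0.0, 1.0], as asserted above):
//
//   if (__builtin_expect_with_probability(x == 0, 1, 0.9))
//     fast_path(); // x == 0 is expected to hold 90% of the time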
3541 case Builtin::BI__builtin_assume_aligned: {
3542 const Expr *Ptr = E->getArg(0);
3543 Value *PtrValue = EmitScalarExpr(Ptr);
3544 Value *OffsetValue =
3545 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3546
3547 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3548 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3549 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3550 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3551 llvm::Value::MaximumAlignment);
3552
3553 emitAlignmentAssumption(PtrValue, Ptr,
3554 /*The expr loc is sufficient.*/ SourceLocation(),
3555 AlignmentCI, OffsetValue);
3556 return RValue::get(PtrValue);
3557 }
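// Illustrative usage sketch (p is a hypothetical pointer):
//
//   void *q = __builtin_assume_aligned(p, 64);    // p is 64-byte aligned
//   void *r = __builtin_assume_aligned(p, 64, 8); // (char *)p - 8 is
//                                                 // 64-byte aligned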
3558 case Builtin::BI__builtin_assume_dereferenceable: {
3559 const Expr *Ptr = E->getArg(0);
3560 const Expr *Size = E->getArg(1);
3561 Value *PtrValue = EmitScalarExpr(Ptr);
3562 Value *SizeValue = EmitScalarExpr(Size);
3563 if (SizeValue->getType() != IntPtrTy)
3564 SizeValue =
3565 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3566 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3567 return RValue::get(nullptr);
3568 }
3569 case Builtin::BI__assume:
3570 case Builtin::BI__builtin_assume: {
3571 if (E->getArg(0)->HasSideEffects(getContext()))
3572 return RValue::get(nullptr);
3573
3574 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3575 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3576 Builder.CreateCall(FnAssume, ArgValue);
3577 return RValue::get(nullptr);
3578 }
3579 case Builtin::BI__builtin_assume_separate_storage: {
3580 const Expr *Arg0 = E->getArg(0);
3581 const Expr *Arg1 = E->getArg(1);
3582
3583 Value *Value0 = EmitScalarExpr(Arg0);
3584 Value *Value1 = EmitScalarExpr(Arg1);
3585
3586 Value *Values[] = {Value0, Value1};
3587 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3588 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3589 return RValue::get(nullptr);
3590 }
3591 case Builtin::BI__builtin_allow_runtime_check: {
3592 StringRef Kind =
3593 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3594 LLVMContext &Ctx = CGM.getLLVMContext();
3595 llvm::Value *Allow = Builder.CreateCall(
3596 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3597 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3598 return RValue::get(Allow);
3599 }
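// Illustrative usage sketch (expensive_check is a hypothetical callee; the
// string names a check kind that LLVM's lower-allow-check pass resolves
// later):
//
//   if (__builtin_allow_runtime_check("mycheck"))
//     expensive_check();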
3600 case Builtin::BI__builtin_allow_sanitize_check: {
3601 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
3602 StringRef Name =
3603 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3604
3605 // We deliberately allow the use of kernel- and non-kernel names
3606 // interchangeably, even when one or the other is enabled. This is consistent
3607 // with the no_sanitize attribute, which allows either kernel- or non-kernel
3608 // name to disable instrumentation (see CodeGenFunction::StartFunction).
3609 if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
3610 SanitizerKind::KernelAddress) &&
3611 (Name == "address" || Name == "kernel-address")) {
3612 IntrID = Intrinsic::allow_sanitize_address;
3613 } else if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
3614 Name == "thread") {
3615 IntrID = Intrinsic::allow_sanitize_thread;
3616 } else if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Memory |
3617 SanitizerKind::KernelMemory) &&
3618 (Name == "memory" || Name == "kernel-memory")) {
3619 IntrID = Intrinsic::allow_sanitize_memory;
3620 } else if (getLangOpts().Sanitize.hasOneOf(
3621 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress) &&
3622 (Name == "hwaddress" || Name == "kernel-hwaddress")) {
3623 IntrID = Intrinsic::allow_sanitize_hwaddress;
3624 }
3625
3626 if (IntrID != Intrinsic::not_intrinsic) {
3627 llvm::Value *Allow = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3628 return RValue::get(Allow);
3629 }
3630 // If the checked sanitizer is not enabled, we can safely lower to false
3631 // right away. This is also more efficient, since the LowerAllowCheckPass
3632 // then need not run at all when none of the above sanitizers are enabled.
3633 return RValue::get(Builder.getFalse());
3634 }
3635 case Builtin::BI__arithmetic_fence: {
3636 // Create the builtin call if FastMath is selected, and the target
3637 // supports the builtin, otherwise just return the argument.
3638 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3639 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3640 bool isArithmeticFenceEnabled =
3641 FMF.allowReassoc() &&
3642 getContext().getTargetInfo().checkArithmeticFenceSupported();
3643 QualType ArgType = E->getArg(0)->getType();
3644 if (ArgType->isComplexType()) {
3645 if (isArithmeticFenceEnabled) {
3646 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3647 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3648 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3649 ConvertType(ElementType));
3650 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3651 ConvertType(ElementType));
3652 return RValue::getComplex(std::make_pair(Real, Imag));
3653 }
3654 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3655 Value *Real = ComplexVal.first;
3656 Value *Imag = ComplexVal.second;
3657 return RValue::getComplex(std::make_pair(Real, Imag));
3658 }
3659 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3660 if (isArithmeticFenceEnabled)
3661 return RValue::get(
3662 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3663 return RValue::get(ArgValue);
3664 }
3665 case Builtin::BI__builtin_bswapg: {
3666 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3667 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3668 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3669 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3670 return RValue::get(ArgValue);
3671 assert((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0) &&
3672 "LLVM's __builtin_bswapg only supports integer variants whose width "
3673 "is a multiple of 16 bits, or a single byte");
3674 return RValue::get(
3675 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3676 }
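// Illustrative values for the width handling above:
//
//   __builtin_bswapg((unsigned short)0x1234) == 0x3412
//   __builtin_bswapg((unsigned char)0xAB)    == 0xAB // single byte returned
//                                                    // unchanged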
3677 case Builtin::BI__builtin_bswap16:
3678 case Builtin::BI__builtin_bswap32:
3679 case Builtin::BI__builtin_bswap64:
3680 case Builtin::BI_byteswap_ushort:
3681 case Builtin::BI_byteswap_ulong:
3682 case Builtin::BI_byteswap_uint64: {
3683 return RValue::get(
3684 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3685 }
3686 case Builtin::BI__builtin_bitreverse8:
3687 case Builtin::BI__builtin_bitreverse16:
3688 case Builtin::BI__builtin_bitreverse32:
3689 case Builtin::BI__builtin_bitreverse64: {
3690 return RValue::get(
3691 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3692 }
3693 case Builtin::BI__builtin_rotateleft8:
3694 case Builtin::BI__builtin_rotateleft16:
3695 case Builtin::BI__builtin_rotateleft32:
3696 case Builtin::BI__builtin_rotateleft64:
3697 case Builtin::BI__builtin_stdc_rotate_left:
3698 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3699 case Builtin::BI_rotl16:
3700 case Builtin::BI_rotl:
3701 case Builtin::BI_lrotl:
3702 case Builtin::BI_rotl64:
3703 return emitRotate(E, false);
3704
3705 case Builtin::BI__builtin_rotateright8:
3706 case Builtin::BI__builtin_rotateright16:
3707 case Builtin::BI__builtin_rotateright32:
3708 case Builtin::BI__builtin_rotateright64:
3709 case Builtin::BI__builtin_stdc_rotate_right:
3710 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3711 case Builtin::BI_rotr16:
3712 case Builtin::BI_rotr:
3713 case Builtin::BI_lrotr:
3714 case Builtin::BI_rotr64:
3715 return emitRotate(E, true);
3716
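// Illustrative values for the rotate lowerings above (emitRotate lowers
// these to the llvm.fshl/llvm.fshr funnel-shift intrinsics):
//
//   __builtin_rotateleft8(0x81, 1)  == 0x03
//   __builtin_rotateright8(0x01, 1) == 0x80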
3717 case Builtin::BI__builtin_constant_p: {
3718 llvm::Type *ResultType = ConvertType(E->getType());
3719
3720 const Expr *Arg = E->getArg(0);
3721 QualType ArgType = Arg->getType();
3722 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3723 // and likely a mistake.
3724 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3725 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3726 // Per the GCC documentation, only numeric constants are recognized after
3727 // inlining.
3728 return RValue::get(ConstantInt::get(ResultType, 0));
3729
3730 if (Arg->HasSideEffects(getContext()))
3731 // The argument is unevaluated, so be conservative if it might have
3732 // side-effects.
3733 return RValue::get(ConstantInt::get(ResultType, 0));
3734
3735 Value *ArgValue = EmitScalarExpr(Arg);
3736 if (ArgType->isObjCObjectPointerType()) {
3737 // Convert Objective-C objects to id because we cannot distinguish between
3738 // LLVM types for Obj-C classes as they are opaque.
3739 ArgType = CGM.getContext().getObjCIdType();
3740 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3741 }
3742 Function *F =
3743 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3744 Value *Result = Builder.CreateCall(F, ArgValue);
3745 if (Result->getType() != ResultType)
3746 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3747 return RValue::get(Result);
3748 }
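// Illustrative behaviour of the lowering above:
//
//   __builtin_constant_p(42)   == 1 // numeric literal
//   __builtin_constant_p(argc) // usually 0; llvm.is.constant is resolved
//                              // only after inlining and constant folding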
3749 case Builtin::BI__builtin_dynamic_object_size:
3750 case Builtin::BI__builtin_object_size: {
3751 unsigned Type =
3752 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3753 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3754
3755 // We pass this builtin onto the optimizer so that it can figure out the
3756 // object size in more complex cases.
3757 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3758 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3759 /*EmittedE=*/nullptr, IsDynamic));
3760 }
3761 case Builtin::BI__builtin_counted_by_ref: {
3762 // Default to returning '(void *) 0'.
3763 llvm::Value *Result = llvm::ConstantPointerNull::get(
3764 llvm::PointerType::getUnqual(getLLVMContext()));
3765
3766 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3767
3768 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3769 UO && UO->getOpcode() == UO_AddrOf) {
3770 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3771
3772 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3773 Arg = ASE->getBase()->IgnoreParenImpCasts();
3774 }
3775
3776 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3777 if (auto *CATy =
3778 ME->getMemberDecl()->getType()->getAs<CountAttributedType>();
3779 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3780 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
3781 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
3782 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
3783 else
3784 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3785 }
3786 }
3787
3788 return RValue::get(Result);
3789 }
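// Illustrative usage sketch (struct and variable names are hypothetical):
//
//   struct S { int n; int fam[] __attribute__((counted_by(n))); };
//   // For struct S *s, __builtin_counted_by_ref(s->fam) yields &s->n via
//   // the GEP built above; without a counted_by field it yields (void *)0.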
3790 case Builtin::BI__builtin_prefetch: {
3791 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3792 // FIXME: Technically these constants should be of type 'int', yes?
3793 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3794 llvm::ConstantInt::get(Int32Ty, 0);
3795 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3796 llvm::ConstantInt::get(Int32Ty, 3);
3797 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3798 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3799 Builder.CreateCall(F, {Address, RW, Locality, Data});
3800 return RValue::get(nullptr);
3801 }
3802 case Builtin::BI__builtin_readcyclecounter: {
3803 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3804 return RValue::get(Builder.CreateCall(F));
3805 }
3806 case Builtin::BI__builtin_readsteadycounter: {
3807 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3808 return RValue::get(Builder.CreateCall(F));
3809 }
3810 case Builtin::BI__builtin___clear_cache: {
3811 Value *Begin = EmitScalarExpr(E->getArg(0));
3812 Value *End = EmitScalarExpr(E->getArg(1));
3813 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3814 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3815 }
3816 case Builtin::BI__builtin_trap:
3817 EmitTrapCall(Intrinsic::trap);
3818 return RValue::get(nullptr);
3819 case Builtin::BI__builtin_verbose_trap: {
3820 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3821 if (getDebugInfo()) {
3822 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3823 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3824 *E->getArg(1)->tryEvaluateString(getContext()));
3825 }
3826 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3827 // Currently no attempt is made to prevent traps from being merged.
3828 EmitTrapCall(Intrinsic::trap);
3829 return RValue::get(nullptr);
3830 }
3831 case Builtin::BI__debugbreak:
3832 EmitTrapCall(Intrinsic::debugtrap);
3833 return RValue::get(nullptr);
3834 case Builtin::BI__builtin_unreachable: {
3835 EmitUnreachable(E->getExprLoc());
3836
3837 // We do need to preserve an insertion point.
3838 EmitBlock(createBasicBlock("unreachable.cont"));
3839
3840 return RValue::get(nullptr);
3841 }
3842
3843 case Builtin::BI__builtin_powi:
3844 case Builtin::BI__builtin_powif:
3845 case Builtin::BI__builtin_powil: {
3846 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3847 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3848
3849 if (Builder.getIsFPConstrained()) {
3850 // FIXME: llvm.powi has 2 mangling types,
3851 // llvm.experimental.constrained.powi has one.
3852 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3853 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3854 Src0->getType());
3855 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3856 }
3857
3858 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3859 { Src0->getType(), Src1->getType() });
3860 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3861 }
3862 case Builtin::BI__builtin_frexpl: {
3863 // Linux PPC will not be adding additional PPCDoubleDouble support;
3864 // work is in progress to switch the default to IEEE long double. Emit a
3865 // libcall for frexpl instead of legalizing this type in the backend.
3866 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3867 break;
3868 [[fallthrough]];
3869 }
3870 case Builtin::BI__builtin_frexp:
3871 case Builtin::BI__builtin_frexpf:
3872 case Builtin::BI__builtin_frexpf128:
3873 case Builtin::BI__builtin_frexpf16:
3874 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3875 case Builtin::BImodf:
3876 case Builtin::BImodff:
3877 case Builtin::BImodfl:
3878 case Builtin::BI__builtin_modf:
3879 case Builtin::BI__builtin_modff:
3880 case Builtin::BI__builtin_modfl:
3881 if (Builder.getIsFPConstrained())
3882 break; // TODO: Emit constrained modf intrinsic once one exists.
3883 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3884 case Builtin::BI__builtin_isgreater:
3885 case Builtin::BI__builtin_isgreaterequal:
3886 case Builtin::BI__builtin_isless:
3887 case Builtin::BI__builtin_islessequal:
3888 case Builtin::BI__builtin_islessgreater:
3889 case Builtin::BI__builtin_isunordered: {
3890 // Ordered comparisons: we know the arguments to these are matching scalar
3891 // floating point values.
3892 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3893 Value *LHS = EmitScalarExpr(E->getArg(0));
3894 Value *RHS = EmitScalarExpr(E->getArg(1));
3895
3896 switch (BuiltinID) {
3897 default: llvm_unreachable("Unknown ordered comparison");
3898 case Builtin::BI__builtin_isgreater:
3899 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3900 break;
3901 case Builtin::BI__builtin_isgreaterequal:
3902 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3903 break;
3904 case Builtin::BI__builtin_isless:
3905 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3906 break;
3907 case Builtin::BI__builtin_islessequal:
3908 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3909 break;
3910 case Builtin::BI__builtin_islessgreater:
3911 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3912 break;
3913 case Builtin::BI__builtin_isunordered:
3914 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3915 break;
3916 }
3917 // ZExt bool to int type.
3918 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3919 }
3920
3921 case Builtin::BI__builtin_isnan: {
3922 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3923 Value *V = EmitScalarExpr(E->getArg(0));
3924 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3925 return RValue::get(Result);
3926 return RValue::get(
3927 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3928 ConvertType(E->getType())));
3929 }
3930
3931 case Builtin::BI__builtin_issignaling: {
3932 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3933 Value *V = EmitScalarExpr(E->getArg(0));
3934 return RValue::get(
3935 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3936 ConvertType(E->getType())));
3937 }
3938
3939 case Builtin::BI__builtin_isinf: {
3940 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3941 Value *V = EmitScalarExpr(E->getArg(0));
3942 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3943 return RValue::get(Result);
3944 return RValue::get(
3945 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3946 ConvertType(E->getType())));
3947 }
3948
3949 case Builtin::BIfinite:
3950 case Builtin::BI__finite:
3951 case Builtin::BIfinitef:
3952 case Builtin::BI__finitef:
3953 case Builtin::BIfinitel:
3954 case Builtin::BI__finitel:
3955 case Builtin::BI__builtin_isfinite: {
3956 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3957 Value *V = EmitScalarExpr(E->getArg(0));
3958 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3959 return RValue::get(Result);
3960 return RValue::get(
3961 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3962 ConvertType(E->getType())));
3963 }
3964
3965 case Builtin::BI__builtin_isnormal: {
3966 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3967 Value *V = EmitScalarExpr(E->getArg(0));
3968 return RValue::get(
3969 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3970 ConvertType(E->getType())));
3971 }
3972
3973 case Builtin::BI__builtin_issubnormal: {
3974 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3975 Value *V = EmitScalarExpr(E->getArg(0));
3976 return RValue::get(
3977 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3978 ConvertType(E->getType())));
3979 }
3980
3981 case Builtin::BI__builtin_iszero: {
3982 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3983 Value *V = EmitScalarExpr(E->getArg(0));
3984 return RValue::get(
3985 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3986 ConvertType(E->getType())));
3987 }
3988
3989 case Builtin::BI__builtin_isfpclass: {
3990 Expr::EvalResult Result;
3991 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3992 break;
3993 uint64_t Test = Result.Val.getInt().getLimitedValue();
3994 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3995 Value *V = EmitScalarExpr(E->getArg(0));
3996 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3997 ConvertType(E->getType())));
3998 }
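// Illustrative usage sketch (mask values follow llvm::FPClassTest):
//
//   __builtin_isfpclass(x, 0x0003) // fcSNan|fcQNan, i.e. isnan(x)
//   __builtin_isfpclass(x, 0x01f8) // fcFinite: any finite value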
3999
4000 case Builtin::BI__builtin_nondeterministic_value: {
4001 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
4002
4003 Value *Result = PoisonValue::get(Ty);
4004 Result = Builder.CreateFreeze(Result);
4005
4006 return RValue::get(Result);
4007 }
4008
4009 case Builtin::BI__builtin_elementwise_abs: {
4010 Value *Result;
4011 QualType QT = E->getArg(0)->getType();
4012
4013 if (auto *VecTy = QT->getAs<VectorType>())
4014 QT = VecTy->getElementType();
4015 if (QT->isIntegerType())
4016 Result = Builder.CreateBinaryIntrinsic(
4017 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
4018 nullptr, "elt.abs");
4019 else
4020 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
4021 "elt.abs");
4022
4023 return RValue::get(Result);
4024 }
4025 case Builtin::BI__builtin_elementwise_bitreverse:
4026 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4027 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
4028 case Builtin::BI__builtin_elementwise_popcount:
4029 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4030 *this, E, Intrinsic::ctpop, "elt.ctpop"));
4031 case Builtin::BI__builtin_elementwise_canonicalize:
4032 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4033 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
4034 case Builtin::BI__builtin_elementwise_copysign:
4035 return RValue::get(
4036 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
4037 case Builtin::BI__builtin_elementwise_fshl:
4038 return RValue::get(
4039 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
4040 case Builtin::BI__builtin_elementwise_fshr:
4041 return RValue::get(
4042 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
4043
4044 case Builtin::BI__builtin_elementwise_add_sat:
4045 case Builtin::BI__builtin_elementwise_sub_sat: {
4046 Value *Op0 = EmitScalarExpr(E->getArg(0));
4047 Value *Op1 = EmitScalarExpr(E->getArg(1));
4048 Value *Result;
4049 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4050 QualType Ty = E->getArg(0)->getType();
4051 if (auto *VecTy = Ty->getAs<VectorType>())
4052 Ty = VecTy->getElementType();
4053 bool IsSigned = Ty->isSignedIntegerType();
4054 unsigned Opc;
4055 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4056 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4057 else
4058 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4059 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4060 return RValue::get(Result);
4061 }
4062
4063 case Builtin::BI__builtin_elementwise_max: {
4064 Value *Op0 = EmitScalarExpr(E->getArg(0));
4065 Value *Op1 = EmitScalarExpr(E->getArg(1));
4066 Value *Result;
4067 if (Op0->getType()->isIntOrIntVectorTy()) {
4068 QualType Ty = E->getArg(0)->getType();
4069 if (auto *VecTy = Ty->getAs<VectorType>())
4070 Ty = VecTy->getElementType();
4071 Result = Builder.CreateBinaryIntrinsic(
4072 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4073 Op1, nullptr, "elt.max");
4074 } else
4075 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4076 return RValue::get(Result);
4077 }
4078 case Builtin::BI__builtin_elementwise_min: {
4079 Value *Op0 = EmitScalarExpr(E->getArg(0));
4080 Value *Op1 = EmitScalarExpr(E->getArg(1));
4081 Value *Result;
4082 if (Op0->getType()->isIntOrIntVectorTy()) {
4083 QualType Ty = E->getArg(0)->getType();
4084 if (auto *VecTy = Ty->getAs<VectorType>())
4085 Ty = VecTy->getElementType();
4086 Result = Builder.CreateBinaryIntrinsic(
4087 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4088 Op1, nullptr, "elt.min");
4089 } else
4090 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4091 return RValue::get(Result);
4092 }
4093
4094 case Builtin::BI__builtin_elementwise_maxnum: {
4095 Value *Op0 = EmitScalarExpr(E->getArg(0));
4096 Value *Op1 = EmitScalarExpr(E->getArg(1));
4097 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4098 Op1, nullptr, "elt.maxnum");
4099 return RValue::get(Result);
4100 }
4101
4102 case Builtin::BI__builtin_elementwise_minnum: {
4103 Value *Op0 = EmitScalarExpr(E->getArg(0));
4104 Value *Op1 = EmitScalarExpr(E->getArg(1));
4105 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4106 Op1, nullptr, "elt.minnum");
4107 return RValue::get(Result);
4108 }
4109
4110 case Builtin::BI__builtin_elementwise_maximum: {
4111 Value *Op0 = EmitScalarExpr(E->getArg(0));
4112 Value *Op1 = EmitScalarExpr(E->getArg(1));
4113 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4114 nullptr, "elt.maximum");
4115 return RValue::get(Result);
4116 }
4117
4118 case Builtin::BI__builtin_elementwise_minimum: {
4119 Value *Op0 = EmitScalarExpr(E->getArg(0));
4120 Value *Op1 = EmitScalarExpr(E->getArg(1));
4121 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4122 nullptr, "elt.minimum");
4123 return RValue::get(Result);
4124 }
4125
4126 case Builtin::BI__builtin_elementwise_maximumnum: {
4127 Value *Op0 = EmitScalarExpr(E->getArg(0));
4128 Value *Op1 = EmitScalarExpr(E->getArg(1));
4129 Value *Result = Builder.CreateBinaryIntrinsic(
4130 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4131 return RValue::get(Result);
4132 }
4133
4134 case Builtin::BI__builtin_elementwise_minimumnum: {
4135 Value *Op0 = EmitScalarExpr(E->getArg(0));
4136 Value *Op1 = EmitScalarExpr(E->getArg(1));
4137 Value *Result = Builder.CreateBinaryIntrinsic(
4138 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4139 return RValue::get(Result);
4140 }
4141
4142 case Builtin::BI__builtin_reduce_max: {
4143 auto GetIntrinsicID = [this](QualType QT) {
4144 if (auto *VecTy = QT->getAs<VectorType>())
4145 QT = VecTy->getElementType();
4146 else if (QT->isSizelessVectorType())
4147 QT = QT->getSizelessVectorEltType(CGM.getContext());
4148
4149 if (QT->isSignedIntegerType())
4150 return Intrinsic::vector_reduce_smax;
4151 if (QT->isUnsignedIntegerType())
4152 return Intrinsic::vector_reduce_umax;
4153 assert(QT->isFloatingType() && "must have a float here");
4154 return Intrinsic::vector_reduce_fmax;
4155 };
4156 return RValue::get(emitUnaryBuiltin(
4157 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4158 }
4159
4160 case Builtin::BI__builtin_reduce_min: {
4161 auto GetIntrinsicID = [this](QualType QT) {
4162 if (auto *VecTy = QT->getAs<VectorType>())
4163 QT = VecTy->getElementType();
4164 else if (QT->isSizelessVectorType())
4165 QT = QT->getSizelessVectorEltType(CGM.getContext());
4166
4167 if (QT->isSignedIntegerType())
4168 return Intrinsic::vector_reduce_smin;
4169 if (QT->isUnsignedIntegerType())
4170 return Intrinsic::vector_reduce_umin;
4171 assert(QT->isFloatingType() && "must have a float here");
4172 return Intrinsic::vector_reduce_fmin;
4173 };
4174
4175 return RValue::get(emitUnaryBuiltin(
4176 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4177 }
4178
4179 case Builtin::BI__builtin_reduce_add:
4180 return RValue::get(emitUnaryBuiltin(
4181 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4182 case Builtin::BI__builtin_reduce_mul:
4183 return RValue::get(emitUnaryBuiltin(
4184 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4185 case Builtin::BI__builtin_reduce_xor:
4186 return RValue::get(emitUnaryBuiltin(
4187 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4188 case Builtin::BI__builtin_reduce_or:
4189 return RValue::get(emitUnaryBuiltin(
4190 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4191 case Builtin::BI__builtin_reduce_and:
4192 return RValue::get(emitUnaryBuiltin(
4193 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4194 case Builtin::BI__builtin_reduce_maximum:
4195 return RValue::get(emitUnaryBuiltin(
4196 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4197 case Builtin::BI__builtin_reduce_minimum:
4198 return RValue::get(emitUnaryBuiltin(
4199 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4200
4201 case Builtin::BI__builtin_matrix_transpose: {
4202 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4203 Value *MatValue = EmitScalarExpr(E->getArg(0));
4204 MatrixBuilder MB(Builder);
4205 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4206 MatrixTy->getNumColumns());
4207 return RValue::get(Result);
4208 }
4209
4210 case Builtin::BI__builtin_matrix_column_major_load: {
4211 MatrixBuilder MB(Builder);
4212 // Emit everything that isn't dependent on the first parameter type
4213 Value *Stride = EmitScalarExpr(E->getArg(3));
4214 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4215 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4216 assert(PtrTy && "arg0 must be of pointer type");
4217 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4218
4219 Address Src = EmitPointerWithAlignment(E->getArg(0));
4220 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4221 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4222 0);
4223 Value *Result = MB.CreateColumnMajorLoad(
4224 Src.getElementType(), Src.emitRawPointer(*this),
4225 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4226 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4227 return RValue::get(Result);
4228 }
4229
4230 case Builtin::BI__builtin_matrix_column_major_store: {
4231 MatrixBuilder MB(Builder);
4232 Value *Matrix = EmitScalarExpr(E->getArg(0));
4233 Address Dst = EmitPointerWithAlignment(E->getArg(1));
4234 Value *Stride = EmitScalarExpr(E->getArg(2));
4235
4236 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4237 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4238 assert(PtrTy && "arg1 must be of pointer type");
4239 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4240
4241 EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
4242 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4243 0);
4244 Value *Result = MB.CreateColumnMajorStore(
4245 Matrix, Dst.emitRawPointer(*this),
4246 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4247 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4249 return RValue::get(Result);
4250 }
4251
4252 case Builtin::BI__builtin_masked_load:
4253 case Builtin::BI__builtin_masked_expand_load: {
4254 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4255 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4256
4257 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4258 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4259 if (E->getNumArgs() > 2)
4260 PassThru = EmitScalarExpr(E->getArg(2));
4261
4262 CharUnits Align = CGM.getNaturalTypeAlignment(
4263 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4264
4265 llvm::Value *Result;
4266 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4267 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4268 PassThru, "masked_load");
4269 } else {
4270 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4271 Result =
4272 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4273 }
4274 return RValue::get(Result);
4275 }
4276 case Builtin::BI__builtin_masked_gather: {
4277 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4278 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4279 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4280
4281 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4282 CharUnits Align = CGM.getNaturalTypeAlignment(
4283 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4284
4285 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4286 if (E->getNumArgs() > 3)
4287 PassThru = EmitScalarExpr(E->getArg(3));
4288
4289 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4290 E->getType()->getAs<VectorType>()->getElementType());
4291 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4292
4293 llvm::Value *Result = Builder.CreateMaskedGather(
4294 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4295 return RValue::get(Result);
4296 }
4297 case Builtin::BI__builtin_masked_store:
4298 case Builtin::BI__builtin_masked_compress_store: {
4299 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4300 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4301 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4302
4303 QualType ValTy = E->getArg(1)->getType();
4304 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4305
4306 CharUnits Align = CGM.getNaturalTypeAlignment(
4307 ValTy->getAs<VectorType>()->getElementType(),
4308 nullptr);
4309
4310 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4311 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4312 } else {
4313 llvm::Function *F =
4314 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4315 Builder.CreateCall(F, {Val, Ptr, Mask});
4316 }
4317 return RValue::get(nullptr);
4318 }
4319 case Builtin::BI__builtin_masked_scatter: {
4320 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4321 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4322 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4323 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4324
4325 CharUnits Align = CGM.getNaturalTypeAlignment(
4326 E->getArg(2)->getType()->getAs<VectorType>()->getElementType(),
4327 nullptr);
4328
4329 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4330 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4331 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4332
4333 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4334 return RValue();
4335 }
4336 case Builtin::BI__builtin_isinf_sign: {
4337 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4338 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4339 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4340 Value *Arg = EmitScalarExpr(E->getArg(0));
4341 Value *AbsArg = EmitFAbs(*this, Arg);
4342 Value *IsInf = Builder.CreateFCmpOEQ(
4343 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4344 Value *IsNeg = EmitSignBit(*this, Arg);
4345
4346 llvm::Type *IntTy = ConvertType(E->getType());
4347 Value *Zero = Constant::getNullValue(IntTy);
4348 Value *One = ConstantInt::get(IntTy, 1);
4349 Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
4350 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4351 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4352 return RValue::get(Result);
4353 }
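// Illustrative values for the select chain above:
//
//   __builtin_isinf_sign(-INFINITY) == -1
//   __builtin_isinf_sign(INFINITY)  ==  1
//   __builtin_isinf_sign(3.0)       ==  0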
4354
4355 case Builtin::BI__builtin_flt_rounds: {
4356 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4357
4358 llvm::Type *ResultType = ConvertType(E->getType());
4359 Value *Result = Builder.CreateCall(F);
4360 if (Result->getType() != ResultType)
4361 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4362 "cast");
4363 return RValue::get(Result);
4364 }
4365
4366 case Builtin::BI__builtin_set_flt_rounds: {
4367 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4368
4369 Value *V = EmitScalarExpr(E->getArg(0));
4370 Builder.CreateCall(F, V);
4371 return RValue::get(nullptr);
4372 }
4373
4374 case Builtin::BI__builtin_fpclassify: {
4375 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4376 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4377 Value *V = EmitScalarExpr(E->getArg(5));
4378 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4379
4380 // Create Result
4381 BasicBlock *Begin = Builder.GetInsertBlock();
4382 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4383 Builder.SetInsertPoint(End);
4384 PHINode *Result =
4385 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4386 "fpclassify_result");
4387
4388 // if (V==0) return FP_ZERO
4389 Builder.SetInsertPoint(Begin);
4390 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4391 "iszero");
4392 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4393 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4394 Builder.CreateCondBr(IsZero, End, NotZero);
4395 Result->addIncoming(ZeroLiteral, Begin);
4396
4397 // if (V != V) return FP_NAN
4398 Builder.SetInsertPoint(NotZero);
4399 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4400 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4401 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4402 Builder.CreateCondBr(IsNan, End, NotNan);
4403 Result->addIncoming(NanLiteral, NotZero);
4404
4405 // if (fabs(V) == infinity) return FP_INFINITY
4406 Builder.SetInsertPoint(NotNan);
4407 Value *VAbs = EmitFAbs(*this, V);
4408 Value *IsInf =
4409 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4410 "isinf");
4411 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4412 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4413 Builder.CreateCondBr(IsInf, End, NotInf);
4414 Result->addIncoming(InfLiteral, NotNan);
4415
4416 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4417 Builder.SetInsertPoint(NotInf);
4418 APFloat Smallest = APFloat::getSmallestNormalized(
4419 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4420 Value *IsNormal =
4421 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4422 "isnormal");
4423 Value *NormalResult =
4424 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4425 EmitScalarExpr(E->getArg(3)));
4426 Builder.CreateBr(End);
4427 Result->addIncoming(NormalResult, NotInf);
4428
4429 // return Result
4430 Builder.SetInsertPoint(End);
4431 return RValue::get(Result);
4432 }
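// Illustrative usage sketch, mirroring the branch order above (zero, then
// NaN, then infinity, then normal/subnormal):
//
//   int k = __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
//                                FP_SUBNORMAL, FP_ZERO, x);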
4433
4434 // An alloca will always return a pointer to the alloca (stack) address
4435 // space. This address space need not be the same as the AST / Language
4436 // default (e.g. in C / C++ auto vars are in the generic address space). At
4437 // the AST level this is handled within CreateTempAlloca et al., but for the
4438 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4439 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4440 case Builtin::BIalloca:
4441 case Builtin::BI_alloca:
4442 case Builtin::BI__builtin_alloca_uninitialized:
4443 case Builtin::BI__builtin_alloca: {
4444 Value *Size = EmitScalarExpr(E->getArg(0));
4445 const TargetInfo &TI = getContext().getTargetInfo();
4446 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4447 const Align SuitableAlignmentInBytes =
4448 CGM.getContext()
4449 .toCharUnitsFromBits(TI.getSuitableAlign())
4450 .getAsAlign();
4451 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4452 AI->setAlignment(SuitableAlignmentInBytes);
4453 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4454 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4455 LangAS AAS = getASTAllocaAddressSpace();
4456 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4457 if (AAS != EAS) {
4458 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4459 return RValue::get(
4460 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4461 }
4462 return RValue::get(AI);
4463 }
4464
4465 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4466 case Builtin::BI__builtin_alloca_with_align: {
4467 Value *Size = EmitScalarExpr(E->getArg(0));
4468 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4469 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4470 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4471 const Align AlignmentInBytes =
4472 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4473 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4474 AI->setAlignment(AlignmentInBytes);
4475 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4476 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4477 LangAS AAS = getASTAllocaAddressSpace();
4478 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4479 if (AAS != EAS) {
4480 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4481 return RValue::get(
4482 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4483 }
4484 return RValue::get(AI);
4485 }
4486
4487 case Builtin::BI__builtin_infer_alloc_token: {
4488 llvm::MDNode *MDN = buildAllocToken(E);
4489 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4490 llvm::Function *F =
4491 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4492 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4493 return RValue::get(TokenID);
4494 }
4495
4496 case Builtin::BIbzero:
4497 case Builtin::BI__builtin_bzero: {
4498 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4499 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4500 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4501 E->getArg(0)->getExprLoc(), FD, 0);
4502 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4503 addInstToNewSourceAtom(I, nullptr);
4504 return RValue::get(nullptr);
4505 }
4506
4507 case Builtin::BIbcopy:
4508 case Builtin::BI__builtin_bcopy: {
4509 Address Src = EmitPointerWithAlignment(E->getArg(0));
4510 Address Dest = EmitPointerWithAlignment(E->getArg(1));
4511 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4512 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4513 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4514 0);
4515 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4516 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4517 0);
4518 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4519 addInstToNewSourceAtom(I, nullptr);
4520 return RValue::get(nullptr);
4521 }
4522
4523 case Builtin::BImemcpy:
4524 case Builtin::BI__builtin_memcpy:
4525 case Builtin::BImempcpy:
4526 case Builtin::BI__builtin_mempcpy: {
4527 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4528 Address Src = EmitPointerWithAlignment(E->getArg(1));
4529 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4530 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4531 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4532 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4533 addInstToNewSourceAtom(I, nullptr);
4534 if (BuiltinID == Builtin::BImempcpy ||
4535 BuiltinID == Builtin::BI__builtin_mempcpy)
4536 return RValue::get(Builder.CreateInBoundsGEP(
4537 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4538 else
4539 return RValue::get(Dest, *this);
4540 }
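// Illustrative contrast between the two return values above (dst, src, and
// n are hypothetical):
//
//   memcpy(dst, src, n);              // returns dst
//   char *end = mempcpy(dst, src, n); // returns (char *)dst + n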
4541
4542 case Builtin::BI__builtin_memcpy_inline: {
4543 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4544 Address Src = EmitPointerWithAlignment(E->getArg(1));
4545 uint64_t Size =
4546 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4547 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4548 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4549 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4550 addInstToNewSourceAtom(I, nullptr);
4551 return RValue::get(nullptr);
4552 }
4553
4554 case Builtin::BI__builtin_char_memchr:
4555 BuiltinID = Builtin::BI__builtin_memchr;
4556 break;
4557
4558 case Builtin::BI__builtin___memcpy_chk: {
4559 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4560 Expr::EvalResult SizeResult, DstSizeResult;
4561 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4562 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4563 break;
4564 llvm::APSInt Size = SizeResult.Val.getInt();
4565 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4566 if (Size.ugt(DstSize))
4567 break;
4568 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4569 Address Src = EmitPointerWithAlignment(E->getArg(1));
4570 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4571 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4572 addInstToNewSourceAtom(I, nullptr);
4573 return RValue::get(Dest, *this);
4574 }
4575
4576 case Builtin::BI__builtin_objc_memmove_collectable: {
4577 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4578 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4579 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4580 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4581 DestAddr, SrcAddr, SizeVal);
4582 return RValue::get(DestAddr, *this);
4583 }
4584
4585 case Builtin::BI__builtin___memmove_chk: {
4586 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4587 Expr::EvalResult SizeResult, DstSizeResult;
4588 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4589 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4590 break;
4591 llvm::APSInt Size = SizeResult.Val.getInt();
4592 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4593 if (Size.ugt(DstSize))
4594 break;
4595 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4596 Address Src = EmitPointerWithAlignment(E->getArg(1));
4597 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4598 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4599 addInstToNewSourceAtom(I, nullptr);
4600 return RValue::get(Dest, *this);
4601 }
4602
4603 case Builtin::BI__builtin_trivially_relocate:
4604 case Builtin::BImemmove:
4605 case Builtin::BI__builtin_memmove: {
4606 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4607 Address Src = EmitPointerWithAlignment(E->getArg(1));
4608 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4609 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4610 SizeVal = Builder.CreateMul(
4611 SizeVal,
4612 ConstantInt::get(
4613 SizeVal->getType(),
4614 getContext()
4615 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4616 .getQuantity()));
4617 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4618 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4619 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4620 addInstToNewSourceAtom(I, nullptr);
4621 return RValue::get(Dest, *this);
4622 }
4623 case Builtin::BImemset:
4624 case Builtin::BI__builtin_memset: {
4625 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4626 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4627 Builder.getInt8Ty());
4628 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4629 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4630 E->getArg(0)->getExprLoc(), FD, 0);
4631 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4632 addInstToNewSourceAtom(I, ByteVal);
4633 return RValue::get(Dest, *this);
4634 }
4635 case Builtin::BI__builtin_memset_inline: {
4636 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4637 Value *ByteVal =
4638 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4639 uint64_t Size =
4640 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4641 EmitNonNullArgCheck(RValue::get(Dest, *this),
4642 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4643 0);
4644 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4645 addInstToNewSourceAtom(I, nullptr);
4646 return RValue::get(nullptr);
4647 }
4648 case Builtin::BI__builtin___memset_chk: {
4649 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4650 Expr::EvalResult SizeResult, DstSizeResult;
4651 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4652 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4653 break;
4654 llvm::APSInt Size = SizeResult.Val.getInt();
4655 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4656 if (Size.ugt(DstSize))
4657 break;
4658 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4659 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4660 Builder.getInt8Ty());
4661 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4662 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4663 addInstToNewSourceAtom(I, nullptr);
4664 return RValue::get(Dest, *this);
4665 }
4666 case Builtin::BI__builtin_wmemchr: {
4667 // The MSVC runtime library does not provide a definition of wmemchr, so we
4668 // need an inline implementation.
4669 if (!getTarget().getTriple().isOSMSVCRT())
4670 break;
4671
4672 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4673 Value *Str = EmitScalarExpr(E->getArg(0));
4674 Value *Chr = EmitScalarExpr(E->getArg(1));
4675 Value *Size = EmitScalarExpr(E->getArg(2));
4676
4677 BasicBlock *Entry = Builder.GetInsertBlock();
4678 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4679 BasicBlock *Next = createBasicBlock("wmemchr.next");
4680 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4681 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4682 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4683
4684 EmitBlock(CmpEq);
4685 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4686 StrPhi->addIncoming(Str, Entry);
4687 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4688 SizePhi->addIncoming(Size, Entry);
4689 CharUnits WCharAlign =
4690 getContext().getTypeAlignInChars(getContext().WCharTy);
4691 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4692 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4693 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4694 Builder.CreateCondBr(StrEqChr, Exit, Next);
4695
4696 EmitBlock(Next);
4697 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4698 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4699 Value *NextSizeEq0 =
4700 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4701 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4702 StrPhi->addIncoming(NextStr, Next);
4703 SizePhi->addIncoming(NextSize, Next);
4704
4705 EmitBlock(Exit);
4706 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4707 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4708 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4709 Ret->addIncoming(FoundChr, CmpEq);
4710 return RValue::get(Ret);
4711 }
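// Illustration (sketch, not in the original source): the blocks built above
// correspond to this pseudo-C loop:
//   if (size == 0) return 0;
//   do {                           // wmemchr.eq
//     if (*str == chr) return str;
//     ++str; --size;               // wmemchr.next
//   } while (size != 0);
//   return 0;                      // wmemchr.exit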
4712 case Builtin::BI__builtin_wmemcmp: {
4713 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4714 // need an inline implementation.
4715 if (!getTarget().getTriple().isOSMSVCRT())
4716 break;
4717
4718 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4719
4720 Value *Dst = EmitScalarExpr(E->getArg(0));
4721 Value *Src = EmitScalarExpr(E->getArg(1));
4722 Value *Size = EmitScalarExpr(E->getArg(2));
4723
4724 BasicBlock *Entry = Builder.GetInsertBlock();
4725 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4726 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4727 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4728 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4729 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4730 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4731
4732 EmitBlock(CmpGT);
4733 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4734 DstPhi->addIncoming(Dst, Entry);
4735 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4736 SrcPhi->addIncoming(Src, Entry);
4737 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4738 SizePhi->addIncoming(Size, Entry);
4739 CharUnits WCharAlign =
4740 getContext().getTypeAlignInChars(getContext().WCharTy);
4741 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4742 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4743 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4744 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4745
4746 EmitBlock(CmpLT);
4747 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4748 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4749
4750 EmitBlock(Next);
4751 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4752 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4753 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4754 Value *NextSizeEq0 =
4755 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4756 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4757 DstPhi->addIncoming(NextDst, Next);
4758 SrcPhi->addIncoming(NextSrc, Next);
4759 SizePhi->addIncoming(NextSize, Next);
4760
4761 EmitBlock(Exit);
4762 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4763 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4764 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4765 Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
4766 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4767 return RValue::get(Ret);
4768 }
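// Illustration (sketch, not in the original source): the loop above compares
// elements as unsigned values and yields 1, -1, or 0:
//   if (size == 0) return 0;
//   do {
//     if (*dst > *src) return 1;   // wmemcmp.gt
//     if (*dst < *src) return -1;  // wmemcmp.lt
//     ++dst; ++src; --size;        // wmemcmp.next
//   } while (size != 0);
//   return 0;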
4769 case Builtin::BI__builtin_dwarf_cfa: {
4770 // The offset in bytes from the first argument to the CFA.
4771 //
4772 // Why on earth is this in the frontend? Is there any reason at
4773 // all that the backend can't reasonably determine this while
4774 // lowering llvm.eh.dwarf.cfa()?
4775 //
4776 // TODO: If there's a satisfactory reason, add a target hook for
4777 // this instead of hard-coding 0, which is correct for most targets.
4778 int32_t Offset = 0;
4779
4780 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4781 return RValue::get(Builder.CreateCall(F,
4782 llvm::ConstantInt::get(Int32Ty, Offset)));
4783 }
4784 case Builtin::BI__builtin_return_address: {
4785 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4786 getContext().UnsignedIntTy);
4787 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4788 return RValue::get(Builder.CreateCall(F, Depth));
4789 }
4790 case Builtin::BI_ReturnAddress: {
4791 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4792 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4793 }
4794 case Builtin::BI__builtin_frame_address: {
4795 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4796 getContext().UnsignedIntTy);
4797 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4798 return RValue::get(Builder.CreateCall(F, Depth));
4799 }
4800 case Builtin::BI__builtin_stack_address: {
4801 return RValue::get(Builder.CreateCall(
4802 CGM.getIntrinsic(Intrinsic::stackaddress, AllocaInt8PtrTy)));
4803 }
4804 case Builtin::BI__builtin_extract_return_addr: {
4805 Value *Address = EmitScalarExpr(E->getArg(0));
4806 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
4807 return RValue::get(Result);
4808 }
4809 case Builtin::BI__builtin_frob_return_addr: {
4810 Value *Address = EmitScalarExpr(E->getArg(0));
4811 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
4812 return RValue::get(Result);
4813 }
4814 case Builtin::BI__builtin_dwarf_sp_column: {
4815 llvm::IntegerType *Ty
4816 = cast<llvm::IntegerType>(ConvertType(E->getType()));
4817 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
4818 if (Column == -1) {
4819 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4820 return RValue::get(llvm::UndefValue::get(Ty));
4821 }
4822 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4823 }
4824 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4825 Value *Address = EmitScalarExpr(E->getArg(0));
4826 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4827 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4828 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4829 }
4830 case Builtin::BI__builtin_eh_return: {
4831 Value *Int = EmitScalarExpr(E->getArg(0));
4832 Value *Ptr = EmitScalarExpr(E->getArg(1));
4833
4834 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4835 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4836 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4837 Function *F =
4838 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4839 : Intrinsic::eh_return_i64);
4840 Builder.CreateCall(F, {Int, Ptr});
4841 Builder.CreateUnreachable();
4842
4843 // We do need to preserve an insertion point.
4844 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4845
4846 return RValue::get(nullptr);
4847 }
4848 case Builtin::BI__builtin_unwind_init: {
4849 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4850 Builder.CreateCall(F);
4851 return RValue::get(nullptr);
4852 }
4853 case Builtin::BI__builtin_extend_pointer: {
4854 // Extends a pointer to the size of an _Unwind_Word, which is
4855 // uint64_t on all platforms. Generally this gets poked into a
4856 // register and eventually used as an address, so if the
4857 // addressing registers are wider than pointers and the platform
4858 // doesn't implicitly ignore high-order bits when doing
4859 // addressing, we need to make sure we zext / sext based on
4860 // the platform's expectations.
4861 //
4862 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4863
4864 // Cast the pointer to intptr_t.
4865 Value *Ptr = EmitScalarExpr(E->getArg(0));
4866 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4867
4868 // If that's 64 bits, we're done.
4869 if (IntPtrTy->getBitWidth() == 64)
4870 return RValue::get(Result);
4871
4872 // Otherwise, ask the codegen data what to do.
4873 if (getTargetHooks().extendPointerWithSExt())
4874 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4875 else
4876 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4877 }
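// Illustration (sketch, assuming a 32-bit target; not in the original
// source): the case above emits
//   %i = ptrtoint ptr %p to i32
//   %w = zext i32 %i to i64   ; or sext, if extendPointerWithSExt() is true
// whereas on a 64-bit target the ptrtoint result is already i64 and is
// returned directly.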
4878 case Builtin::BI__builtin_setjmp: {
4879 // Buffer is a void**.
4880 Address Buf = EmitPointerWithAlignment(E->getArg(0));
4881
4882 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4883 // On this target, the back end fills in the context buffer completely.
4884 // It doesn't really matter if the frontend stores to the buffer before
4885 // calling setjmp; the back-end is going to overwrite it anyway.
4886 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4887 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4888 }
4889
4890 // Store the frame pointer to the setjmp buffer.
4891 Value *FrameAddr = Builder.CreateCall(
4892 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4893 ConstantInt::get(Int32Ty, 0));
4894 Builder.CreateStore(FrameAddr, Buf);
4895
4896 // Store the stack pointer to the setjmp buffer.
4897 Value *StackAddr = Builder.CreateStackSave();
4898 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4899
4900 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4901 Builder.CreateStore(StackAddr, StackSaveSlot);
4902
4903 // Call LLVM's EH setjmp, which is lightweight.
4904 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4905 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4906 }
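// Note (derived from the stores above; the slot-1 detail comes from the
// llvm.eh.sjlj.setjmp buffer convention): on the generic path the jmp_buf is
// treated as a void* array with slot 0 holding the frame address and slot 2
// the saved stack pointer; slot 1 is left for the intrinsic's own lowering
// (the resume address).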
4907 case Builtin::BI__builtin_longjmp: {
4908 Value *Buf = EmitScalarExpr(E->getArg(0));
4909
4910 // Call LLVM's EH longjmp, which is lightweight.
4911 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4912
4913 // longjmp doesn't return; mark this as unreachable.
4914 Builder.CreateUnreachable();
4915
4916 // We do need to preserve an insertion point.
4917 EmitBlock(createBasicBlock("longjmp.cont"));
4918
4919 return RValue::get(nullptr);
4920 }
4921 case Builtin::BI__builtin_launder: {
4922 const Expr *Arg = E->getArg(0);
4923 QualType ArgTy = Arg->getType()->getPointeeType();
4924 Value *Ptr = EmitScalarExpr(Arg);
4925 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4926 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4927
4928 return RValue::get(Ptr);
4929 }
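// Illustration (sketch, not in the original source): __builtin_launder is a
// no-op here unless the pointee type requires laundering (e.g. a dynamic
// class under -fstrict-vtable-pointers), in which case the pointer is routed
// through
//   call ptr @llvm.launder.invariant.group.p0(ptr %p)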
4930 case Builtin::BI__sync_fetch_and_add:
4931 case Builtin::BI__sync_fetch_and_sub:
4932 case Builtin::BI__sync_fetch_and_or:
4933 case Builtin::BI__sync_fetch_and_and:
4934 case Builtin::BI__sync_fetch_and_xor:
4935 case Builtin::BI__sync_fetch_and_nand:
4936 case Builtin::BI__sync_add_and_fetch:
4937 case Builtin::BI__sync_sub_and_fetch:
4938 case Builtin::BI__sync_and_and_fetch:
4939 case Builtin::BI__sync_or_and_fetch:
4940 case Builtin::BI__sync_xor_and_fetch:
4941 case Builtin::BI__sync_nand_and_fetch:
4942 case Builtin::BI__sync_val_compare_and_swap:
4943 case Builtin::BI__sync_bool_compare_and_swap:
4944 case Builtin::BI__sync_lock_test_and_set:
4945 case Builtin::BI__sync_lock_release:
4946 case Builtin::BI__sync_swap:
4947 llvm_unreachable("Shouldn't make it through sema");
4948 case Builtin::BI__sync_fetch_and_add_1:
4949 case Builtin::BI__sync_fetch_and_add_2:
4950 case Builtin::BI__sync_fetch_and_add_4:
4951 case Builtin::BI__sync_fetch_and_add_8:
4952 case Builtin::BI__sync_fetch_and_add_16:
4953 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4954 case Builtin::BI__sync_fetch_and_sub_1:
4955 case Builtin::BI__sync_fetch_and_sub_2:
4956 case Builtin::BI__sync_fetch_and_sub_4:
4957 case Builtin::BI__sync_fetch_and_sub_8:
4958 case Builtin::BI__sync_fetch_and_sub_16:
4959 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4960 case Builtin::BI__sync_fetch_and_or_1:
4961 case Builtin::BI__sync_fetch_and_or_2:
4962 case Builtin::BI__sync_fetch_and_or_4:
4963 case Builtin::BI__sync_fetch_and_or_8:
4964 case Builtin::BI__sync_fetch_and_or_16:
4965 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4966 case Builtin::BI__sync_fetch_and_and_1:
4967 case Builtin::BI__sync_fetch_and_and_2:
4968 case Builtin::BI__sync_fetch_and_and_4:
4969 case Builtin::BI__sync_fetch_and_and_8:
4970 case Builtin::BI__sync_fetch_and_and_16:
4971 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4972 case Builtin::BI__sync_fetch_and_xor_1:
4973 case Builtin::BI__sync_fetch_and_xor_2:
4974 case Builtin::BI__sync_fetch_and_xor_4:
4975 case Builtin::BI__sync_fetch_and_xor_8:
4976 case Builtin::BI__sync_fetch_and_xor_16:
4977 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4978 case Builtin::BI__sync_fetch_and_nand_1:
4979 case Builtin::BI__sync_fetch_and_nand_2:
4980 case Builtin::BI__sync_fetch_and_nand_4:
4981 case Builtin::BI__sync_fetch_and_nand_8:
4982 case Builtin::BI__sync_fetch_and_nand_16:
4983 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4984
4985 // Clang extensions: not overloaded yet.
4986 case Builtin::BI__sync_fetch_and_min:
4987 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4988 case Builtin::BI__sync_fetch_and_max:
4989 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4990 case Builtin::BI__sync_fetch_and_umin:
4991 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4992 case Builtin::BI__sync_fetch_and_umax:
4993 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4994
4995 case Builtin::BI__sync_add_and_fetch_1:
4996 case Builtin::BI__sync_add_and_fetch_2:
4997 case Builtin::BI__sync_add_and_fetch_4:
4998 case Builtin::BI__sync_add_and_fetch_8:
4999 case Builtin::BI__sync_add_and_fetch_16:
5000 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
5001 llvm::Instruction::Add);
5002 case Builtin::BI__sync_sub_and_fetch_1:
5003 case Builtin::BI__sync_sub_and_fetch_2:
5004 case Builtin::BI__sync_sub_and_fetch_4:
5005 case Builtin::BI__sync_sub_and_fetch_8:
5006 case Builtin::BI__sync_sub_and_fetch_16:
5007 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
5008 llvm::Instruction::Sub);
5009 case Builtin::BI__sync_and_and_fetch_1:
5010 case Builtin::BI__sync_and_and_fetch_2:
5011 case Builtin::BI__sync_and_and_fetch_4:
5012 case Builtin::BI__sync_and_and_fetch_8:
5013 case Builtin::BI__sync_and_and_fetch_16:
5014 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
5015 llvm::Instruction::And);
5016 case Builtin::BI__sync_or_and_fetch_1:
5017 case Builtin::BI__sync_or_and_fetch_2:
5018 case Builtin::BI__sync_or_and_fetch_4:
5019 case Builtin::BI__sync_or_and_fetch_8:
5020 case Builtin::BI__sync_or_and_fetch_16:
5021 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
5022 llvm::Instruction::Or);
5023 case Builtin::BI__sync_xor_and_fetch_1:
5024 case Builtin::BI__sync_xor_and_fetch_2:
5025 case Builtin::BI__sync_xor_and_fetch_4:
5026 case Builtin::BI__sync_xor_and_fetch_8:
5027 case Builtin::BI__sync_xor_and_fetch_16:
5028 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
5029 llvm::Instruction::Xor);
5030 case Builtin::BI__sync_nand_and_fetch_1:
5031 case Builtin::BI__sync_nand_and_fetch_2:
5032 case Builtin::BI__sync_nand_and_fetch_4:
5033 case Builtin::BI__sync_nand_and_fetch_8:
5034 case Builtin::BI__sync_nand_and_fetch_16:
5035 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
5036 llvm::Instruction::And, true);
5037
5038 case Builtin::BI__sync_val_compare_and_swap_1:
5039 case Builtin::BI__sync_val_compare_and_swap_2:
5040 case Builtin::BI__sync_val_compare_and_swap_4:
5041 case Builtin::BI__sync_val_compare_and_swap_8:
5042 case Builtin::BI__sync_val_compare_and_swap_16:
5043 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
5044
5045 case Builtin::BI__sync_bool_compare_and_swap_1:
5046 case Builtin::BI__sync_bool_compare_and_swap_2:
5047 case Builtin::BI__sync_bool_compare_and_swap_4:
5048 case Builtin::BI__sync_bool_compare_and_swap_8:
5049 case Builtin::BI__sync_bool_compare_and_swap_16:
5050 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
5051
5052 case Builtin::BI__sync_swap_1:
5053 case Builtin::BI__sync_swap_2:
5054 case Builtin::BI__sync_swap_4:
5055 case Builtin::BI__sync_swap_8:
5056 case Builtin::BI__sync_swap_16:
5057 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5058
5059 case Builtin::BI__sync_lock_test_and_set_1:
5060 case Builtin::BI__sync_lock_test_and_set_2:
5061 case Builtin::BI__sync_lock_test_and_set_4:
5062 case Builtin::BI__sync_lock_test_and_set_8:
5063 case Builtin::BI__sync_lock_test_and_set_16:
5064 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5065
5066 case Builtin::BI__sync_lock_release_1:
5067 case Builtin::BI__sync_lock_release_2:
5068 case Builtin::BI__sync_lock_release_4:
5069 case Builtin::BI__sync_lock_release_8:
5070 case Builtin::BI__sync_lock_release_16: {
5071 Address Ptr = CheckAtomicAlignment(*this, E);
5072 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
5073
5074 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
5075 getContext().getTypeSize(ElTy));
5076 llvm::StoreInst *Store =
5077 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
5078 Store->setAtomic(llvm::AtomicOrdering::Release);
5079 return RValue::get(nullptr);
5080 }
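// Illustration (sketch, not in the original source): for a 4-byte flag,
// __sync_lock_release(&flag) therefore becomes an atomic release store of
// zero, roughly
//   store atomic i32 0, ptr %flag release, align 4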
5081
5082 case Builtin::BI__sync_synchronize: {
5083 // We assume this is supposed to correspond to a C++0x-style
5084 // sequentially-consistent fence (i.e. this is only usable for
5085 // synchronization, not device I/O or anything like that). This intrinsic
5086 // is really badly designed in the sense that in theory, there isn't
5087 // any way to safely use it... but in practice, it mostly works
5088 // to use it with non-atomic loads and stores to get acquire/release
5089 // semantics.
5090 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5091 return RValue::get(nullptr);
5092 }
5093
5094 case Builtin::BI__builtin_nontemporal_load:
5095 return RValue::get(EmitNontemporalLoad(*this, E));
5096 case Builtin::BI__builtin_nontemporal_store:
5097 return RValue::get(EmitNontemporalStore(*this, E));
5098 case Builtin::BI__c11_atomic_is_lock_free:
5099 case Builtin::BI__atomic_is_lock_free: {
5100 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5101 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5102 // _Atomic(T) is always properly-aligned.
5103 const char *LibCallName = "__atomic_is_lock_free";
5104 CallArgList Args;
5105 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5106 getContext().getSizeType());
5107 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5108 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5109 getContext().VoidPtrTy);
5110 else
5111 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5112 getContext().VoidPtrTy);
5113 const CGFunctionInfo &FuncInfo =
5114 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5115 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5116 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5117 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5118 ReturnValueSlot(), Args);
5119 }
5120
5121 case Builtin::BI__atomic_thread_fence:
5122 case Builtin::BI__atomic_signal_fence:
5123 case Builtin::BI__c11_atomic_thread_fence:
5124 case Builtin::BI__c11_atomic_signal_fence: {
5125 llvm::SyncScope::ID SSID;
5126 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5127 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5128 SSID = llvm::SyncScope::SingleThread;
5129 else
5130 SSID = llvm::SyncScope::System;
5131 Value *Order = EmitScalarExpr(E->getArg(0));
5132 if (isa<llvm::ConstantInt>(Order)) {
5133 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5134 switch (ord) {
5135 case 0: // memory_order_relaxed
5136 default: // invalid order
5137 break;
5138 case 1: // memory_order_consume
5139 case 2: // memory_order_acquire
5140 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5141 break;
5142 case 3: // memory_order_release
5143 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5144 break;
5145 case 4: // memory_order_acq_rel
5146 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5147 break;
5148 case 5: // memory_order_seq_cst
5149 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5150 break;
5151 }
5152 return RValue::get(nullptr);
5153 }
5154
5155 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5156 AcquireBB = createBasicBlock("acquire", CurFn);
5157 ReleaseBB = createBasicBlock("release", CurFn);
5158 AcqRelBB = createBasicBlock("acqrel", CurFn);
5159 SeqCstBB = createBasicBlock("seqcst", CurFn);
5160 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5161
5162 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5163 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5164
5165 Builder.SetInsertPoint(AcquireBB);
5166 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5167 Builder.CreateBr(ContBB);
5168 SI->addCase(Builder.getInt32(1), AcquireBB);
5169 SI->addCase(Builder.getInt32(2), AcquireBB);
5170
5171 Builder.SetInsertPoint(ReleaseBB);
5172 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5173 Builder.CreateBr(ContBB);
5174 SI->addCase(Builder.getInt32(3), ReleaseBB);
5175
5176 Builder.SetInsertPoint(AcqRelBB);
5177 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5178 Builder.CreateBr(ContBB);
5179 SI->addCase(Builder.getInt32(4), AcqRelBB);
5180
5181 Builder.SetInsertPoint(SeqCstBB);
5182 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5183 Builder.CreateBr(ContBB);
5184 SI->addCase(Builder.getInt32(5), SeqCstBB);
5185
5186 Builder.SetInsertPoint(ContBB);
5187 return RValue::get(nullptr);
5188 }
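// Illustration (sketch, not in the original source): with a constant order
// such as __atomic_thread_fence(__ATOMIC_ACQUIRE) the early-return path
// above emits a single `fence acquire`; a non-constant order produces the
// runtime switch over the four fence blocks instead.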
5189 case Builtin::BI__scoped_atomic_thread_fence: {
5190 auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
5191
5192 Value *Order = EmitScalarExpr(E->getArg(0));
5193 Value *Scope = EmitScalarExpr(E->getArg(1));
5194 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5195 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5196 if (Ord && Scp) {
5197 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5198 ? ScopeModel->map(Scp->getZExtValue())
5199 : ScopeModel->map(ScopeModel->getFallBackValue());
5200 switch (Ord->getZExtValue()) {
5201 case 0: // memory_order_relaxed
5202 default: // invalid order
5203 break;
5204 case 1: // memory_order_consume
5205 case 2: // memory_order_acquire
5206 Builder.CreateFence(
5207 llvm::AtomicOrdering::Acquire,
5208 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5209 llvm::AtomicOrdering::Acquire,
5210 getLLVMContext()));
5211 break;
5212 case 3: // memory_order_release
5213 Builder.CreateFence(
5214 llvm::AtomicOrdering::Release,
5215 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5216 llvm::AtomicOrdering::Release,
5217 getLLVMContext()));
5218 break;
5219 case 4: // memory_order_acq_rel
5220 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5221 getTargetHooks().getLLVMSyncScopeID(
5222 getLangOpts(), SS,
5223 llvm::AtomicOrdering::AcquireRelease,
5224 getLLVMContext()));
5225 break;
5226 case 5: // memory_order_seq_cst
5227 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5228 getTargetHooks().getLLVMSyncScopeID(
5229 getLangOpts(), SS,
5230 llvm::AtomicOrdering::SequentiallyConsistent,
5231 getLLVMContext()));
5232 break;
5233 }
5234 return RValue::get(nullptr);
5235 }
5236
5237 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5238
5239 llvm::SmallVector<std::pair<llvm::BasicBlock *, llvm::AtomicOrdering>>
5240 OrderBBs;
5241 if (Ord) {
5242 switch (Ord->getZExtValue()) {
5243 case 0: // memory_order_relaxed
5244 default: // invalid order
5245 ContBB->eraseFromParent();
5246 return RValue::get(nullptr);
5247 case 1: // memory_order_consume
5248 case 2: // memory_order_acquire
5249 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5250 llvm::AtomicOrdering::Acquire);
5251 break;
5252 case 3: // memory_order_release
5253 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5254 llvm::AtomicOrdering::Release);
5255 break;
5256 case 4: // memory_order_acq_rel
5257 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5258 llvm::AtomicOrdering::AcquireRelease);
5259 break;
5260 case 5: // memory_order_seq_cst
5261 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5262 llvm::AtomicOrdering::SequentiallyConsistent);
5263 break;
5264 }
5265 } else {
5266 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5267 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5268 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5269 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5270
5271 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5272 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5273 SI->addCase(Builder.getInt32(1), AcquireBB);
5274 SI->addCase(Builder.getInt32(2), AcquireBB);
5275 SI->addCase(Builder.getInt32(3), ReleaseBB);
5276 SI->addCase(Builder.getInt32(4), AcqRelBB);
5277 SI->addCase(Builder.getInt32(5), SeqCstBB);
5278
5279 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5280 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5281 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5282 OrderBBs.emplace_back(SeqCstBB,
5283 llvm::AtomicOrdering::SequentiallyConsistent);
5284 }
5285
5286 for (auto &[OrderBB, Ordering] : OrderBBs) {
5287 Builder.SetInsertPoint(OrderBB);
5288 if (Scp) {
5289 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5290 ? ScopeModel->map(Scp->getZExtValue())
5291 : ScopeModel->map(ScopeModel->getFallBackValue());
5292 Builder.CreateFence(Ordering,
5293 getTargetHooks().getLLVMSyncScopeID(
5294 getLangOpts(), SS, Ordering, getLLVMContext()));
5295 Builder.CreateBr(ContBB);
5296 } else {
5297 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5298 for (unsigned Scp : ScopeModel->getRuntimeValues())
5299 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5300
5301 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5302 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5303 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5304 auto *B = BBs[Scp];
5305 SI->addCase(Builder.getInt32(Scp), B);
5306
5307 Builder.SetInsertPoint(B);
5308 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5309 getLangOpts(), ScopeModel->map(Scp),
5310 Ordering, getLLVMContext()));
5311 Builder.CreateBr(ContBB);
5312 }
5313 }
5314 }
5315
5316 Builder.SetInsertPoint(ContBB);
5317 return RValue::get(nullptr);
5318 }
5319
5320 case Builtin::BI__builtin_signbit:
5321 case Builtin::BI__builtin_signbitf:
5322 case Builtin::BI__builtin_signbitl: {
5323 return RValue::get(
5324 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5325 ConvertType(E->getType())));
5326 }
5327 case Builtin::BI__warn_memset_zero_len:
5328 return RValue::getIgnored();
5329 case Builtin::BI__annotation: {
5330 // Re-encode each wide string to UTF8 and make an MDString.
5331 SmallVector<Metadata *, 1> Strings;
5332 for (const Expr *Arg : E->arguments()) {
5333 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5334 assert(Str->getCharByteWidth() == 2);
5335 StringRef WideBytes = Str->getBytes();
5336 std::string StrUtf8;
5337 if (!convertUTF16ToUTF8String(
5338 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5339 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5340 continue;
5341 }
5342 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5343 }
5344
5345 // Build an MDTuple of MDStrings and emit the intrinsic call.
5346 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5347 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5348 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5349 return RValue::getIgnored();
5350 }
5351 case Builtin::BI__builtin_annotation: {
5352 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5353 llvm::Function *F = CGM.getIntrinsic(
5354 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5355
5356 // Get the annotation string, looking through casts. Sema requires this to
5357 // be a non-wide string literal, potentially cast, so the cast<> is safe.
5358 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5359 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5360 return RValue::get(
5361 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5362 }
5363 case Builtin::BI__builtin_addcb:
5364 case Builtin::BI__builtin_addcs:
5365 case Builtin::BI__builtin_addc:
5366 case Builtin::BI__builtin_addcl:
5367 case Builtin::BI__builtin_addcll:
5368 case Builtin::BI__builtin_subcb:
5369 case Builtin::BI__builtin_subcs:
5370 case Builtin::BI__builtin_subc:
5371 case Builtin::BI__builtin_subcl:
5372 case Builtin::BI__builtin_subcll: {
5373
5374 // We translate all of these builtins from expressions of the form:
5375 // int x = ..., y = ..., carryin = ..., carryout, result;
5376 // result = __builtin_addc(x, y, carryin, &carryout);
5377 //
5378 // to LLVM IR of the form:
5379 //
5380 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5381 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5382 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5383 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5384 // i32 %carryin)
5385 // %result = extractvalue {i32, i1} %tmp2, 0
5386 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5387 // %tmp3 = or i1 %carry1, %carry2
5388 // %tmp4 = zext i1 %tmp3 to i32
5389 // store i32 %tmp4, i32* %carryout
5390
5391 // Scalarize our inputs.
5392 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5393 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5394 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5395 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5396
5397 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5398 Intrinsic::ID IntrinsicId;
5399 switch (BuiltinID) {
5400 default: llvm_unreachable("Unknown multiprecision builtin id.");
5401 case Builtin::BI__builtin_addcb:
5402 case Builtin::BI__builtin_addcs:
5403 case Builtin::BI__builtin_addc:
5404 case Builtin::BI__builtin_addcl:
5405 case Builtin::BI__builtin_addcll:
5406 IntrinsicId = Intrinsic::uadd_with_overflow;
5407 break;
5408 case Builtin::BI__builtin_subcb:
5409 case Builtin::BI__builtin_subcs:
5410 case Builtin::BI__builtin_subc:
5411 case Builtin::BI__builtin_subcl:
5412 case Builtin::BI__builtin_subcll:
5413 IntrinsicId = Intrinsic::usub_with_overflow;
5414 break;
5415 }
5416
5417 // Construct our resulting LLVM IR expression.
5418 llvm::Value *Carry1;
5419 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5420 X, Y, Carry1);
5421 llvm::Value *Carry2;
5422 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5423 Sum1, Carryin, Carry2);
5424 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5425 X->getType());
5426 Builder.CreateStore(CarryOut, CarryOutPtr);
5427 return RValue::get(Sum2);
5428 }
5429
5430 case Builtin::BI__builtin_add_overflow:
5431 case Builtin::BI__builtin_sub_overflow:
5432 case Builtin::BI__builtin_mul_overflow: {
5433 const clang::Expr *LeftArg = E->getArg(0);
5434 const clang::Expr *RightArg = E->getArg(1);
5435 const clang::Expr *ResultArg = E->getArg(2);
5436
5437 clang::QualType ResultQTy =
5438 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5439
5440 WidthAndSignedness LeftInfo =
5441 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5442 WidthAndSignedness RightInfo =
5443 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5444 WidthAndSignedness ResultInfo =
5445 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5446
5447 // Handle mixed-sign multiplication as a special case, because adding
5448 // runtime or backend support for our generic irgen would be too expensive.
5449 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5450 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5451 RightInfo, ResultArg, ResultQTy,
5452 ResultInfo);
5453
5454 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5455 ResultInfo))
5456 return EmitCheckedUnsignedMultiplySignedResult(
5457 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5458 ResultInfo);
5459
5460 WidthAndSignedness EncompassingInfo =
5461 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5462
5463 llvm::Type *EncompassingLLVMTy =
5464 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5465
5466 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5467
5468 Intrinsic::ID IntrinsicId;
5469 switch (BuiltinID) {
5470 default:
5471 llvm_unreachable("Unknown overflow builtin id.");
5472 case Builtin::BI__builtin_add_overflow:
5473 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5474 : Intrinsic::uadd_with_overflow;
5475 break;
5476 case Builtin::BI__builtin_sub_overflow:
5477 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5478 : Intrinsic::usub_with_overflow;
5479 break;
5480 case Builtin::BI__builtin_mul_overflow:
5481 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5482 : Intrinsic::umul_with_overflow;
5483 break;
5484 }
5485
5486 llvm::Value *Left = EmitScalarExpr(LeftArg);
5487 llvm::Value *Right = EmitScalarExpr(RightArg);
5488 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5489
5490 // Extend each operand to the encompassing type.
5491 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5492 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5493
5494 // Perform the operation on the extended values.
5495 llvm::Value *Overflow, *Result;
5496 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5497
5498 if (EncompassingInfo.Width > ResultInfo.Width) {
5499 // The encompassing type is wider than the result type, so we need to
5500 // truncate it.
5501 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5502
5503 // To see if the truncation caused an overflow, we will extend
5504 // the result and then compare it to the original result.
5505 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5506 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5507 llvm::Value *TruncationOverflow =
5508 Builder.CreateICmpNE(Result, ResultTruncExt);
5509
5510 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5511 Result = ResultTrunc;
5512 }
5513
5514 // Finally, store the result using the pointer.
5515 bool isVolatile =
5516 ResultArg->getType()->getPointeeType().isVolatileQualified();
5517 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5518
5519 return RValue::get(Overflow);
5520 }
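// Worked example (not in the original source): for
//   int a, b; short r;
//   bool ovf = __builtin_add_overflow(a, b, &r);
// the encompassing type is i32, so the i32 sum is truncated to i16,
// re-extended, and compared against the untruncated value; any mismatch is
// OR'ed into the overflow bit that the builtin returns.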
5521
5522 case Builtin::BI__builtin_uadd_overflow:
5523 case Builtin::BI__builtin_uaddl_overflow:
5524 case Builtin::BI__builtin_uaddll_overflow:
5525 case Builtin::BI__builtin_usub_overflow:
5526 case Builtin::BI__builtin_usubl_overflow:
5527 case Builtin::BI__builtin_usubll_overflow:
5528 case Builtin::BI__builtin_umul_overflow:
5529 case Builtin::BI__builtin_umull_overflow:
5530 case Builtin::BI__builtin_umulll_overflow:
5531 case Builtin::BI__builtin_sadd_overflow:
5532 case Builtin::BI__builtin_saddl_overflow:
5533 case Builtin::BI__builtin_saddll_overflow:
5534 case Builtin::BI__builtin_ssub_overflow:
5535 case Builtin::BI__builtin_ssubl_overflow:
5536 case Builtin::BI__builtin_ssubll_overflow:
5537 case Builtin::BI__builtin_smul_overflow:
5538 case Builtin::BI__builtin_smull_overflow:
5539 case Builtin::BI__builtin_smulll_overflow: {
5540
5541 // We translate all of these builtins directly to the relevant LLVM IR node.
5542
5543 // Scalarize our inputs.
5544 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5545 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5546 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5547
5548 // Decide which of the overflow intrinsics we are lowering to:
5549 Intrinsic::ID IntrinsicId;
5550 switch (BuiltinID) {
5551 default: llvm_unreachable("Unknown overflow builtin id.");
5552 case Builtin::BI__builtin_uadd_overflow:
5553 case Builtin::BI__builtin_uaddl_overflow:
5554 case Builtin::BI__builtin_uaddll_overflow:
5555 IntrinsicId = Intrinsic::uadd_with_overflow;
5556 break;
5557 case Builtin::BI__builtin_usub_overflow:
5558 case Builtin::BI__builtin_usubl_overflow:
5559 case Builtin::BI__builtin_usubll_overflow:
5560 IntrinsicId = Intrinsic::usub_with_overflow;
5561 break;
5562 case Builtin::BI__builtin_umul_overflow:
5563 case Builtin::BI__builtin_umull_overflow:
5564 case Builtin::BI__builtin_umulll_overflow:
5565 IntrinsicId = Intrinsic::umul_with_overflow;
5566 break;
5567 case Builtin::BI__builtin_sadd_overflow:
5568 case Builtin::BI__builtin_saddl_overflow:
5569 case Builtin::BI__builtin_saddll_overflow:
5570 IntrinsicId = Intrinsic::sadd_with_overflow;
5571 break;
5572 case Builtin::BI__builtin_ssub_overflow:
5573 case Builtin::BI__builtin_ssubl_overflow:
5574 case Builtin::BI__builtin_ssubll_overflow:
5575 IntrinsicId = Intrinsic::ssub_with_overflow;
5576 break;
5577 case Builtin::BI__builtin_smul_overflow:
5578 case Builtin::BI__builtin_smull_overflow:
5579 case Builtin::BI__builtin_smulll_overflow:
5580 IntrinsicId = Intrinsic::smul_with_overflow;
5581 break;
5582 }
5583
5584
5585 llvm::Value *Carry;
5586 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5587 Builder.CreateStore(Sum, SumOutPtr);
5588
5589 return RValue::get(Carry);
5590 }
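// Illustration (sketch, not in the original source):
//   __builtin_uadd_overflow(x, y, &sum)
// maps directly to
//   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
// with element 0 stored through &sum and the i1 overflow flag returned.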
5591 case Builtin::BIaddressof:
5592 case Builtin::BI__addressof:
5593 case Builtin::BI__builtin_addressof:
5594 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5595 case Builtin::BI__builtin_function_start:
5596 return RValue::get(CGM.GetFunctionStart(
5597 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5598 case Builtin::BI__builtin_operator_new:
5599 return EmitBuiltinNewDeleteCall(
5600 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5601 case Builtin::BI__builtin_operator_delete:
5602 EmitBuiltinNewDeleteCall(
5603 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5604 return RValue::get(nullptr);
5605
5606 case Builtin::BI__builtin_is_aligned:
5607 return EmitBuiltinIsAligned(E);
5608 case Builtin::BI__builtin_align_up:
5609 return EmitBuiltinAlignTo(E, true);
5610 case Builtin::BI__builtin_align_down:
5611 return EmitBuiltinAlignTo(E, false);
5612
5613 case Builtin::BI__noop:
5614 // __noop always evaluates to an integer literal zero.
5615 return RValue::get(ConstantInt::get(IntTy, 0));
5616 case Builtin::BI__builtin_call_with_static_chain: {
5617 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5618 const Expr *Chain = E->getArg(1);
5619 return EmitCall(Call->getCallee()->getType(),
5620 EmitCallee(Call->getCallee()), Call, ReturnValue,
5621 EmitScalarExpr(Chain));
5622 }
5623 case Builtin::BI_InterlockedExchange8:
5624 case Builtin::BI_InterlockedExchange16:
5625 case Builtin::BI_InterlockedExchange:
5626 case Builtin::BI_InterlockedExchangePointer:
5627 return RValue::get(
5628 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
5629 case Builtin::BI_InterlockedCompareExchangePointer:
5630 return RValue::get(
5631 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange, E));
5632 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5633 return RValue::get(
5634 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E));
5635 case Builtin::BI_InterlockedCompareExchange8:
5636 case Builtin::BI_InterlockedCompareExchange16:
5637 case Builtin::BI_InterlockedCompareExchange:
5638 case Builtin::BI_InterlockedCompareExchange64:
5639 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5640 case Builtin::BI_InterlockedIncrement16:
5641 case Builtin::BI_InterlockedIncrement:
5642 return RValue::get(
5643 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
5644 case Builtin::BI_InterlockedDecrement16:
5645 case Builtin::BI_InterlockedDecrement:
5646 return RValue::get(
5647 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
5648 case Builtin::BI_InterlockedAnd8:
5649 case Builtin::BI_InterlockedAnd16:
5650 case Builtin::BI_InterlockedAnd:
5651 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
5652 case Builtin::BI_InterlockedExchangeAdd8:
5653 case Builtin::BI_InterlockedExchangeAdd16:
5654 case Builtin::BI_InterlockedExchangeAdd:
5655 return RValue::get(
5656 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
5657 case Builtin::BI_InterlockedExchangeSub8:
5658 case Builtin::BI_InterlockedExchangeSub16:
5659 case Builtin::BI_InterlockedExchangeSub:
5660 return RValue::get(
5661 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
5662 case Builtin::BI_InterlockedOr8:
5663 case Builtin::BI_InterlockedOr16:
5664 case Builtin::BI_InterlockedOr:
5665 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
5666 case Builtin::BI_InterlockedXor8:
5667 case Builtin::BI_InterlockedXor16:
5668 case Builtin::BI_InterlockedXor:
5669 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
5670
5671 case Builtin::BI_bittest64:
5672 case Builtin::BI_bittest:
5673 case Builtin::BI_bittestandcomplement64:
5674 case Builtin::BI_bittestandcomplement:
5675 case Builtin::BI_bittestandreset64:
5676 case Builtin::BI_bittestandreset:
5677 case Builtin::BI_bittestandset64:
5678 case Builtin::BI_bittestandset:
5679 case Builtin::BI_interlockedbittestandreset:
5680 case Builtin::BI_interlockedbittestandreset64:
5681 case Builtin::BI_interlockedbittestandreset64_acq:
5682 case Builtin::BI_interlockedbittestandreset64_rel:
5683 case Builtin::BI_interlockedbittestandreset64_nf:
5684 case Builtin::BI_interlockedbittestandset64:
5685 case Builtin::BI_interlockedbittestandset64_acq:
5686 case Builtin::BI_interlockedbittestandset64_rel:
5687 case Builtin::BI_interlockedbittestandset64_nf:
5688 case Builtin::BI_interlockedbittestandset:
5689 case Builtin::BI_interlockedbittestandset_acq:
5690 case Builtin::BI_interlockedbittestandset_rel:
5691 case Builtin::BI_interlockedbittestandset_nf:
5692 case Builtin::BI_interlockedbittestandreset_acq:
5693 case Builtin::BI_interlockedbittestandreset_rel:
5694 case Builtin::BI_interlockedbittestandreset_nf:
5695 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5696
5697 // These builtins exist to emit regular volatile loads and stores not
5698 // affected by the -fms-volatile setting.
5699 case Builtin::BI__iso_volatile_load8:
5700 case Builtin::BI__iso_volatile_load16:
5701 case Builtin::BI__iso_volatile_load32:
5702 case Builtin::BI__iso_volatile_load64:
5703 return RValue::get(EmitISOVolatileLoad(*this, E));
5704 case Builtin::BI__iso_volatile_store8:
5705 case Builtin::BI__iso_volatile_store16:
5706 case Builtin::BI__iso_volatile_store32:
5707 case Builtin::BI__iso_volatile_store64:
5708 return RValue::get(EmitISOVolatileStore(*this, E));
5709
5710 case Builtin::BI__builtin_ptrauth_sign_constant:
5711 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5712
5713 case Builtin::BI__builtin_ptrauth_auth:
5714 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5715 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5716 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5717 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5718 case Builtin::BI__builtin_ptrauth_strip: {
5719 // Emit the arguments.
5720 SmallVector<llvm::Value *, 5> Args;
5721 for (auto argExpr : E->arguments())
5722 Args.push_back(EmitScalarExpr(argExpr));
5723
5724 // Cast the value to intptr_t, saving its original type.
5725 llvm::Type *OrigValueType = Args[0]->getType();
5726 if (OrigValueType->isPointerTy())
5727 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5728
5729 switch (BuiltinID) {
5730 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5731 if (Args[4]->getType()->isPointerTy())
5732 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5733 [[fallthrough]];
5734
5735 case Builtin::BI__builtin_ptrauth_auth:
5736 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5737 if (Args[2]->getType()->isPointerTy())
5738 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5739 break;
5740
5741 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5742 if (Args[1]->getType()->isPointerTy())
5743 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5744 break;
5745
5746 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5747 case Builtin::BI__builtin_ptrauth_strip:
5748 break;
5749 }
5750
5751 // Call the intrinsic.
5752 auto IntrinsicID = [&]() -> unsigned {
5753 switch (BuiltinID) {
5754 case Builtin::BI__builtin_ptrauth_auth:
5755 return Intrinsic::ptrauth_auth;
5756 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5757 return Intrinsic::ptrauth_resign;
5758 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5759 return Intrinsic::ptrauth_blend;
5760 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5761 return Intrinsic::ptrauth_sign_generic;
5762 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5763 return Intrinsic::ptrauth_sign;
5764 case Builtin::BI__builtin_ptrauth_strip:
5765 return Intrinsic::ptrauth_strip;
5766 }
5767 llvm_unreachable("bad ptrauth intrinsic");
5768 }();
5769 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5770 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5771
5772 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5773 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5774 OrigValueType->isPointerTy()) {
5775 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5776 }
5777 return RValue::get(Result);
5778 }
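// Illustration (sketch, not in the original source):
// __builtin_ptrauth_strip(fn, key) follows the flow above: the pointer is
// cast to intptr_t, @llvm.ptrauth.strip is called, and the result is cast
// back to the original pointer type; only sign_generic_data and
// blend_discriminator keep their integer results.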
5779
5780 case Builtin::BI__builtin_get_vtable_pointer: {
5781 const Expr *Target = E->getArg(0);
5782 QualType TargetType = Target->getType();
5783 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5784 assert(Decl);
5785 auto ThisAddress = EmitPointerWithAlignment(Target);
5786 assert(ThisAddress.isValid());
5787 llvm::Value *VTablePointer =
5788 GetVTablePtr(ThisAddress, Int8PtrTy, Decl, VTableAuthMode::MustTrap);
5789 return RValue::get(VTablePointer);
5790 }
5791
5792 case Builtin::BI__exception_code:
5793 case Builtin::BI_exception_code:
5794 return RValue::get(EmitSEHExceptionCode());
5795 case Builtin::BI__exception_info:
5796 case Builtin::BI_exception_info:
5797 return RValue::get(EmitSEHExceptionInfo());
5798 case Builtin::BI__abnormal_termination:
5799 case Builtin::BI_abnormal_termination:
5800 return RValue::get(EmitSEHAbnormalTermination());
5801 case Builtin::BI_setjmpex:
5802 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5803 E->getArg(0)->getType()->isPointerType())
5804 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5805 break;
5806 case Builtin::BI_setjmp:
5807 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5808 E->getArg(0)->getType()->isPointerType()) {
5809 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5810 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5811 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5812 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5813 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5814 }
5815 break;
5816
5817 // C++ std:: builtins.
5818 case Builtin::BImove:
5819 case Builtin::BImove_if_noexcept:
5820 case Builtin::BIforward:
5821 case Builtin::BIforward_like:
5822 case Builtin::BIas_const:
5823 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5824 case Builtin::BI__GetExceptionInfo: {
5825 if (llvm::GlobalVariable *GV =
5826 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5827 return RValue::get(GV);
5828 break;
5829 }
5830
5831 case Builtin::BI__fastfail:
5832 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
5833
5834 case Builtin::BI__builtin_coro_id:
5835 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5836 case Builtin::BI__builtin_coro_promise:
5837 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5838 case Builtin::BI__builtin_coro_resume:
5839 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5840 return RValue::get(nullptr);
5841 case Builtin::BI__builtin_coro_frame:
5842 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5843 case Builtin::BI__builtin_coro_noop:
5844 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5845 case Builtin::BI__builtin_coro_free:
5846 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5847 case Builtin::BI__builtin_coro_destroy:
5848 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5849 return RValue::get(nullptr);
5850 case Builtin::BI__builtin_coro_done:
5851 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5852 case Builtin::BI__builtin_coro_alloc:
5853 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5854 case Builtin::BI__builtin_coro_begin:
5855 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5856 case Builtin::BI__builtin_coro_end:
5857 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5858 case Builtin::BI__builtin_coro_suspend:
5859 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5860 case Builtin::BI__builtin_coro_size:
5861 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5862 case Builtin::BI__builtin_coro_align:
5863 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5864
5865 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5866 case Builtin::BIread_pipe:
5867 case Builtin::BIwrite_pipe: {
5868 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5869 *Arg1 = EmitScalarExpr(E->getArg(1));
5870 CGOpenCLRuntime OpenCLRT(CGM);
5871 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5872 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5873
5874 // Type of the generic packet parameter.
5875 unsigned GenericAS =
5876 getContext().getTargetAddressSpace(LangAS::opencl_generic);
5877 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5878
5879 // Testing which overloaded version we should generate the call for.
5880 if (2U == E->getNumArgs()) {
5881 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5882 : "__write_pipe_2";
5883 // Creating a generic function type to be able to call with any builtin or
5884 // user-defined type.
5885 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5886 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5887 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5888 return RValue::get(
5889 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5890 {Arg0, ACast, PacketSize, PacketAlign}));
5891 } else {
5892 assert(4 == E->getNumArgs() &&
5893 "Illegal number of parameters to pipe function");
5894 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5895 : "__write_pipe_4";
5896
5897 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5898 Int32Ty, Int32Ty};
5899 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5900 *Arg3 = EmitScalarExpr(E->getArg(3));
5901 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5902 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5903 // We know the third argument is an integer type, but we may need to cast
5904 // it to i32.
5905 if (Arg2->getType() != Int32Ty)
5906 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5907 return RValue::get(
5908 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5909 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5910 }
5911 }
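// Illustration (sketch, not in the original source): in the 2-argument form,
// read_pipe(p, &v) becomes a call to the runtime entry point
//   __read_pipe_2(pipe, generic-AS ptr, i32 packet_size, i32 packet_align)
// with the packet size and alignment derived from the pipe's element type.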
5912 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
5913 // functions
5914 case Builtin::BIreserve_read_pipe:
5915 case Builtin::BIreserve_write_pipe:
5916 case Builtin::BIwork_group_reserve_read_pipe:
5917 case Builtin::BIwork_group_reserve_write_pipe:
5918 case Builtin::BIsub_group_reserve_read_pipe:
5919 case Builtin::BIsub_group_reserve_write_pipe: {
5920 // Composing the mangled name for the function.
5921 const char *Name;
5922 if (BuiltinID == Builtin::BIreserve_read_pipe)
5923 Name = "__reserve_read_pipe";
5924 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5925 Name = "__reserve_write_pipe";
5926 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5927 Name = "__work_group_reserve_read_pipe";
5928 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5929 Name = "__work_group_reserve_write_pipe";
5930 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5931 Name = "__sub_group_reserve_read_pipe";
5932 else
5933 Name = "__sub_group_reserve_write_pipe";
5934
5935 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5936 *Arg1 = EmitScalarExpr(E->getArg(1));
5937 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5938 CGOpenCLRuntime OpenCLRT(CGM);
5939 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5940 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5941
5942 // Building the generic function prototype.
5943 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5944 llvm::FunctionType *FTy =
5945 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5946 // We know the second argument is an integer type, but we may need to cast
5947 // it to i32.
5948 if (Arg1->getType() != Int32Ty)
5949 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5950 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5951 {Arg0, Arg1, PacketSize, PacketAlign}));
5952 }
5953 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5954 // functions
5955 case Builtin::BIcommit_read_pipe:
5956 case Builtin::BIcommit_write_pipe:
5957 case Builtin::BIwork_group_commit_read_pipe:
5958 case Builtin::BIwork_group_commit_write_pipe:
5959 case Builtin::BIsub_group_commit_read_pipe:
5960 case Builtin::BIsub_group_commit_write_pipe: {
5961 const char *Name;
5962 if (BuiltinID == Builtin::BIcommit_read_pipe)
5963 Name = "__commit_read_pipe";
5964 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5965 Name = "__commit_write_pipe";
5966 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5967 Name = "__work_group_commit_read_pipe";
5968 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5969 Name = "__work_group_commit_write_pipe";
5970 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5971 Name = "__sub_group_commit_read_pipe";
5972 else
5973 Name = "__sub_group_commit_write_pipe";
5974
5975 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5976 *Arg1 = EmitScalarExpr(E->getArg(1));
5977 CGOpenCLRuntime OpenCLRT(CGM);
5978 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5979 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5980
5981 // Building the generic function prototype.
5982 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5983 llvm::FunctionType *FTy = llvm::FunctionType::get(
5984 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
5985
5986 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5987 {Arg0, Arg1, PacketSize, PacketAlign}));
5988 }
5989 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5990 case Builtin::BIget_pipe_num_packets:
5991 case Builtin::BIget_pipe_max_packets: {
5992 const char *BaseName;
5993 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5994 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5995 BaseName = "__get_pipe_num_packets";
5996 else
5997 BaseName = "__get_pipe_max_packets";
5998 std::string Name = std::string(BaseName) +
5999 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
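    // Illustrative example (not part of the source): for a read_only pipe,
    // Name resolves to "__get_pipe_num_packets_ro", so
    //   get_pipe_num_packets(p);
    // lowers to a call equivalent to
    //   call i32 @__get_pipe_num_packets_ro(ptr %p, i32 <size>, i32 <align>)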
6000
6001 // Building the generic function prototype.
6002 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6003 CGOpenCLRuntime OpenCLRT(CGM);
6004 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6005 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6006 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
6007 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6008
6009 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6010 {Arg0, PacketSize, PacketAlign}));
6011 }
6012
6013 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
6014 case Builtin::BIto_global:
6015 case Builtin::BIto_local:
6016 case Builtin::BIto_private: {
6017 auto Arg0 = EmitScalarExpr(E->getArg(0));
6018 auto NewArgT = llvm::PointerType::get(
6019 getLLVMContext(),
6020 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6021 auto NewRetT = llvm::PointerType::get(
6022 getLLVMContext(),
6023 CGM.getContext().getTargetAddressSpace(
6024 E->getType()->getPointeeType().getAddressSpace()));
6025 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
6026 llvm::Value *NewArg;
6027 if (Arg0->getType()->getPointerAddressSpace() !=
6028 NewArgT->getPointerAddressSpace())
6029 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
6030 else
6031 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
6032 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
6033 auto NewCall =
6034 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
6035 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
6036 ConvertType(E->getType())));
6037 }
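// Illustrative lowering (a sketch, not part of the source):
//   global int *g = to_global(p);   // p is a generic pointer
// casts %p to the generic AS if needed and emits
//   %g = call @__to_global(ptr addrspace(4) %p)
// with the result cast back to the source-level return type; the "__"
// prefix plus the callee name is exactly the NewName composed above.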
6038
6039 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
6040 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
6041 // The code below expands the builtin call to a call to one of the following
6042 // functions that an OpenCL runtime library will have to provide:
6043 // __enqueue_kernel_basic
6044 // __enqueue_kernel_varargs
6045 // __enqueue_kernel_basic_events
6046 // __enqueue_kernel_events_varargs
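// Illustrative lowering of the simplest form (a sketch, not part of the
// source):
//   enqueue_kernel(q, flags, ndrange, ^{ ... });
// maps to a call equivalent to
//   %r = call i32 @__enqueue_kernel_basic(ptr %q, i32 %flags, ptr %nd,
//            ptr addrspace(4) %kernel, ptr addrspace(4) %block)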
6047 case Builtin::BIenqueue_kernel: {
6048 StringRef Name; // Generated function call name
6049 unsigned NumArgs = E->getNumArgs();
6050
6051 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6052 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6053 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6054
6055 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6056 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6057 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6058 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6059
6060 // FIXME: Look through the addrspacecast which may exist to the stack
6061 // temporary as a hack.
6062 //
6063 // This is hardcoding the assumed ABI of the target function. This assumes
6064 // direct passing for every argument except NDRange, which is assumed to be
6065 // byval or byref indirect passed.
6066 //
6067 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6068 // through EmitCallArgs to get the correct target ABI.
6069 Range = Range->stripPointerCasts();
6070
6071 llvm::Type *RangePtrTy = Range->getType();
6072
6073 if (NumArgs == 4) {
6074 // The most basic form of the call with parameters:
6075 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6076 Name = "__enqueue_kernel_basic";
6077 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6078 GenericVoidPtrTy};
6079 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6080
6081 auto Info =
6082 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6083 llvm::Value *Kernel =
6084 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6085 llvm::Value *Block =
6086 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6087
6088 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6089 {Queue, Flags, Range, Kernel, Block});
6090 return RValue::get(RTCall);
6091 }
6092 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6093
6094 // Create a temporary array to hold the sizes of local pointer arguments
6095 // for the block. \p First is the position of the first size argument.
6096 auto CreateArrayForSizeVar =
6097 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6098 llvm::APInt ArraySize(32, NumArgs - First);
6099 QualType SizeArrayTy = getContext().getConstantArrayType(
6100 getContext().getSizeType(), ArraySize, nullptr,
6101 ArraySizeModifier::Normal,
6102 /*IndexTypeQuals=*/0);
6103 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6104 llvm::Value *TmpPtr = Tmp.getPointer();
6105 // The EmitLifetime* pair expects a naked Alloca as its last argument;
6106 // however, when the default AS is not the Alloca AS, Tmp is actually
6107 // the Alloca addrspacecast'ed to the default AS, hence the
6108 // stripPointerCasts() below.
6109 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6110 llvm::Value *ElemPtr;
6111 EmitLifetimeStart(Alloca);
6112 // Each of the following arguments specifies the size of the corresponding
6113 // argument passed to the enqueued block.
6114 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6115 for (unsigned I = First; I < NumArgs; ++I) {
6116 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6117 auto *GEP =
6118 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6119 if (I == First)
6120 ElemPtr = GEP;
6121 auto *V =
6122 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6123 Builder.CreateAlignedStore(
6124 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6125 }
6126 // Return the Alloca itself rather than a potential addrspacecast, as
6127 // this is only used by the paired EmitLifetimeEnd.
6128 return {ElemPtr, Alloca};
6129 };
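// Illustrative contents (a sketch, not part of the source): for
//   enqueue_kernel(q, flags, nd, ^(local int *a, local short *b){...},
//                  n1, n2);
// CreateArrayForSizeVar(4) emits roughly
//   %block_sizes = alloca [2 x i64]   ; n1 and n2, widened to size_t
// and returns the first-element pointer plus the raw alloca for the
// paired lifetime marker.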
6130
6131 // Could have events and/or varargs.
6132 if (E->getArg(3)->getType()->isBlockPointerType()) {
6133 // No events passed, but has variadic arguments.
6134 Name = "__enqueue_kernel_varargs";
6135 auto Info =
6136 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6137 llvm::Value *Kernel =
6138 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6139 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6140 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6141
6142 // Create a vector of the arguments, as well as a constant value to
6143 // express to the runtime the number of variadic arguments.
6144 llvm::Value *const Args[] = {Queue, Flags,
6145 Range, Kernel,
6146 Block, ConstantInt::get(IntTy, NumArgs - 4),
6147 ElemPtr};
6148 llvm::Type *const ArgTys[] = {
6149 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6150 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6151
6152 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6153 auto Call = RValue::get(
6154 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6155 EmitLifetimeEnd(TmpPtr);
6156 return Call;
6157 }
6158 // All remaining forms of the call take event arguments.
6159 if (NumArgs >= 7) {
6160 llvm::PointerType *PtrTy = llvm::PointerType::get(
6161 CGM.getLLVMContext(),
6162 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6163
6164 llvm::Value *NumEvents =
6165 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6166
6167 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
6168 // arguments to be null pointer constants (including a `0` literal), we
6169 // can detect that case and emit a null pointer directly.
6170 llvm::Value *EventWaitList = nullptr;
6171 if (E->getArg(4)->isNullPointerConstant(
6172 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6173 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6174 } else {
6175 EventWaitList =
6176 E->getArg(4)->getType()->isArrayType()
6177 ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
6178 : EmitScalarExpr(E->getArg(4));
6179 // Convert to generic address space.
6180 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6181 }
6182 llvm::Value *EventRet = nullptr;
6183 if (E->getArg(5)->isNullPointerConstant(
6184 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6185 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6186 } else {
6187 EventRet =
6188 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6189 }
6190
6191 auto Info =
6192 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6193 llvm::Value *Kernel =
6194 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6195 llvm::Value *Block =
6196 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6197
6198 std::vector<llvm::Type *> ArgTys = {
6199 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6200 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6201
6202 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6203 NumEvents, EventWaitList, EventRet,
6204 Kernel, Block};
6205
6206 if (NumArgs == 7) {
6207 // Has events but no variadics.
6208 Name = "__enqueue_kernel_basic_events";
6209 llvm::FunctionType *FTy =
6210 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6211 return RValue::get(
6212 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6213 }
6214 // Has event info and variadics
6215 // Pass the number of variadics to the runtime function too.
6216 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6217 ArgTys.push_back(Int32Ty);
6218 Name = "__enqueue_kernel_events_varargs";
6219
6220 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6221 Args.push_back(ElemPtr);
6222 ArgTys.push_back(ElemPtr->getType());
6223
6224 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6225 auto Call = RValue::get(
6226 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6227 EmitLifetimeEnd(TmpPtr);
6228 return Call;
6229 }
6230 llvm_unreachable("Unexpected enqueue_kernel signature");
6231 }
6232 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6233 // parameter.
6234 case Builtin::BIget_kernel_work_group_size: {
6235 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6236 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6237 auto Info =
6238 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6239 Value *Kernel =
6240 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6241 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6242 return RValue::get(EmitRuntimeCall(
6243 CGM.CreateRuntimeFunction(
6244 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6245 false),
6246 "__get_kernel_work_group_size_impl"),
6247 {Kernel, Arg}));
6248 }
6249 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6250 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6251 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6252 auto Info =
6253 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6254 Value *Kernel =
6255 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6256 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6257 return RValue::get(EmitRuntimeCall(
6258 CGM.CreateRuntimeFunction(
6259 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6260 false),
6261 "__get_kernel_preferred_work_group_size_multiple_impl"),
6262 {Kernel, Arg}));
6263 }
6264 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6265 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6266 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6267 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6268 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6269 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6270 auto Info =
6271 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6272 Value *Kernel =
6273 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6274 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6275 const char *Name =
6276 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6277 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6278 : "__get_kernel_sub_group_count_for_ndrange_impl";
6280 CGM.CreateRuntimeFunction(
6281 llvm::FunctionType::get(
6282 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6283 false),
6284 Name),
6285 {NDRange, Kernel, Block}));
6286 }
6287 case Builtin::BI__builtin_store_half:
6288 case Builtin::BI__builtin_store_halff: {
6289 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
6290 Value *Val = EmitScalarExpr(E->getArg(0));
6291 Address Address = EmitPointerWithAlignment(E->getArg(1));
6292 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6293 Builder.CreateStore(HalfVal, Address);
6294 return RValue::get(nullptr);
6295 }
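// Illustrative lowering (a sketch, not part of the source):
//   __builtin_store_halff(f, p);
// emits
//   %h = fptrunc float %f to half
//   store half %h, ptr %p
// and the load builtins below perform the inverse load + fpext.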
6296 case Builtin::BI__builtin_load_half: {
6297 Address Address = EmitPointerWithAlignment(E->getArg(0));
6298 Value *HalfVal = Builder.CreateLoad(Address);
6299 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6300 }
6301 case Builtin::BI__builtin_load_halff: {
6302 Address Address = EmitPointerWithAlignment(E->getArg(0));
6303 Value *HalfVal = Builder.CreateLoad(Address);
6304 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6305 }
6306 case Builtin::BI__builtin_printf:
6307 case Builtin::BIprintf:
6308 if (getTarget().getTriple().isNVPTX() ||
6309 getTarget().getTriple().isAMDGCN() ||
6310 (getTarget().getTriple().isSPIRV() &&
6311 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6312 if (getTarget().getTriple().isNVPTX())
6313 return EmitNVPTXDevicePrintfCallExpr(E);
6314 if ((getTarget().getTriple().isAMDGCN() ||
6315 getTarget().getTriple().isSPIRV()) &&
6316 getLangOpts().HIP)
6317 return EmitAMDGPUDevicePrintfCallExpr(E);
6318 }
6319
6320 break;
6321 case Builtin::BI__builtin_canonicalize:
6322 case Builtin::BI__builtin_canonicalizef:
6323 case Builtin::BI__builtin_canonicalizef16:
6324 case Builtin::BI__builtin_canonicalizel:
6325 return RValue::get(
6326 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6327
6328 case Builtin::BI__builtin_thread_pointer: {
6329 if (!getContext().getTargetInfo().isTLSSupported())
6330 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6331
6332 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6333 {GlobalsInt8PtrTy}, {}));
6334 }
6335 case Builtin::BI__builtin_os_log_format:
6336 return emitBuiltinOSLogFormat(*E);
6337
6338 case Builtin::BI__xray_customevent: {
6339 if (!ShouldXRayInstrumentFunction())
6340 return RValue::getIgnored();
6341
6342 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6343 XRayInstrKind::Custom))
6344 return RValue::getIgnored();
6345
6346 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6347 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6348 return RValue::getIgnored();
6349
6350 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6351 auto FTy = F->getFunctionType();
6352 auto Arg0 = E->getArg(0);
6353 auto Arg0Val = EmitScalarExpr(Arg0);
6354 auto Arg0Ty = Arg0->getType();
6355 auto PTy0 = FTy->getParamType(0);
6356 if (PTy0 != Arg0Val->getType()) {
6357 if (Arg0Ty->isArrayType())
6358 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6359 else
6360 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6361 }
6362 auto Arg1 = EmitScalarExpr(E->getArg(1));
6363 auto PTy1 = FTy->getParamType(1);
6364 if (PTy1 != Arg1->getType())
6365 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6366 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6367 }
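// Illustrative lowering (a sketch, not part of the source): when the
// enclosing function is XRay-instrumented,
//   __xray_customevent(buf, len);
// becomes a call equivalent to
//   call void @llvm.xray.customevent(ptr %buf, i64 %len)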
6368
6369 case Builtin::BI__xray_typedevent: {
6370 // TODO: There should be a way to always emit events even if the current
6371 // function is not instrumented. Losing events in a stream can cripple
6372 // a trace.
6373 if (!ShouldXRayInstrumentFunction())
6374 return RValue::getIgnored();
6375
6376 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6377 XRayInstrKind::Typed))
6378 return RValue::getIgnored();
6379
6380 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6381 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6382 return RValue::getIgnored();
6383
6384 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6385 auto FTy = F->getFunctionType();
6386 auto Arg0 = EmitScalarExpr(E->getArg(0));
6387 auto PTy0 = FTy->getParamType(0);
6388 if (PTy0 != Arg0->getType())
6389 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6390 auto Arg1 = E->getArg(1);
6391 auto Arg1Val = EmitScalarExpr(Arg1);
6392 auto Arg1Ty = Arg1->getType();
6393 auto PTy1 = FTy->getParamType(1);
6394 if (PTy1 != Arg1Val->getType()) {
6395 if (Arg1Ty->isArrayType())
6396 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6397 else
6398 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6399 }
6400 auto Arg2 = EmitScalarExpr(E->getArg(2));
6401 auto PTy2 = FTy->getParamType(2);
6402 if (PTy2 != Arg2->getType())
6403 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6404 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6405 }
6406
6407 case Builtin::BI__builtin_ms_va_start:
6408 case Builtin::BI__builtin_ms_va_end:
6409 return RValue::get(
6410 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
6411 BuiltinID == Builtin::BI__builtin_ms_va_start));
6412
6413 case Builtin::BI__builtin_ms_va_copy: {
6414 // Lower this manually. We can't reliably determine whether or not any
6415 // given va_copy() is for a Win64 va_list from the calling convention
6416 // alone, because it's legal to do this from a System V ABI function.
6417 // With opaque pointer types, we won't have enough information in LLVM
6418 // IR to determine this from the argument types, either. Best to do it
6419 // now, while we have enough information.
6420 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6421 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6422
6423 DestAddr = DestAddr.withElementType(Int8PtrTy);
6424 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6425
6426 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6427 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6428 }
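// Illustrative result (a sketch, not part of the source): on Win64 a
// va_list is a single pointer, so __builtin_ms_va_copy(dst, src) reduces
// to
//   %ap.val = load ptr, ptr %src
//   store ptr %ap.val, ptr %dst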
6429
6430 case Builtin::BI__builtin_get_device_side_mangled_name: {
6431 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6432 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6433 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6434 return RValue::get(Str.getPointer());
6435 }
6436 }
6437
6438 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6439 // the call using the normal call path, but using the unmangled
6440 // version of the function name.
6441 const auto &BI = getContext().BuiltinInfo;
6442 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6443 BI.isLibFunction(BuiltinID))
6444 return emitLibraryCall(*this, FD, E,
6445 CGM.getBuiltinLibFunction(FD, BuiltinID));
6446
6447 // If this is a predefined lib function (e.g. malloc), emit the call
6448 // using exactly the normal call path.
6449 if (BI.isPredefinedLibFunction(BuiltinID))
6450 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6451
6452 // Check that a call to a target specific builtin has the correct target
6453 // features.
6454 // This check is done down here so that non-target-specific builtins
6455 // avoid it; however, if generic builtins start to require generic target
6456 // features, we can move this up to the beginning of the function.
6457 checkTargetFeatures(E, FD);
6458
6459 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6460 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6461
6462 // See if we have a target specific intrinsic.
6463 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6464 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6465 StringRef Prefix =
6466 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6467 if (!Prefix.empty()) {
6468 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6469 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6470 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6471 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6472 // NOTE: we don't need to perform a compatibility flag check here, since
6473 // the intrinsics are declared in Builtins*.def via LANGBUILTIN, which
6474 // filters MS builtins via ALL_MS_LANGUAGES, so they are filtered earlier.
6475 if (IntrinsicID == Intrinsic::not_intrinsic)
6476 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6477 }
6478
6479 if (IntrinsicID != Intrinsic::not_intrinsic) {
6480 SmallVector<Value *, 16> Args;
6481
6482 // Find out if any arguments are required to be integer constant
6483 // expressions.
6484 unsigned ICEArguments = 0;
6485 ASTContext::GetBuiltinTypeError Error;
6486 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6487 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6488
6489 Function *F = CGM.getIntrinsic(IntrinsicID);
6490 llvm::FunctionType *FTy = F->getFunctionType();
6491
6492 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6493 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6494 // If the intrinsic arg type is different from the builtin arg type,
6495 // we need to do a bitcast.
6496 llvm::Type *PTy = FTy->getParamType(i);
6497 if (PTy != ArgValue->getType()) {
6498 // XXX - vector of pointers?
6499 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6500 if (PtrTy->getAddressSpace() !=
6501 ArgValue->getType()->getPointerAddressSpace()) {
6502 ArgValue = Builder.CreateAddrSpaceCast(
6503 ArgValue, llvm::PointerType::get(getLLVMContext(),
6504 PtrTy->getAddressSpace()));
6505 }
6506 }
6507
6508 // Cast vector type (e.g., v256i32) to x86_amx; this only happens
6509 // in AMX intrinsics.
6510 if (PTy->isX86_AMXTy())
6511 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6512 {ArgValue->getType()}, {ArgValue});
6513 else
6514 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6515 }
6516
6517 Args.push_back(ArgValue);
6518 }
6519
6520 Value *V = Builder.CreateCall(F, Args);
6521 QualType BuiltinRetType = E->getType();
6522
6523 llvm::Type *RetTy = VoidTy;
6524 if (!BuiltinRetType->isVoidType())
6525 RetTy = ConvertType(BuiltinRetType);
6526
6527 if (RetTy != V->getType()) {
6528 // XXX - vector of pointers?
6529 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6530 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6531 V = Builder.CreateAddrSpaceCast(
6532 V, llvm::PointerType::get(getLLVMContext(),
6533 PtrTy->getAddressSpace()));
6534 }
6535 }
6536
6537 // Cast x86_amx to vector type (e.g., v256i32); this only happens
6538 // in AMX intrinsics.
6539 if (V->getType()->isX86_AMXTy())
6540 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6541 {V});
6542 else
6543 V = Builder.CreateBitCast(V, RetTy);
6544 }
6545
6546 if (RetTy->isVoidTy())
6547 return RValue::get(nullptr);
6548
6549 return RValue::get(V);
6550 }
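// Illustrative example of the AMX special case above (a sketch, not part
// of the source): an operand arriving as <256 x i32> where the intrinsic
// expects x86_amx is wrapped as
//   %t = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x i32> %v)
// and an x86_amx result is unwrapped with @llvm.x86.cast.tile.to.vector.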
6551
6552 // Some target-specific builtins can have aggregate return values, e.g.
6553 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6554 // ReturnValue to be non-null, so that the target-specific emission code can
6555 // always just emit into it.
6556 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
6557 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6558 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6559 ReturnValue = ReturnValueSlot(DestPtr, false);
6560 }
6561
6562 // Now see if we can emit a target-specific builtin.
6563 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6564 switch (EvalKind) {
6565 case TEK_Scalar:
6566 if (V->getType()->isVoidTy())
6567 return RValue::get(nullptr);
6568 return RValue::get(V);
6569 case TEK_Aggregate:
6570 return RValue::getAggregate(ReturnValue.getAddress(),
6571 ReturnValue.isVolatile());
6572 case TEK_Complex:
6573 llvm_unreachable("No current target builtin returns complex");
6574 }
6575 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6576 }
6577
6578 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6579 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6580 switch (EvalKind) {
6581 case TEK_Scalar:
6582 if (V->getType()->isVoidTy())
6583 return RValue::get(nullptr);
6584 return RValue::get(V);
6585 case TEK_Aggregate:
6586 return RValue::getAggregate(ReturnValue.getAddress(),
6587 ReturnValue.isVolatile());
6588 case TEK_Complex:
6589 llvm_unreachable("No current hlsl builtin returns complex");
6590 }
6591 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6592 }
6593
6594 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6595 return EmitHipStdParUnsupportedBuiltin(this, FD);
6596
6597 ErrorUnsupported(E, "builtin function");
6598
6599 // Unknown builtin, for now just dump it out and return undef.
6600 return GetUndefRValue(E->getType());
6601}
6602
6603namespace {
6604struct BuiltinAlignArgs {
6605 llvm::Value *Src = nullptr;
6606 llvm::Type *SrcType = nullptr;
6607 llvm::Value *Alignment = nullptr;
6608 llvm::Value *Mask = nullptr;
6609 llvm::IntegerType *IntType = nullptr;
6610
6611 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6612 QualType AstType = E->getArg(0)->getType();
6613 if (AstType->isArrayType())
6614 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6615 else
6616 Src = CGF.EmitScalarExpr(E->getArg(0));
6617 SrcType = Src->getType();
6618 if (SrcType->isPointerTy()) {
6619 IntType = IntegerType::get(
6620 CGF.getLLVMContext(),
6621 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6622 } else {
6623 assert(SrcType->isIntegerTy());
6624 IntType = cast<llvm::IntegerType>(SrcType);
6625 }
6626 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6627 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6628 auto *One = llvm::ConstantInt::get(IntType, 1);
6629 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6630 }
6631};
6632} // namespace
6633
6634/// Generate (x & (y-1)) == 0.
6635 RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
6636 BuiltinAlignArgs Args(E, *this);
6637 llvm::Value *SrcAddress = Args.Src;
6638 if (Args.SrcType->isPointerTy())
6639 SrcAddress =
6640 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6641 return RValue::get(Builder.CreateICmpEQ(
6642 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6643 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6644}
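// Worked example (illustrative, not part of the source):
//   __builtin_is_aligned(p, 16)
// emits roughly
//   %src_addr = ptrtoint ptr %p to i64
//   %set_bits = and i64 %src_addr, 15      ; mask = alignment - 1
//   %is_aligned = icmp eq i64 %set_bits, 0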
6645
6646/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6647/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6648/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6649 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
6650 BuiltinAlignArgs Args(E, *this);
6651 llvm::Value *SrcForMask = Args.Src;
6652 if (AlignUp) {
6653 // When aligning up we have to first add the mask to ensure we go over the
6654 // next alignment value and then align down to the next valid multiple.
6655 // By adding the mask, we ensure that align_up on an already aligned
6656 // value will not change the value.
6657 if (Args.Src->getType()->isPointerTy()) {
6658 if (getLangOpts().PointerOverflowDefined)
6659 SrcForMask =
6660 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6661 else
6662 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6663 /*SignedIndices=*/true,
6664 /*isSubtraction=*/false,
6665 E->getExprLoc(), "over_boundary");
6666 } else {
6667 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6668 }
6669 }
6670 // Invert the mask to only clear the lower bits.
6671 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6672 llvm::Value *Result = nullptr;
6673 if (Args.Src->getType()->isPointerTy()) {
6674 Result = Builder.CreateIntrinsic(
6675 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6676 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6677 } else {
6678 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6679 }
6680 assert(Result->getType() == Args.SrcType);
6681 return RValue::get(Result);
6682}
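// Worked example (illustrative, not part of the source): for an integer x,
//   __builtin_align_up(x, 8)   -> (x + 7) & ~7
//   __builtin_align_down(x, 8) -> x & ~7
// and for a pointer the masking goes through llvm.ptrmask, e.g.
//   %aligned_result = call ptr @llvm.ptrmask.p0.i64(ptr %over_boundary,
//                                                   i64 -8)   ; i.e. ~7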
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static Value * emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool)
Utility to insert an atomic cmpxchg instruction.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:790
Builtin::Context & BuiltinInfo
Definition ASTContext.h:792
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3735
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4141
Expr * getRHS() const
Definition Expr.h:4090
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3147
bool hasStoredFPFeatures() const
Definition Expr.h:3102
SourceLocation getBeginLoc() const
Definition Expr.h:3277
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3126
Expr * getCallee()
Definition Expr.h:3090
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3242
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3134
arg_range arguments()
Definition Expr.h:3195
CastKind getCastKind() const
Definition Expr.h:3720
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:147
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:184
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:173
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:132
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:350
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
virtual llvm::Value * getPipeElemAlign(const Expr *PipeArg)
virtual llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2822
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1189
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5093
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:415
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3903
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6980
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3793
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4592
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2760
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6374
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7911
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:226
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4051
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1301
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2216
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5249
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:5102
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4481
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2346
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1593
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:793
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1576
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4466
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4393
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2250
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1232
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:428
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4381
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1692
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds.
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
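The source-level counterpart is __builtin_assume_aligned; a minimal illustrative sketch:
void *alignHint(void *P) {
  // Tells the optimizer the returned pointer is 64-byte aligned.
  return __builtin_assume_aligned(P, 64);
}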
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this point in the program.
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the memory representation to the LLVM value representation.
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:652
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
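A sketch of declaring and calling a runtime helper on demand; the name "__example_hook" and the argument Ptr are hypothetical, and VoidTy/Int8PtrTy come from the CodeGenTypeCache:
llvm::FunctionType *FTy =
    llvm::FunctionType::get(CGF.VoidTy, {CGF.Int8PtrTy}, /*isVarArg=*/false);
llvm::FunctionCallee Callee = CGM.CreateRuntimeFunction(FTy, "__example_hook");
CGF.EmitRuntimeCall(Callee, {Ptr}); // Ptr is an assumed llvm::Value*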
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
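A sketch of the usual lowering step, with assumed names:
// Lower the call's AST result type to its LLVM representation.
llvm::Type *RetTy = CGF.ConvertType(E->getType());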
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition CGCall.cpp:1703
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue reference.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
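A sketch of how emitters package results for the three evaluation kinds (placeholder names):
RValue packageScalar(llvm::Value *V) { return RValue::get(V); }
RValue packageComplex(llvm::Value *Re, llvm::Value *Im) {
  return RValue::getComplex(Re, Im);
}
RValue packageAggregate(Address Addr) { return RValue::getAggregate(Addr); }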
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored, and whether the address is volatile or is unused.
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query to whether or not target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address suitable for storage by the ABI.
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the actual address of the next instruction that will be executed.
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target-specific test of a floating-point value for properties such as IsNaN or Infinity.
Definition TargetInfo.h:185
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4388
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variants.
Definition TypeBase.h:3437
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3467
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:459
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer, using any crazy technique that we want to.
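A sketch of constant-folding an argument before emitting IR, assuming E is the CallExpr and Ctx is the ASTContext:
Expr::EvalResult Result;
if (E->getArg(0)->EvaluateAsInt(Result, Ctx)) {
  llvm::APSInt Value = Result.Val.getInt();
  // Emit a compile-time constant instead of a runtime computation.
}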
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the same size) until reaching a fixed point.
Definition Expr.cpp:3116
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3094
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3089
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point value, using any crazy technique that we want to.
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:835
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy technique (that has nothing to do with language standards) that we want to.
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than producing a value.
Definition Expr.cpp:3669
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string, return the constant string (without the terminating null).
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4047
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic expression.
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available where the pointer points.
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_function_start, return that declaration.
Definition Expr.cpp:225
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3160
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4855
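For orientation, an illustrative C declaration that produces such a field (the struct is hypothetical):
struct Packet {
  unsigned length;
  // counted_by names the field holding the element count of the FAM.
  char payload[] __attribute__((counted_by(length)));
};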
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3762
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5597
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3447
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ constructor, Objective-C selector, etc.).
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2199
PipeType - OpenCL 2.0 pipe type.
Definition TypeBase.h:8120
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8386
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8428
Represents a struct/union/class.
Definition Decl.h:4324
field_range fields() const
Definition Decl.h:4527
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
bool isUnion() const
Definition Decl.h:3925
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the target machine you are compiling for.
Definition TargetInfo.h:747
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8559
bool isVoidType() const
Definition TypeBase.h:8901
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char, signed char, short, int, long, ...], or an enum decl which has a signed representation.
Definition Type.cpp:2206
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or because it is the injected-class-name type of a class template or class template partial specialization.
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8638
bool isCountAttributedType() const
Definition Type.cpp:742
bool isPointerType() const
Definition TypeBase.h:8539
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8945
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9188
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9121
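A sketch of the typical getAs pattern when inspecting an argument's type (names assumed):
if (const auto *VT = E->getType()->getAs<clang::VectorType>()) {
  clang::QualType EltTy = VT->getElementType();
  // ... handle the vector element type ...
}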
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4176
QualType getElementType() const
Definition TypeBase.h:4190
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of an object with static storage duration.
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw statement leading to stack unwinding).
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
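A sketch of computing the os_log buffer layout for a call expression E, assuming Ctx is the ASTContext:
clang::analyze_os_log::OSLogBufferLayout Layout;
if (clang::analyze_os_log::computeOSLogBufferLayout(Ctx, E, Layout)) {
  unsigned char Summary = Layout.getSummaryByte();
  unsigned char NumArgs = Layout.getNumArgsByte();
}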
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:356
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial', or 'kernels'.
Expr * Cond
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742