CGBuiltin.cpp
//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGBuiltin.h"
#include "ABIInfo.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGRecordLayout.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "ConstantEmitter.h"
#include "PatternInit.h"
#include "TargetInfo.h"
#include "clang/AST/OSLog.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/ScopedPrinter.h"
#include <optional>
#include <utility>

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// Some builtins do not have a library implementation on some targets and
/// are instead emitted as LLVM IRs by some target builtin emitters.
/// FIXME: Remove this when library support is added
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
                                  const Builtin::Context &BI,
                                  const CodeGenFunction &CGF) {
  if (!CGF.CGM.getLangOpts().MathErrno &&
      BI.isConstWithoutErrnoAndExceptions(BuiltinID)) {
    switch (BuiltinID) {
    default:
      return false;
    case Builtin::BIlogbf:
    case Builtin::BI__builtin_logbf:
    case Builtin::BIlogb:
    case Builtin::BI__builtin_logb:
    case Builtin::BIscalbnf:
    case Builtin::BI__builtin_scalbnf:
    case Builtin::BIscalbn:
    case Builtin::BI__builtin_scalbn:
      return true;
    }
  }
  return false;
}

static Value *EmitTargetArchBuiltinExpr(CodeGenFunction *CGF,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue,
                                        llvm::Triple::ArchType Arch) {
  // When compiling in HipStdPar mode we have to be conservative in rejecting
  // target specific features in the FE, and defer the possible error to the
  // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
  // referenced by an accelerator executable function, we emit an error.
  // Returning nullptr here leads to the builtin being handled in
  // EmitStdParUnsupportedBuiltin.
  if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
      Arch != CGF->getTarget().getTriple().getArch())
    return nullptr;

  switch (Arch) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be:
    return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
  case llvm::Triple::bpfeb:
  case llvm::Triple::bpfel:
    return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
  case llvm::Triple::dxil:
    return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return CGF->EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppcle:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
  case llvm::Triple::systemz:
    return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
  case llvm::Triple::hexagon:
    return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64:
    return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
  case llvm::Triple::spirv32:
  case llvm::Triple::spirv64:
    if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
      return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
    [[fallthrough]];
  case llvm::Triple::spirv:
    return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E,
                                              ReturnValueSlot ReturnValue) {
  if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
    assert(getContext().getAuxTargetInfo() && "Missing aux target info");
    return EmitTargetArchBuiltinExpr(
        this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
        ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
  }

  return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
                                   getTarget().getTriple().getArch());
}

static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
                             Align AlignmentInBytes) {
  ConstantInt *Byte;
  switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
  case LangOptions::TrivialAutoVarInitKind::Uninitialized:
    // Nothing to initialize.
    return;
  case LangOptions::TrivialAutoVarInitKind::Zero:
    Byte = CGF.Builder.getInt8(0x00);
    break;
  case LangOptions::TrivialAutoVarInitKind::Pattern: {
    llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
    Byte = llvm::dyn_cast<llvm::ConstantInt>(
        initializationPatternFor(CGF.CGM, Int8));
    break;
  }
  }
  if (CGF.CGM.stopAutoInit())
    return;
  auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
  I->addAnnotationMetadata("auto-init");
}
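
// As a rough sketch of the expected lowering (exact IR may differ): with
// -ftrivial-auto-var-init=zero, a call such as __builtin_alloca(n) becomes
// approximately
//   %buf = alloca i8, i64 %n
//   call void @llvm.memset.p0.i64(ptr %buf, i8 0, i64 %n, i1 false)
// with the memset carrying the "auto-init" annotation metadata added above.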

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                     unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary). We may have
  // to build this up so provide a small stack buffer to handle the vast
  // majority of names.
  StringRef Name;
  GlobalDecl D(FD);

  // TODO: This list should be expanded or refactored after all GCC-compatible
  // std libcall builtins are implemented.
  static SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
      {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
      {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
      {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
      {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
      {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
      {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
      {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
      {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
      {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
      {Builtin::BI__builtin_printf, "__printfieee128"},
      {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
      {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
      {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
      {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
      {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
      {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
      {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
      {Builtin::BI__builtin_scanf, "__scanfieee128"},
      {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
      {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
      {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
      {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
      {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
  };

  // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
  // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
  // if it is 64-bit 'long double' mode.
  static SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
      {Builtin::BI__builtin_frexpl, "frexp"},
      {Builtin::BI__builtin_ldexpl, "ldexp"},
      {Builtin::BI__builtin_modfl, "modf"},
  };

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else {
    // TODO: This mutation should also be applied to targets other than PPC,
    // once the backend supports IEEE 128-bit style libcalls.
    if (getTriple().isPPC64() &&
        &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
        F128Builtins.contains(BuiltinID))
      Name = F128Builtins[BuiltinID];
    else if (getTriple().isOSAIX() &&
             &getTarget().getLongDoubleFormat() ==
                 &llvm::APFloat::IEEEdouble() &&
             AIXLongDouble64Builtins.contains(BuiltinID))
      Name = AIXLongDouble64Builtins[BuiltinID];
    else
      Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
  }

  llvm::FunctionType *Ty =
      cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                 QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                   QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E) {
  ASTContext &Ctx = CGF.getContext();
  Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
  const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
  unsigned Bytes = Ptr.getElementType()->isPointerTy()
                       ? DL.getPointerTypeSize(Ptr.getElementType())
                       : DL.getTypeStoreSize(Ptr.getElementType());
  unsigned Align = Ptr.getAlignment().getQuantity();
  if (Align % Bytes != 0) {
    DiagnosticsEngine &Diags = CGF.CGM.getDiags();
    Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
    // Force address to be at least naturally-aligned.
    return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
  }
  return Ptr;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
Value *MakeBinaryAtomicValue(
    CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
    AtomicOrdering Ordering) {

  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
  return EmitFromInt(CGF, Result, T, ValueType);
}

static Value *EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(1));

  Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
  LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
  LV.setNontemporal(true);
  CGF.EmitStoreOfScalar(Val, LV, false);
  return nullptr;
}

static Value *EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Address Addr = CGF.EmitPointerWithAlignment(E->getArg(0));

  LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
  LV.setNontemporal(true);
  return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
}

static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
}

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      T, E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Val->getType();
  Val = EmitToInt(CGF, Val, T, IntType);

  llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
      Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Val);
  if (Invert)
    Result =
        CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                llvm::ConstantInt::getAllOnesValue(IntType));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}

/// Utility to insert an atomic cmpxchg instruction.
///
/// @param CGF The current codegen function.
/// @param E   Builtin call expression to convert to cmpxchg.
///            arg0 - address to operate on
///            arg1 - value to compare with
///            arg2 - new value
/// @param ReturnBool Specifies whether to return success flag of
///                   cmpxchg result or the old value.
///
/// @returns result of cmpxchg, according to ReturnBool
///
/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
/// invoke the function EmitAtomicCmpXchgForMSIntrin.
static Value *MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E,
                                     bool ReturnBool) {
  QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
  Address DestAddr = CheckAtomicAlignment(CGF, E);

  llvm::IntegerType *IntType = llvm::IntegerType::get(
      CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));

  Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Cmp->getType();
  Cmp = EmitToInt(CGF, Cmp, T, IntType);
  Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);

  Value *Pair = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Cmp, New, llvm::AtomicOrdering::SequentiallyConsistent,
      llvm::AtomicOrdering::SequentiallyConsistent);
  if (ReturnBool)
    // Extract boolean success flag and zext it to int.
    return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
                                  CGF.ConvertType(E->getType()));
  else
    // Extract old value and emit it using the same type as compare value.
    return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
                       ValueType);
}

/// This function should be invoked to emit atomic cmpxchg for Microsoft's
/// _InterlockedCompareExchange* intrinsics which have the following signature:
/// T _InterlockedCompareExchange(T volatile *Destination,
///                               T Exchange,
///                               T Comparand);
///
/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
/// cmpxchg *Destination, Comparand, Exchange.
/// So we need to swap Comparand and Exchange when invoking
/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
/// function MakeAtomicCmpXchgValue since it expects the arguments to be
/// already swapped.
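///
/// As a rough sketch, a call such as
///   _InterlockedCompareExchange(&Dest, Exchange, Comparand)
/// is therefore emitted approximately as
///   %pair = cmpxchg volatile ptr %Dest, iN %Comparand, iN %Exchange seq_cst seq_cst
///   %old  = extractvalue { iN, i1 } %pair, 0
/// and %old (the previous value) is returned, matching MSVC semantics.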

static
Value *EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(
      E->getType(), E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(1)->getType()));
  assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
                                                 E->getArg(2)->getType()));

  Address DestAddr = CheckAtomicAlignment(CGF, E);

  auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
  auto *RTy = Exchange->getType();

  auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));

  if (RTy->isPointerTy()) {
    Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
    Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
  }

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
                         AtomicOrdering::Monotonic :
                         SuccessOrdering;

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
      DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
  CmpXchg->setVolatile(true);

  auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
  if (RTy->isPointerTy()) {
    Result = CGF.Builder.CreateIntToPtr(Result, RTy);
  }

  return Result;
}

// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
// prototyped like this:
//
// unsigned char _InterlockedCompareExchange128...(
//     __int64 volatile * _Destination,
//     __int64 _ExchangeHigh,
//     __int64 _ExchangeLow,
//     __int64 * _ComparandResult);
//
// Note that Destination is assumed to be at least 16-byte aligned, despite
// being typed int64.

static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
                                              const CallExpr *E,
                                              AtomicOrdering SuccessOrdering) {
  assert(E->getNumArgs() == 4);
  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
  Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));

  assert(DestPtr->getType()->isPointerTy());
  assert(!ExchangeHigh->getType()->isPointerTy());
  assert(!ExchangeLow->getType()->isPointerTy());

  // For Release ordering, the failure ordering should be Monotonic.
  auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
                             ? AtomicOrdering::Monotonic
                             : SuccessOrdering;

  // Convert to i128 pointers and values. Alignment is also overridden for
  // destination pointer.
  llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
  Address DestAddr(DestPtr, Int128Ty,
                   CGF.getContext().toCharUnitsFromBits(128));
  ComparandAddr = ComparandAddr.withElementType(Int128Ty);

  // (((i128)hi) << 64) | ((i128)lo)
  ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
  ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
  ExchangeHigh =
      CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
  llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);

  // Load the comparand for the instruction.
  llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);

  auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
                                              SuccessOrdering, FailureOrdering);

  // The atomic instruction is marked volatile for consistency with MSVC. This
  // blocks the few atomics optimizations that LLVM has. If we want to optimize
  // _Interlocked* operations in the future, we will have to remove the volatile
  // marker.
  CXI->setVolatile(true);

  // Store the result as an outparameter.
  CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
                          ComparandAddr);

  // Get the success boolean and zero extend it to i8.
  Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
  return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
}
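
// Note on the increment/decrement helpers below: LLVM's atomicrmw returns
// the value the memory held *before* the operation, whereas MSVC's
// _InterlockedIncrement/_InterlockedDecrement return the *new* value, so
// the result is recomputed with an add/sub after the atomic operation.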
static Value *EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
}

static Value *EmitAtomicDecrementValue(
    CodeGenFunction &CGF, const CallExpr *E,
    AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
  assert(E->getArg(0)->getType()->isPointerType());

  auto *IntTy = CGF.ConvertType(E->getType());
  Address DestAddr = CheckAtomicAlignment(CGF, E);
  auto *Result = CGF.Builder.CreateAtomicRMW(
      AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
  return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
}

// Build a plain volatile load.
static Value *EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::Type *ITy =
      llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
  llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
  Load->setAtomic(llvm::AtomicOrdering::Monotonic);
  Load->setVolatile(true);
  return Load;
}

// Build a plain volatile store.
static Value *EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E) {
  Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
  Value *Value = CGF.EmitScalarExpr(E->getArg(1));
  QualType ElTy = E->getArg(0)->getType()->getPointeeType();
  CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
  llvm::StoreInst *Store =
      CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
  Store->setAtomic(llvm::AtomicOrdering::Monotonic);
  Store->setVolatile(true);
  return Store;
}

// Emit a simple mangled intrinsic that has 1 argument and a return type
// matching the argument type. Depending on mode, this may be a constrained
// floating-point intrinsic.
static Value *emitUnaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, Src0);
  }
}

// Emit an intrinsic that has 2 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                const CallExpr *E, unsigned IntrinsicID,
                                unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1 });
  }
}

// Emit an intrinsic whose second argument is separately type-mangled.
static Value *
emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                       Intrinsic::ID IntrinsicID,
                                       Intrinsic::ID ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {Src0->getType(), Src1->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
  }

  Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
  return CGF.Builder.CreateCall(F, {Src0, Src1});
}

// Emit an intrinsic that has 3 operands of the same type as its result.
// Depending on mode, this may be a constrained floating-point intrinsic.
static Value *emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF,
                                 const CallExpr *E, unsigned IntrinsicID,
                                 unsigned ConstrainedIntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));

  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  if (CGF.Builder.getIsFPConstrained()) {
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
    return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
  } else {
    Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
    return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
  }
}

// Emit an intrinsic that has overloaded integer result and fp operand.
static Value *
emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                        unsigned IntrinsicID,
                                        unsigned ConstrainedIntrinsicID) {
  llvm::Type *ResultType = CGF.ConvertType(E->getType());
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));

  if (CGF.Builder.getIsFPConstrained()) {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
    Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
                                       {ResultType, Src0->getType()});
    return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
  } else {
    Function *F =
        CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
    return CGF.Builder.CreateCall(F, Src0);
  }
}

static Value *emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                               Intrinsic::ID IntrinsicID) {
  llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));

  QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
  llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
  llvm::Function *F =
      CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);

  llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
  LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
  CGF.EmitStoreOfScalar(Exp, LV);

  return CGF.Builder.CreateExtractValue(Call, 0);
}

static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                              Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));

  llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
  llvm::Value *Call = CGF.Builder.CreateCall(F, Val);

  llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
  LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);

  llvm::StoreInst *StoreSin =
      CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
  llvm::StoreInst *StoreCos =
      CGF.Builder.CreateStore(CosResult, CosLV.getAddress());

  // Mark the two stores as non-aliasing with each other. The order of stores
  // emitted by this builtin is arbitrary; enforcing a particular order would
  // prevent optimizations later on.
  llvm::MDBuilder MDHelper(CGF.getLLVMContext());
  MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
  MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
  MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
  StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
  StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
}

static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
                                    Intrinsic::ID IntrinsicID) {
  llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
  llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));

  llvm::Value *Call =
      CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);

  llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
  llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);

  QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
  LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
  CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);

  return FractionalResult;
}

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

/// Emit the computation of the sign bit for a floating point value. Returns
/// the i1 sign bit value.
static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
  LLVMContext &C = CGF.CGM.getLLVMContext();

  llvm::Type *Ty = V->getType();
  int Width = Ty->getPrimitiveSizeInBits();
  llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
  V = CGF.Builder.CreateBitCast(V, IntTy);
  if (Ty->isPPC_FP128Ty()) {
    // We want the sign bit of the higher-order double. The bitcast we just
    // did works as if the double-double was stored to memory and then
    // read as an i128. The "store" will put the higher-order double in the
    // lower address in both little- and big-endian modes, but the "load"
    // will treat those bits as a different part of the i128: the low bits in
    // little-endian, the high bits in big-endian. Therefore, on big-endian
    // we need to shift the high bits down to the low before truncating.
    Width >>= 1;
    if (CGF.getTarget().isBigEndian()) {
      Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
      V = CGF.Builder.CreateLShr(V, ShiftCst);
    }
    // We are truncating the value in order to extract the higher-order
    // double, which we will be using to extract the sign from.
    IntTy = llvm::IntegerType::get(C, Width);
    V = CGF.Builder.CreateTrunc(V, IntTy);
  }
  Value *Zero = llvm::Constant::getNullValue(IntTy);
  return CGF.Builder.CreateICmpSLT(V, Zero);
}

/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
/// hidden pointer). This is used to check that annotating FP libcalls (which
/// could set `errno`) with "int" TBAA metadata is safe. If any floating-point
/// arguments are passed indirectly, setup for the call could be incorrectly
/// optimized out.
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo) {
  auto IsIndirect = [&](ABIArgInfo const &info) {
    return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
  };
  return !IsIndirect(FnInfo.getReturnInfo()) &&
         llvm::none_of(FnInfo.arguments(),
                       [&](CGFunctionInfoArgInfo const &ArgInfo) {
                         return IsIndirect(ArgInfo.info);
                       });
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
                              const CallExpr *E, llvm::Constant *calleeValue) {
  CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
  CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
  llvm::CallBase *callOrInvoke = nullptr;
  CGFunctionInfo const *FnInfo = nullptr;
  RValue Call =
      CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
                   /*Chain=*/nullptr, &callOrInvoke, &FnInfo);

  if (unsigned BuiltinID = FD->getBuiltinID()) {
    // Check whether this is an FP math builtin function, such as
    // BI__builtin_expf.
    ASTContext &Context = CGF.getContext();
    bool ConstWithoutErrnoAndExceptions =
        Context.BuiltinInfo.isConstWithoutErrnoAndExceptions(BuiltinID);
    // Restrict to targets with errno; for example, macOS doesn't set errno.
    // TODO: Support builtin functions with complex type returned, e.g. cacosh.
    if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
        !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
        HasNoIndirectArgumentsOrResults(*FnInfo)) {
      // Emit "int" TBAA metadata on FP math libcalls.
      clang::QualType IntTy = Context.IntTy;
      TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
      CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
    }
  }
  return Call;
}

/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
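
// As a rough sketch: with IntrinsicID = Intrinsic::sadd_with_overflow and
// i32 operands, this emits
//   %tmp = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %x, i32 %y)
// returning the i32 sum and storing the i1 overflow flag in Carry.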

namespace {
  struct WidthAndSignedness {
    unsigned Width;
    bool Signed;
  };
}

static WidthAndSignedness
getIntegerWidthAndSignedness(const clang::ASTContext &context,
                             const clang::QualType Type) {
  assert(Type->isIntegerType() && "Given type is not an integer.");
  unsigned Width = context.getIntWidth(Type);
  bool Signed = Type->isSignedIntegerType();
  return {Width, Signed};
}

// Given one or more integer types, this function produces an integer type that
// encompasses them: any value in one of the given types could be expressed in
// the encompassing type.
static struct WidthAndSignedness
EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
  assert(Types.size() > 0 && "Empty list of types.");

  // If any of the given types is signed, we must return a signed type.
  bool Signed = false;
  for (const auto &Type : Types) {
    Signed |= Type.Signed;
  }

  // The encompassing type must have a width greater than or equal to the width
  // of the specified types. Additionally, if the encompassing type is signed,
  // its width must be strictly greater than the width of any unsigned types
  // given.
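  //
  // For example, encompassing {unsigned 32-bit, signed 32-bit} requires a
  // signed type of at least 33 bits, since a signed 32-bit type cannot
  // represent UINT32_MAX.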
  unsigned Width = 0;
  for (const auto &Type : Types) {
    unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
    if (Width < MinWidth) {
      Width = MinWidth;
    }
  }

  return {Width, Signed};
}

Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
  Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
  return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
                            ArgValue);
}

/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  return From == To || (From == 0 && To == 1) || (From == 3 && To == 2);
}

static llvm::Value *
getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
  return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
}
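
// Note: for __builtin_object_size types 0 and 1 the "unknown" result is
// (size_t)-1, while for the minimum-size queries (types 2 and 3) it is 0,
// which is what the (Type & 2) test above encodes.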

llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
                                                 llvm::IntegerType *ResType,
                                                 llvm::Value *EmittedE,
                                                 bool IsDynamic) {
  uint64_t ObjectSize;
  if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
    return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
  return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}

namespace {

/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
class StructFieldAccess
    : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
  bool AddrOfSeen = false;

public:
  const Expr *ArrayIndex = nullptr;
  QualType ArrayElementTy;

  const Expr *VisitMemberExpr(const MemberExpr *E) {
    if (AddrOfSeen && E->getType()->isArrayType())
      // Avoid forms like '&ptr->array'.
      return nullptr;
    return E;
  }

  const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
    if (ArrayIndex)
      // We don't support multiple subscripts.
      return nullptr;

    AddrOfSeen = false; // '&ptr->array[idx]' is okay.
    ArrayIndex = E->getIdx();
    ArrayElementTy = E->getBase()->getType();
    return Visit(E->getBase());
  }
  const Expr *VisitCastExpr(const CastExpr *E) {
    if (E->getCastKind() == CK_LValueToRValue)
      return E;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitParenExpr(const ParenExpr *E) {
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
    AddrOfSeen = true;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
    AddrOfSeen = false;
    return Visit(E->getSubExpr());
  }
  const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
    return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
  }
};

} // end anonymous namespace

/// Find a struct's flexible array member. It may be embedded inside multiple
/// sub-structs, but must still be the last field.
static const FieldDecl *FindFlexibleArrayMemberField(CodeGenFunction &CGF,
                                                     ASTContext &Ctx,
                                                     const RecordDecl *RD) {
  const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
      CGF.getLangOpts().getStrictFlexArraysLevel();

  if (RD->isImplicit())
    return nullptr;

  for (const FieldDecl *FD : RD->fields()) {
    if (Decl::isFlexibleArrayMemberLike(
            Ctx, FD, FD->getType(), StrictFlexArraysLevel,
            /*IgnoreTemplateOrMacroSubstitution=*/true))
      return FD;

    if (const auto *RD = FD->getType()->getAsRecordDecl())
      if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
        return FD;
  }

  return nullptr;
}

/// Calculate the offset of a struct field. It may be embedded inside multiple
/// sub-structs.
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
                           const FieldDecl *FD, int64_t &Offset) {
  if (RD->isImplicit())
    return false;

  // Keep track of the field number ourselves, because the other methods
  // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
  // is laid out.
  uint32_t FieldNo = 0;
  const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);

  for (const FieldDecl *Field : RD->fields()) {
    if (Field == FD) {
      Offset += Layout.getFieldOffset(FieldNo);
      return true;
    }

    if (const auto *RD = Field->getType()->getAsRecordDecl()) {
      if (GetFieldOffset(Ctx, RD, FD, Offset)) {
        Offset += Layout.getFieldOffset(FieldNo);
        return true;
      }
    }

    if (!RD->isUnion())
      ++FieldNo;
  }

  return false;
}

static std::optional<int64_t>
GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
  int64_t Offset = 0;

  if (GetFieldOffset(Ctx, RD, FD, Offset))
    return std::optional<int64_t>(Offset);

  return std::nullopt;
}

llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
                                                llvm::Value *EmittedE,
                                                unsigned Type,
                                                llvm::IntegerType *ResType) {
  // Note: If the whole struct is specified in the __bdos (i.e. the Visitor
  // returns a DeclRefExpr), the calculation of the whole size of the structure
  // with a flexible array member can be done in two ways:
  //
  //     1) sizeof(struct S) + count * sizeof(typeof(fam))
  //     2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
  //
  // The first will add additional padding after the end of the array
  // allocation while the second method is more precise, but not quite expected
  // from programmers. See
  // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
  // of the topic.
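  //
  // For example, given 'struct S { size_t count; short fam[]; }' with
  // count == 3, method 1 yields sizeof(struct S) + 3 * sizeof(short),
  // which includes any tail padding of S, while method 2 yields
  // offsetof(struct S, fam) + 3 * sizeof(short), the tight bound.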
  //
  // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
  // structure. Therefore, because of the above issue, we choose to match what
  // GCC does for consistency's sake.

  StructFieldAccess Visitor;
  E = Visitor.Visit(E);
  if (!E)
    return nullptr;

  const Expr *Idx = Visitor.ArrayIndex;
  if (Idx) {
    if (Idx->HasSideEffects(getContext()))
      // We can't have side-effects.
      return getDefaultBuiltinObjectSizeResult(Type, ResType);

    if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
      int64_t Val = IL->getValue().getSExtValue();
      if (Val < 0)
        return getDefaultBuiltinObjectSizeResult(Type, ResType);

      // The index is 0, so we don't need to take it into account.
      if (Val == 0)
        Idx = nullptr;
    }
  }

  // __counted_by on either a flexible array member or a pointer into a struct
  // with a flexible array member.
  if (const auto *ME = dyn_cast<MemberExpr>(E))
    return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
                                   Type, ResType);

  // __counted_by on a pointer in a struct.
  if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
      ICE && ICE->getCastKind() == CK_LValueToRValue)
    return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
                                    Type, ResType);

  return nullptr;
}

static llvm::Value *EmitPositiveResultOrZero(CodeGenFunction &CGF,
                                             llvm::Value *Res,
                                             llvm::Value *Index,
                                             llvm::IntegerType *ResType,
                                             bool IsSigned) {
  // cmp = (array_size >= 0)
  Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
  if (Index)
    // cmp = (cmp && index >= 0)
    Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);

  // return cmp ? result : 0
  return CGF.Builder.CreateSelect(Cmp, Res,
                                  ConstantInt::get(ResType, 0, IsSigned));
}

static std::pair<llvm::Value *, llvm::Value *>
GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME,
                      const FieldDecl *ArrayFD, const FieldDecl *CountFD,
                      const Expr *Idx, llvm::IntegerType *ResType,
                      bool IsSigned) {
  // count = ptr->count;
  Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
  if (!Count)
    return std::make_pair<Value *>(nullptr, nullptr);
  Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");

  // index = ptr->index;
  Value *Index = nullptr;
  if (Idx) {
    bool IdxSigned = Idx->getType()->isSignedIntegerType();
    Index = CGF.EmitScalarExpr(Idx);
    Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
  }

  return std::make_pair(Count, Index);
}

llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  //      struct p;
  //      struct s {
  //          /* ... */
  //          struct p **array __attribute__((counted_by(count)));
  //          int count;
  //      };
  //
  // 1) 'ptr->array':
  //
  //      count = ptr->count;
  //
  //      array_element_size = sizeof (*ptr->array);
  //      array_size = count * array_element_size;
  //
  //      result = array_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //
  //      array_element_size = sizeof (*ptr->array);
  //      array_size = count * array_element_size;
  //
  //      casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //      index_size = index * casted_array_element_size;
  //      result = array_size - index_size;
  //
  //      cmp = (result >= 0)
  //      if (index)
  //        cmp = (cmp && index > 0)
  //      return cmp ? result : 0;

  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have a CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}

llvm::Value *CodeGenFunction::emitCountedByMemberSize(
    const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!FD)
    return nullptr;

  // Find the flexible array member and check that it has the __counted_by
  // attribute.
  ASTContext &Ctx = getContext();
  const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
  const FieldDecl *FlexibleArrayMemberFD = nullptr;

  if (Decl::isFlexibleArrayMemberLike(
          Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
          /*IgnoreTemplateOrMacroSubstitution=*/true))
    FlexibleArrayMemberFD = FD;
  else
    FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);

  if (!FlexibleArrayMemberFD ||
      !FlexibleArrayMemberFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the flexible array member's object size using these formulae.
  // (Note: if the calculation is negative, we return 0.):
  //
  //      struct p;
  //      struct s {
  //          /* ... */
  //          int count;
  //          struct p *array[] __attribute__((counted_by(count)));
  //      };
  //
  // 1) 'ptr->array':
  //
  //      count = ptr->count;
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      result = flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      casted_flexible_array_member_element_size =
  //          sizeof (*((cast) ptr->array));
  //      index_size = index * casted_flexible_array_member_element_size;
  //
  //      result = flexible_array_member_size - index_size;
  //
  //      cmp = (result >= 0)
  //      if (index != 0)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;
  //
  // 3) '&ptr->field':
  //
  //      count = ptr->count;
  //      sizeof_struct = sizeof (struct s);
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      field_offset = offsetof (struct s, field);
  //      offset_diff = sizeof_struct - field_offset;
  //
  //      result = offset_diff + flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      return cmp ? result : 0;
  //
  // 4) '&((cast) ptr->field_array)[idx]':
  //
  //      count = ptr->count;
  //      index = idx;
  //      sizeof_struct = sizeof (struct s);
  //
  //      flexible_array_member_element_size = sizeof (*ptr->array);
  //      flexible_array_member_size =
  //          count * flexible_array_member_element_size;
  //
  //      casted_field_element_size = sizeof (*((cast) ptr->field_array));
  //      field_offset = offsetof (struct s, field)
  //      field_offset += index * casted_field_element_size;
  //
  //      offset_diff = sizeof_struct - field_offset;
  //
  //      result = offset_diff + flexible_array_member_size;
  //
  //      cmp = (result >= 0)
  //      if (index != 0)
  //        cmp = (cmp && index >= 0)
  //      return cmp ? result : 0;

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();

  // Explicit cast because otherwise CharWidth would promote i32 values into
  // u64, leading to overflows.
  int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());

  // field_offset = offsetof (struct s, field);
  Value *FieldOffset = nullptr;
  if (FlexibleArrayMemberFD != FD) {
    std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
    if (!Offset)
      return nullptr;
    FieldOffset =
        llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
  }

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // flexible_array_member_element_size = sizeof (*ptr->array);
  const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
  CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
  auto *FlexibleArrayMemberElementSize =
      llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

  // flexible_array_member_size = count * flexible_array_member_element_size;
  Value *FlexibleArrayMemberSize =
      Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
                        "flexible_array_member_size", !IsSigned, IsSigned);

  Value *Result = nullptr;
  if (FlexibleArrayMemberFD == FD) {
    if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
      // casted_flexible_array_member_element_size =
      //     sizeof (*((cast) ptr->array));
      llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
          FlexibleArrayMemberElementSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        CharUnits BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
        CastedFlexibleArrayMemberElementSize =
            llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
      }

      // index_size = index * casted_flexible_array_member_element_size;
      Value *IndexSize =
          Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
                            "index_size", !IsSigned, IsSigned);

      // result = flexible_array_member_size - index_size;
      Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
                                 !IsSigned, IsSigned);
    } else { // Option (1) 'ptr->array'
      // result = flexible_array_member_size;
      Result = FlexibleArrayMemberSize;
    }
  } else {
    // sizeof_struct = sizeof (struct s);
    llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
    const llvm::DataLayout &Layout = CGM.getDataLayout();
    TypeSize Size = Layout.getTypeSizeInBits(StructTy);
    Value *SizeofStruct =
        llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);

    if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
      // casted_field_element_size = sizeof (*((cast) ptr->field_array));
      CharUnits BaseSize;
      if (!CastedArrayElementTy.isNull() &&
          CastedArrayElementTy->isPointerType()) {
        BaseSize =
            Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
      } else {
        const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
        BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
      }

      llvm::ConstantInt *CastedFieldElementSize =
          llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);

      // field_offset += index * casted_field_element_size;
      Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
                                     "field_offset", !IsSigned, IsSigned);
      FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
    }
    // Option (3) '&ptr->field', and Option (4) continuation.
    // offset_diff = sizeof_struct - field_offset;
    Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
                                          "offset_diff", !IsSigned, IsSigned);

    // result = offset_diff + flexible_array_member_size;
    Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1496
1497/// Returns a Value corresponding to the size of the given expression.
1498/// This Value may be either of the following:
1499/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1500/// it)
1501/// - A call to the @llvm.objectsize intrinsic
1502///
1503/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1504/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1505/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1506llvm::Value *
1507CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1508 llvm::IntegerType *ResType,
1509 llvm::Value *EmittedE, bool IsDynamic) {
1510 // We need to reference an argument if the pointer is a parameter with the
1511 // pass_object_size attribute.
1512 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1513 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1514 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1515 if (Param != nullptr && PS != nullptr &&
1516 areBOSTypesCompatible(PS->getType(), Type)) {
1517 auto Iter = SizeArguments.find(Param);
1518 assert(Iter != SizeArguments.end());
1519
1520 const ImplicitParamDecl *D = Iter->second;
1521 auto DIter = LocalDeclMap.find(D);
1522 assert(DIter != LocalDeclMap.end());
1523
1524 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1525 getContext().getSizeType(), E->getBeginLoc());
1526 }
1527 }
1528
1529 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1530 // evaluate E for side-effects. In either case, we shouldn't lower to
1531 // @llvm.objectsize.
1532 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1533 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1534
1535 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1536 assert(Ptr->getType()->isPointerTy() &&
1537 "Non-pointer passed to __builtin_object_size?");
1538
1539 if (IsDynamic)
1540 // Emit special code for a flexible array member with the "counted_by"
1541 // attribute.
1542 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1543 return V;
1544
1545 Function *F =
1546 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1547
1548 // LLVM only supports 0 and 2; make sure we pass that along as a boolean.
1549 Value *Min = Builder.getInt1((Type & 2) != 0);
1550 // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
1551 Value *NullIsUnknown = Builder.getTrue();
1552 Value *Dynamic = Builder.getInt1(IsDynamic);
1553 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1554}
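// Editorial example (illustrative): for a hypothetical local
//   char buf[10];
// __builtin_object_size(buf, 0) becomes @llvm.objectsize.i64(ptr %buf,
// i1 false, i1 true, i1 false) and folds to 10; type 2 passes Min=true, so
// an unknown size folds to 0 rather than -1.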
1555
1556namespace {
1557/// A struct to generically describe a bit test intrinsic.
1558struct BitTest {
1559 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
1560 enum InterlockingKind : uint8_t {
1561 Unlocked,
1562 Sequential,
1563 Acquire,
1564 Release,
1565 NoFence
1566 };
1567
1568 ActionKind Action;
1569 InterlockingKind Interlocking;
1570 bool Is64Bit;
1571
1572 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1573};
1574
1575} // namespace
1576
1577BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1578 switch (BuiltinID) {
1579 // Main portable variants.
1580 case Builtin::BI_bittest:
1581 return {TestOnly, Unlocked, false};
1582 case Builtin::BI_bittestandcomplement:
1583 return {Complement, Unlocked, false};
1584 case Builtin::BI_bittestandreset:
1585 return {Reset, Unlocked, false};
1586 case Builtin::BI_bittestandset:
1587 return {Set, Unlocked, false};
1588 case Builtin::BI_interlockedbittestandreset:
1589 return {Reset, Sequential, false};
1590 case Builtin::BI_interlockedbittestandset:
1591 return {Set, Sequential, false};
1592
1593 // 64-bit variants.
1594 case Builtin::BI_bittest64:
1595 return {TestOnly, Unlocked, true};
1596 case Builtin::BI_bittestandcomplement64:
1597 return {Complement, Unlocked, true};
1598 case Builtin::BI_bittestandreset64:
1599 return {Reset, Unlocked, true};
1600 case Builtin::BI_bittestandset64:
1601 return {Set, Unlocked, true};
1602 case Builtin::BI_interlockedbittestandreset64:
1603 return {Reset, Sequential, true};
1604 case Builtin::BI_interlockedbittestandset64:
1605 return {Set, Sequential, true};
1606
1607 // ARM/AArch64-specific ordering variants.
1608 case Builtin::BI_interlockedbittestandset_acq:
1609 return {Set, Acquire, false};
1610 case Builtin::BI_interlockedbittestandset_rel:
1611 return {Set, Release, false};
1612 case Builtin::BI_interlockedbittestandset_nf:
1613 return {Set, NoFence, false};
1614 case Builtin::BI_interlockedbittestandreset_acq:
1615 return {Reset, Acquire, false};
1616 case Builtin::BI_interlockedbittestandreset_rel:
1617 return {Reset, Release, false};
1618 case Builtin::BI_interlockedbittestandreset_nf:
1619 return {Reset, NoFence, false};
1620 case Builtin::BI_interlockedbittestandreset64_acq:
1621 return {Reset, Acquire, true};
1622 case Builtin::BI_interlockedbittestandreset64_rel:
1623 return {Reset, Release, true};
1624 case Builtin::BI_interlockedbittestandreset64_nf:
1625 return {Reset, NoFence, true};
1626 case Builtin::BI_interlockedbittestandset64_acq:
1627 return {Set, Acquire, true};
1628 case Builtin::BI_interlockedbittestandset64_rel:
1629 return {Set, Release, true};
1630 case Builtin::BI_interlockedbittestandset64_nf:
1631 return {Set, NoFence, true};
1632 }
1633 llvm_unreachable("expected only bittest intrinsics");
1634}
1635
1636static char bitActionToX86BTCode(BitTest::ActionKind A) {
1637 switch (A) {
1638 case BitTest::TestOnly: return '\0';
1639 case BitTest::Complement: return 'c';
1640 case BitTest::Reset: return 'r';
1641 case BitTest::Set: return 's';
1642 }
1643 llvm_unreachable("invalid action");
1644}
1645
1646 static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction &CGF,
1647 BitTest BT,
1648 const CallExpr *E, Value *BitBase,
1649 Value *BitPos) {
1650 char Action = bitActionToX86BTCode(BT.Action);
1651 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1652
1653 // Build the assembly.
1654 SmallString<64> Asm;
1655 raw_svector_ostream AsmOS(Asm);
1656 if (BT.Interlocking != BitTest::Unlocked)
1657 AsmOS << "lock ";
1658 AsmOS << "bt";
1659 if (Action)
1660 AsmOS << Action;
1661 AsmOS << SizeSuffix << " $2, ($1)";
1662
1663 // Build the constraints. FIXME: We should support immediates when possible.
1664 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1665 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1666 if (!MachineClobbers.empty()) {
1667 Constraints += ',';
1668 Constraints += MachineClobbers;
1669 }
1670 llvm::IntegerType *IntType = llvm::IntegerType::get(
1671 CGF.getLLVMContext(),
1672 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1673 llvm::FunctionType *FTy =
1674 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1675
1676 llvm::InlineAsm *IA =
1677 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1678 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1679}
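// Editorial example (illustrative, not upstream text): for a call such as
//   unsigned char r = _interlockedbittestandset(&Flags, 5);
// this emits roughly the inline asm "lock btsl $2, ($1)" with constraints
// "={@ccc},r,r,~{cc},~{memory}", returning the tested bit through the carry
// flag via the @ccc asm-flag output.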
1680
1681static llvm::AtomicOrdering
1682getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1683 switch (I) {
1684 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1685 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1686 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1687 case BitTest::Release: return llvm::AtomicOrdering::Release;
1688 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1689 }
1690 llvm_unreachable("invalid interlocking");
1691}
1692
1693static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1694 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1695 llvm::Type *ArgType = ArgValue->getType();
1696
1697 // Boolean vectors can be cast directly to their bitfield representation. We
1698 // intentionally do not round up to the next power of two size and let LLVM
1699 // handle the trailing bits.
1700 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1701 VT && VT->getElementType()->isIntegerTy(1)) {
1702 llvm::Type *StorageType =
1703 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1704 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1705 }
1706
1707 return ArgValue;
1708}
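// Editorial note: e.g. an 8-element boolean vector argument is bitcast to i8
// here, so a bit-counting builtin then operates on the packed mask bits.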
1709
1710/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1711/// bits and a bit position and read and optionally modify the bit at that
1712/// position. The position index can be arbitrarily large, i.e. it can be larger
1713/// than 31 or 63, so we need an indexed load in the general case.
1714static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1715 unsigned BuiltinID,
1716 const CallExpr *E) {
1717 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1718 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1719
1720 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1721
1722 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1723 // indexing operation internally. Use them if possible.
1724 if (CGF.getTarget().getTriple().isX86())
1725 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1726
1727 // Otherwise, use generic code to load one byte and test the bit. Use all but
1728 // the bottom three bits as the array index, and the bottom three bits to form
1729 // a mask.
1730 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1731 Value *ByteIndex = CGF.Builder.CreateAShr(
1732 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1733 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1734 "bittest.byteaddr"),
1735 CGF.Int8Ty, CharUnits::One());
1736 Value *PosLow =
1737 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1738 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1739
1740 // The updating instructions will need a mask.
1741 Value *Mask = nullptr;
1742 if (BT.Action != BitTest::TestOnly) {
1743 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1744 "bittest.mask");
1745 }
1746
1747 // Check the action and ordering of the interlocked intrinsics.
1748 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1749
1750 Value *OldByte = nullptr;
1751 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1752 // Emit a combined atomicrmw load/store operation for the interlocked
1753 // intrinsics.
1754 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1755 if (BT.Action == BitTest::Reset) {
1756 Mask = CGF.Builder.CreateNot(Mask);
1757 RMWOp = llvm::AtomicRMWInst::And;
1758 }
1759 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1760 } else {
1761 // Emit a plain load for the non-interlocked intrinsics.
1762 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1763 Value *NewByte = nullptr;
1764 switch (BT.Action) {
1765 case BitTest::TestOnly:
1766 // Don't store anything.
1767 break;
1768 case BitTest::Complement:
1769 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1770 break;
1771 case BitTest::Reset:
1772 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1773 break;
1774 case BitTest::Set:
1775 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1776 break;
1777 }
1778 if (NewByte)
1779 CGF.Builder.CreateStore(NewByte, ByteAddr);
1780 }
1781
1782 // However we loaded the old byte, either by plain load or atomicrmw, shift
1783 // the bit into the low position and mask it to 0 or 1.
1784 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1785 return CGF.Builder.CreateAnd(
1786 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1787}
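// Editorial sketch of the generic (non-x86) lowering; value names follow
// those used above:
//   %byteidx  = ashr i32 %bitpos, 3                ; bittest.byteidx
//   %byteaddr = getelementptr inbounds i8, ptr %base, i32 %byteidx
//   %old      = load i8, ptr %byteaddr             ; or atomicrmw or/and
//   %shr      = lshr i8 %old, %poslow              ; bittest.shr
//   %res      = and i8 %shr, 1                     ; bittest.res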
1788
1789namespace {
1790enum class MSVCSetJmpKind {
1791 _setjmpex,
1792 _setjmp3,
1793 _setjmp
1794};
1795}
1796
1797/// MSVC handles setjmp a bit differently on different platforms. On every
1798/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1799/// parameters can be passed as variadic arguments, but we always pass none.
1800static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1801 const CallExpr *E) {
1802 llvm::Value *Arg1 = nullptr;
1803 llvm::Type *Arg1Ty = nullptr;
1804 StringRef Name;
1805 bool IsVarArg = false;
1806 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1807 Name = "_setjmp3";
1808 Arg1Ty = CGF.Int32Ty;
1809 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1810 IsVarArg = true;
1811 } else {
1812 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1813 Arg1Ty = CGF.Int8PtrTy;
1814 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1815 Arg1 = CGF.Builder.CreateCall(
1816 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1817 } else
1818 Arg1 = CGF.Builder.CreateCall(
1819 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1820 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1821 }
1822
1823 // Mark the call site and declaration with ReturnsTwice.
1824 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1825 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1826 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1827 llvm::Attribute::ReturnsTwice);
1828 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1829 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1830 ReturnsTwiceAttr, /*Local=*/true);
1831
1832 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1833 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1834 llvm::Value *Args[] = {Buf, Arg1};
1835 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1836 CB->setAttributes(ReturnsTwiceAttr);
1837 return RValue::get(CB);
1838}
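// Editorial sketch (assuming a 32-bit x86 MSVC target): the emitted call is
// roughly
//   %r = call i32 (ptr, i32, ...) @_setjmp3(ptr %buf, i32 0) ; returns_twice
// while other targets call _setjmp/_setjmpex with the frame address (or with
// @llvm.sponentry on AArch64) as the second argument.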
1839
1840// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1841 Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
1842 const CallExpr *E) {
1843 switch (BuiltinID) {
1844 case MSVCIntrin::_BitScanForward:
1845 case MSVCIntrin::_BitScanReverse: {
1846 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1847 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1848
1849 llvm::Type *ArgType = ArgValue->getType();
1850 llvm::Type *IndexType = IndexAddress.getElementType();
1851 llvm::Type *ResultType = ConvertType(E->getType());
1852
1853 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1854 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1855 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1856
1857 BasicBlock *Begin = Builder.GetInsertBlock();
1858 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1859 Builder.SetInsertPoint(End);
1860 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1861
1862 Builder.SetInsertPoint(Begin);
1863 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1864 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1865 Builder.CreateCondBr(IsZero, End, NotZero);
1866 Result->addIncoming(ResZero, Begin);
1867
1868 Builder.SetInsertPoint(NotZero);
1869
1870 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1871 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1872 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1873 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1874 Builder.CreateStore(ZeroCount, IndexAddress, false);
1875 } else {
1876 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1877 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1878
1879 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1880 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1881 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1882 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1883 Builder.CreateStore(Index, IndexAddress, false);
1884 }
1885 Builder.CreateBr(End);
1886 Result->addIncoming(ResOne, NotZero);
1887
1888 Builder.SetInsertPoint(End);
1889 return Result;
1890 }
1891 case MSVCIntrin::_InterlockedAnd:
1892 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1893 case MSVCIntrin::_InterlockedExchange:
1894 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1895 case MSVCIntrin::_InterlockedExchangeAdd:
1896 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1897 case MSVCIntrin::_InterlockedExchangeSub:
1898 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1899 case MSVCIntrin::_InterlockedOr:
1900 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1901 case MSVCIntrin::_InterlockedXor:
1902 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1903 case MSVCIntrin::_InterlockedExchangeAdd_acq:
1904 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1905 AtomicOrdering::Acquire);
1906 case MSVCIntrin::_InterlockedExchangeAdd_rel:
1907 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1908 AtomicOrdering::Release);
1909 case MSVCIntrin::_InterlockedExchangeAdd_nf:
1910 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1911 AtomicOrdering::Monotonic);
1912 case MSVCIntrin::_InterlockedExchange_acq:
1913 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1914 AtomicOrdering::Acquire);
1915 case MSVCIntrin::_InterlockedExchange_rel:
1916 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1917 AtomicOrdering::Release);
1918 case MSVCIntrin::_InterlockedExchange_nf:
1919 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1920 AtomicOrdering::Monotonic);
1921 case MSVCIntrin::_InterlockedCompareExchange:
1922 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1923 case MSVCIntrin::_InterlockedCompareExchange_acq:
1924 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1925 case MSVCIntrin::_InterlockedCompareExchange_rel:
1926 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1927 case MSVCIntrin::_InterlockedCompareExchange_nf:
1928 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1929 case MSVCIntrin::_InterlockedCompareExchange128:
1930 return EmitAtomicCmpXchg128ForMSIntrin(
1931 *this, E, AtomicOrdering::SequentiallyConsistent);
1932 case MSVCIntrin::_InterlockedCompareExchange128_acq:
1933 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1934 case MSVCIntrin::_InterlockedCompareExchange128_rel:
1935 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1936 case MSVCIntrin::_InterlockedCompareExchange128_nf:
1937 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1938 case MSVCIntrin::_InterlockedOr_acq:
1939 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1940 AtomicOrdering::Acquire);
1941 case MSVCIntrin::_InterlockedOr_rel:
1942 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1943 AtomicOrdering::Release);
1944 case MSVCIntrin::_InterlockedOr_nf:
1945 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1946 AtomicOrdering::Monotonic);
1947 case MSVCIntrin::_InterlockedXor_acq:
1948 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1949 AtomicOrdering::Acquire);
1950 case MSVCIntrin::_InterlockedXor_rel:
1951 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1952 AtomicOrdering::Release);
1953 case MSVCIntrin::_InterlockedXor_nf:
1954 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1955 AtomicOrdering::Monotonic);
1956 case MSVCIntrin::_InterlockedAnd_acq:
1957 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1958 AtomicOrdering::Acquire);
1959 case MSVCIntrin::_InterlockedAnd_rel:
1960 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1961 AtomicOrdering::Release);
1962 case MSVCIntrin::_InterlockedAnd_nf:
1963 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1964 AtomicOrdering::Monotonic);
1965 case MSVCIntrin::_InterlockedIncrement_acq:
1966 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1967 case MSVCIntrin::_InterlockedIncrement_rel:
1968 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1969 case MSVCIntrin::_InterlockedIncrement_nf:
1970 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1971 case MSVCIntrin::_InterlockedDecrement_acq:
1972 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1973 case MSVCIntrin::_InterlockedDecrement_rel:
1974 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1975 case MSVCIntrin::_InterlockedDecrement_nf:
1976 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1977
1978 case MSVCIntrin::_InterlockedDecrement:
1979 return EmitAtomicDecrementValue(*this, E);
1980 case MSVCIntrin::_InterlockedIncrement:
1981 return EmitAtomicIncrementValue(*this, E);
1982
1983 case MSVCIntrin::__fastfail: {
1984 // Request immediate process termination from the kernel. The instruction
1985 // sequences to do this are documented on MSDN:
1986 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1987 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1988 StringRef Asm, Constraints;
1989 switch (ISA) {
1990 default:
1991 ErrorUnsupported(E, "__fastfail call for this architecture");
1992 break;
1993 case llvm::Triple::x86:
1994 case llvm::Triple::x86_64:
1995 Asm = "int $$0x29";
1996 Constraints = "{cx}";
1997 break;
1998 case llvm::Triple::thumb:
1999 Asm = "udf #251";
2000 Constraints = "{r0}";
2001 break;
2002 case llvm::Triple::aarch64:
2003 Asm = "brk #0xF003";
2004 Constraints = "{w0}";
2005 }
2006 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2007 llvm::InlineAsm *IA =
2008 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2009 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2010 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2011 llvm::Attribute::NoReturn);
2012 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2013 CI->setAttributes(NoReturnAttr);
2014 return CI;
2015 }
2016 }
2017 llvm_unreachable("Incorrect MSVC intrinsic!");
2018}
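// Editorial example (not upstream text): _InterlockedIncrement(&v) goes
// through EmitAtomicIncrementValue above, i.e. roughly
//   %old = atomicrmw add ptr %v, i32 1 seq_cst
//   %new = add i32 %old, 1
// so the new value is returned, matching MSVC semantics.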
2019
2020namespace {
2021// ARC cleanup for __builtin_os_log_format
2022struct CallObjCArcUse final : EHScopeStack::Cleanup {
2023 CallObjCArcUse(llvm::Value *object) : object(object) {}
2024 llvm::Value *object;
2025
2026 void Emit(CodeGenFunction &CGF, Flags flags) override {
2027 CGF.EmitARCIntrinsicUse(object);
2028 }
2029};
2030}
2031
2032 Value *CodeGenFunction::EmitCheckedArgForBuiltin(const Expr *E,
2033 BuiltinCheckKind Kind) {
2034 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2035 "Unsupported builtin check kind");
2036
2037 Value *ArgValue = EmitBitCountExpr(*this, E);
2038 if (!SanOpts.has(SanitizerKind::Builtin))
2039 return ArgValue;
2040
2041 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2042 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2043 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2044 Value *Cond = Builder.CreateICmpNE(
2045 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2046 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2047 {EmitCheckSourceLocation(E->getExprLoc()),
2048 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2049 {});
2050 return ArgValue;
2051}
2052
2053 Value *CodeGenFunction::EmitCheckedArgForAssume(const Expr *E) {
2054 Value *ArgValue = EvaluateExprAsBool(E);
2055 if (!SanOpts.has(SanitizerKind::Builtin))
2056 return ArgValue;
2057
2058 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2059 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2060 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2061 EmitCheck(
2062 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2063 {EmitCheckSourceLocation(E->getExprLoc()),
2064 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2065 {});
2066 return ArgValue;
2067}
2068
2069static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2070 return CGF.Builder.CreateBinaryIntrinsic(
2071 Intrinsic::abs, ArgValue,
2072 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2073}
2074
2076 bool SanitizeOverflow) {
2077 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2078
2079 // Try to eliminate overflow check.
2080 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2081 if (!VCI->isMinSignedValue())
2082 return EmitAbs(CGF, ArgValue, true);
2083 }
2084
2085 SmallVector<SanitizerKind::SanitizerOrdinal, 1> Ordinals;
2086 SanitizerHandler CheckHandler;
2087 if (SanitizeOverflow) {
2088 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2089 CheckHandler = SanitizerHandler::NegateOverflow;
2090 } else
2091 CheckHandler = SanitizerHandler::SubOverflow;
2092
2093 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2094
2095 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2096 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2097 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2098 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2099 Value *NotOverflow = CGF.Builder.CreateNot(
2100 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2101
2102 // TODO: support -ftrapv-handler.
2103 if (SanitizeOverflow) {
2104 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2105 CheckHandler,
2106 {CGF.EmitCheckSourceLocation(E->getArg(0)->getExprLoc()),
2107 CGF.EmitCheckTypeDescriptor(E->getType())},
2108 {ArgValue});
2109 } else
2110 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2111
2112 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2113 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2114}
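// Editorial worked example: for a 32-bit int, abs(INT_MIN) is the only
// overflowing input. 0 - x is computed via @llvm.ssub.with.overflow.i32, the
// overflow bit fires exactly for x == -2147483648, and otherwise the final
// select yields x when x >= 0 and the negation when x < 0.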
2115
2116/// Get the argument type for arguments to os_log_helper.
2117 static CanQualType getOSLogArgType(ASTContext &C, int Size) {
2118 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2119 return C.getCanonicalType(UnsignedTy);
2120}
2121
2122 llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
2123 const analyze_os_log::OSLogBufferLayout &Layout,
2124 CharUnits BufferAlignment) {
2125 ASTContext &Ctx = getContext();
2126
2127 llvm::SmallString<64> Name;
2128 {
2129 raw_svector_ostream OS(Name);
2130 OS << "__os_log_helper";
2131 OS << "_" << BufferAlignment.getQuantity();
2132 OS << "_" << int(Layout.getSummaryByte());
2133 OS << "_" << int(Layout.getNumArgsByte());
2134 for (const auto &Item : Layout.Items)
2135 OS << "_" << int(Item.getSizeByte()) << "_"
2136 << int(Item.getDescriptorByte());
2137 }
2138
2139 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2140 return F;
2141
2142 llvm::SmallVector<QualType, 4> ArgTys;
2143 FunctionArgList Args;
2144 Args.push_back(ImplicitParamDecl::Create(
2145 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2146 ImplicitParamKind::Other));
2147 ArgTys.emplace_back(Ctx.VoidPtrTy);
2148
2149 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2150 char Size = Layout.Items[I].getSizeByte();
2151 if (!Size)
2152 continue;
2153
2154 QualType ArgTy = getOSLogArgType(Ctx, Size);
2155 Args.push_back(ImplicitParamDecl::Create(
2156 Ctx, nullptr, SourceLocation(),
2157 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2158 ImplicitParamKind::Other));
2159 ArgTys.emplace_back(ArgTy);
2160 }
2161
2162 QualType ReturnTy = Ctx.VoidTy;
2163
2164 // The helper function has linkonce_odr linkage to enable the linker to merge
2165 // identical functions. To ensure the merging always happens, 'noinline' is
2166 // attached to the function when compiling with -Oz.
2167 const CGFunctionInfo &FI =
2168 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2169 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2170 llvm::Function *Fn = llvm::Function::Create(
2171 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2172 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2173 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2174 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2175 Fn->setDoesNotThrow();
2176
2177 // Attach 'noinline' at -Oz.
2178 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2179 Fn->addFnAttr(llvm::Attribute::NoInline);
2180
2181 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2182 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2183
2184 // Create a scope with an artificial location for the body of this function.
2185 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2186
2187 CharUnits Offset;
2188 Address BufAddr = makeNaturalAddressForPointer(
2189 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2190 BufferAlignment);
2191 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2192 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2193 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2194 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2195
2196 unsigned I = 1;
2197 for (const auto &Item : Layout.Items) {
2198 Builder.CreateStore(
2199 Builder.getInt8(Item.getDescriptorByte()),
2200 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2201 Builder.CreateStore(
2202 Builder.getInt8(Item.getSizeByte()),
2203 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2204
2205 CharUnits Size = Item.size();
2206 if (!Size.getQuantity())
2207 continue;
2208
2209 Address Arg = GetAddrOfLocalVar(Args[I]);
2210 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2211 Addr = Addr.withElementType(Arg.getElementType());
2212 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2213 Offset += Size;
2214 ++I;
2215 }
2216
2217 FinishFunction();
2218
2219 return Fn;
2220}
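// Editorial sketch of the buffer the helper fills in:
//   [summary][numArgs] then, per item, [argDescriptor][argSize][argData...]
// e.g. a 4-byte scalar argument contributes one descriptor byte, a size byte
// of 4, and four data bytes (the descriptor value shown is illustrative).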
2221
2222 RValue CodeGenFunction::emitBuiltinOSLogFormat(const CallExpr &E) {
2223 assert(E.getNumArgs() >= 2 &&
2224 "__builtin_os_log_format takes at least 2 arguments");
2225 ASTContext &Ctx = getContext();
2226 analyze_os_log::OSLogBufferLayout Layout;
2227 analyze_os_log::computeOSLogBufferLayout(Ctx, &E, Layout);
2228 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2229
2230 // Ignore argument 1, the format string. It is not currently used.
2231 CallArgList Args;
2232 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2233
2234 for (const auto &Item : Layout.Items) {
2235 int Size = Item.getSizeByte();
2236 if (!Size)
2237 continue;
2238
2239 llvm::Value *ArgVal;
2240
2241 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2242 uint64_t Val = 0;
2243 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2244 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2245 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2246 } else if (const Expr *TheExpr = Item.getExpr()) {
2247 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2248
2249 // If a temporary object that requires destruction after the full
2250 // expression is passed, push a lifetime-extended cleanup to extend its
2251 // lifetime to the end of the enclosing block scope.
2252 auto LifetimeExtendObject = [&](const Expr *E) {
2253 E = E->IgnoreParenCasts();
2254 // Extend lifetimes of objects returned by function calls and message
2255 // sends.
2256
2257 // FIXME: We should do this in other cases in which temporaries are
2258 // created including arguments of non-ARC types (e.g., C++
2259 // temporaries).
2260 if (isa<CallExpr>(E) || isa<ObjCMessageExpr>(E))
2261 return true;
2262 return false;
2263 };
2264
2265 if (TheExpr->getType()->isObjCRetainableType() &&
2266 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2267 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2268 "Only scalar can be a ObjC retainable type");
2269 if (!isa<Constant>(ArgVal)) {
2270 CleanupKind Cleanup = getARCCleanupKind();
2271 QualType Ty = TheExpr->getType();
2272 RawAddress Alloca = RawAddress::invalid();
2273 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2274 ArgVal = EmitARCRetain(Ty, ArgVal);
2275 Builder.CreateStore(ArgVal, Addr);
2276 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2277 destroyARCStrongPrecise,
2278 Cleanup & EHCleanup);
2279
2280 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2281 // argument has to be alive.
2282 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2283 pushCleanupAfterFullExpr<CallObjCArcUse>(Cleanup, ArgVal);
2284 }
2285 }
2286 } else {
2287 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2288 }
2289
2290 unsigned ArgValSize =
2291 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2292 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2293 ArgValSize);
2294 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2295 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2296 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2297 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2298 Args.add(RValue::get(ArgVal), ArgTy);
2299 }
2300
2301 const CGFunctionInfo &FI =
2302 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2303 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2304 Layout, BufAddr.getAlignment());
2305 EmitCall(FI, CGCallee::forDirect(F), ReturnValueSlot(), Args);
2306 return RValue::get(BufAddr, *this);
2307}
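// Editorial usage sketch: a call such as
//   __builtin_os_log_format(buf, "%d", x);
// derives the layout from the format string, passes x as a 4-byte argument,
// and calls a generated helper whose name encodes the layout (shown
// schematically above as __os_log_helper_<align>_<summary>_...), then
// returns the buffer pointer.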
2308
2309 static bool isSpecialUnsignedMultiplySignedResult(
2310 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2311 WidthAndSignedness ResultInfo) {
2312 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2313 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2314 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2315}
2316
2317 static RValue EmitCheckedUnsignedMultiplySignedResult(
2318 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2319 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2320 const clang::Expr *ResultArg, QualType ResultQTy,
2321 WidthAndSignedness ResultInfo) {
2322 assert(isSpecialUnsignedMultiplySignedResult(
2323 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2324 "Cannot specialize this multiply");
2325
2326 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2327 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2328
2329 llvm::Value *HasOverflow;
2330 llvm::Value *Result = EmitOverflowIntrinsic(
2331 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2332
2333 // The intrinsic call will detect overflow when the value is > UINT_MAX;
2334 // however, since the original builtin had a signed result, we need to report
2335 // an overflow when the result is greater than INT_MAX.
2336 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2337 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2338
2339 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2340 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2341
2342 bool isVolatile =
2343 ResultArg->getType()->getPointeeType().isVolatileQualified();
2344 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2345 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2346 isVolatile);
2347 return RValue::get(HasOverflow);
2348}
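// Editorial worked example (32-bit operands, signed 32-bit result): for
//   __builtin_mul_overflow(2000000000u, 2u, &sres)
// the unsigned product 4000000000 fits in 32 bits, so the umul intrinsic
// reports no overflow, but the extra ICmpUGT against INT_MAX (2147483647)
// still flags it; that second check is the point of this specialization.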
2349
2350/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2351static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2352 WidthAndSignedness Op1Info,
2353 WidthAndSignedness Op2Info,
2354 WidthAndSignedness ResultInfo) {
2355 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2356 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2357 Op1Info.Signed != Op2Info.Signed;
2358}
2359
2360/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2361/// the generic checked-binop irgen.
2362static RValue
2363 EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
2364 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2365 WidthAndSignedness Op2Info,
2366 const clang::Expr *ResultArg, QualType ResultQTy,
2367 WidthAndSignedness ResultInfo) {
2368 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2369 Op2Info, ResultInfo) &&
2370 "Not a mixed-sign multipliction we can specialize");
2371
2372 // Emit the signed and unsigned operands.
2373 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2374 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2375 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2376 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2377 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2378 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2379
2380 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2381 if (SignedOpWidth < UnsignedOpWidth)
2382 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2383 if (UnsignedOpWidth < SignedOpWidth)
2384 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2385
2386 llvm::Type *OpTy = Signed->getType();
2387 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2388 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2389 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2390 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2391
2392 // Take the absolute value of the signed operand.
2393 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2394 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2395 llvm::Value *AbsSigned =
2396 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2397
2398 // Perform a checked unsigned multiplication.
2399 llvm::Value *UnsignedOverflow;
2400 llvm::Value *UnsignedResult =
2401 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2402 Unsigned, UnsignedOverflow);
2403
2404 llvm::Value *Overflow, *Result;
2405 if (ResultInfo.Signed) {
2406 // Signed overflow occurs if the result is greater than INT_MAX or less
2407 // than INT_MIN, i.e. when |Result| > (INT_MAX + IsNegative).
2408 auto IntMax =
2409 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2410 llvm::Value *MaxResult =
2411 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2412 CGF.Builder.CreateZExt(IsNegative, OpTy));
2413 llvm::Value *SignedOverflow =
2414 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2415 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2416
2417 // Prepare the signed result (possibly by negating it).
2418 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2419 llvm::Value *SignedResult =
2420 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2421 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2422 } else {
2423 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2424 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2425 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2426 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2427 if (ResultInfo.Width < OpWidth) {
2428 auto IntMax =
2429 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2430 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2431 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2432 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2433 }
2434
2435 // Negate the product if it would be negative in infinite precision.
2436 Result = CGF.Builder.CreateSelect(
2437 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2438
2439 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2440 }
2441 assert(Overflow && Result && "Missing overflow or result");
2442
2443 bool isVolatile =
2444 ResultArg->getType()->getPointeeType().isVolatileQualified();
2445 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2446 isVolatile);
2447 return RValue::get(Overflow);
2448}
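// Editorial worked example: for int * unsigned -> int with values -3 and 5,
// the signed operand is absolute-valued to 3, the unsigned multiply yields
// 15 without overflow, IsNegative selects the negated result (-15), and the
// truncated value is stored while the overflow flag comes back false.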
2449
2450static bool
2451 TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
2452 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2453 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2454 Ty = Ctx.getBaseElementType(Arr);
2455
2456 const auto *Record = Ty->getAsCXXRecordDecl();
2457 if (!Record)
2458 return false;
2459
2460 // We've already checked this type, or are in the process of checking it.
2461 if (!Seen.insert(Record).second)
2462 return false;
2463
2464 assert(Record->hasDefinition() &&
2465 "Incomplete types should already be diagnosed");
2466
2467 if (Record->isDynamicClass())
2468 return true;
2469
2470 for (FieldDecl *F : Record->fields()) {
2471 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2472 return true;
2473 }
2474 return false;
2475}
2476
2477/// Determine if the specified type requires laundering by checking if it is a
2478/// dynamic class type or contains a subobject which is a dynamic class type.
2479 static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty) {
2480 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2481 return false;
2482 llvm::SmallPtrSet<const Decl *, 16> Seen;
2483 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2484}
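// Editorial example: under -fstrict-vtable-pointers, a type with a dynamic
// class subobject, e.g.
//   struct Poly { virtual ~Poly(); }; struct Holder { Poly p; };
// requires laundering, whereas an aggregate of plain scalars does not.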
2485
2486RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2487 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2488 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2489
2490 // The builtin's shift arg may have a different type than the source arg and
2491 // result, but the LLVM intrinsic uses the same type for all values.
2492 llvm::Type *Ty = Src->getType();
2493 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2494
2495 // Rotate is a special case of an LLVM funnel shift: the first two args are the same.
2496 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2497 Function *F = CGM.getIntrinsic(IID, Ty);
2498 return RValue::get(Builder.CreateCall(F, { Src, Src, ShiftAmt }));
2499}
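// Editorial example: __builtin_rotateleft32(x, n) lowers to
//   call i32 @llvm.fshl.i32(i32 %x, i32 %x, i32 %n)
// a funnel shift whose two data operands are the same value; rotate-right
// uses @llvm.fshr.i32 in the same way.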
2500
2501// Map math builtins for long-double to f128 version.
2502static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2503 switch (BuiltinID) {
2504#define MUTATE_LDBL(func) \
2505 case Builtin::BI__builtin_##func##l: \
2506 return Builtin::BI__builtin_##func##f128;
2537 MUTATE_LDBL(nans)
2538 MUTATE_LDBL(inf)
2557 MUTATE_LDBL(huge_val)
2567#undef MUTATE_LDBL
2568 default:
2569 return BuiltinID;
2570 }
2571}
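// Editorial example (assuming sin is among the MUTATE_LDBL entries, as most
// libm builtins are): on a PPC64 target whose long double is IEEE quad,
// Builtin::BI__builtin_sinl is rewritten to Builtin::BI__builtin_sinf128 so
// that the f128 lowering path is used.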
2572
2573static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2574 Value *V) {
2575 if (CGF.Builder.getIsFPConstrained() &&
2576 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2577 if (Value *Result =
2578 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2579 return Result;
2580 }
2581 return nullptr;
2582}
2583
2584 static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF,
2585 const FunctionDecl *FD) {
2586 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2587 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2588 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2589
2590 SmallVector<Value *, 16> Args;
2591 for (auto &&FormalTy : FnTy->params())
2592 Args.push_back(llvm::PoisonValue::get(FormalTy));
2593
2594 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2595}
2596
2597 RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
2598 const CallExpr *E,
2599 ReturnValueSlot ReturnValue) {
2600 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2601 "Should not codegen for consteval builtins");
2602
2603 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2604 // See if we can constant fold this builtin. If so, don't emit it at all.
2605 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2606 Expr::EvalResult Result;
2607 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2608 !Result.hasSideEffects()) {
2609 if (Result.Val.isInt())
2610 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2611 Result.Val.getInt()));
2612 if (Result.Val.isFloat())
2613 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2614 Result.Val.getFloat()));
2615 }
2616
2617 // If current long-double semantics is IEEE 128-bit, replace math builtins
2618 // of long-double with f128 equivalent.
2619 // TODO: This mutation should also be applied to targets other than PPC,
2620 // once the backend supports IEEE 128-bit style libcalls.
2621 if (getTarget().getTriple().isPPC64() &&
2622 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2623 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2624
2625 // If the builtin has been declared explicitly with an assembler label,
2626 // disable the specialized emitting below. Ideally we should communicate the
2627 // rename in IR, or at least avoid generating the intrinsic calls that are
2628 // likely to get lowered to the renamed library functions.
2629 const unsigned BuiltinIDIfNoAsmLabel =
2630 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2631
2632 std::optional<bool> ErrnoOverriden;
2633 // ErrnoOverriden is true if math-errno is overridden via the
2634 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2635 // which implies math-errno.
2636 if (E->hasStoredFPFeatures()) {
2637 FPOptionsOverride OP = E->getFPFeatures();
2638 if (OP.hasMathErrnoOverride())
2639 ErrnoOverriden = OP.getMathErrnoOverride();
2640 }
2641 // True if '__attribute__((optnone))' is used. This attribute overrides
2642 // fast-math, which implies math-errno.
2643 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2644
2645 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2646
2647 bool GenerateFPMathIntrinsics =
2649 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2650 OptNone, IsOptimizationEnabled);
2651
2652 if (GenerateFPMathIntrinsics) {
2653 switch (BuiltinIDIfNoAsmLabel) {
2654 case Builtin::BIacos:
2655 case Builtin::BIacosf:
2656 case Builtin::BIacosl:
2657 case Builtin::BI__builtin_acos:
2658 case Builtin::BI__builtin_acosf:
2659 case Builtin::BI__builtin_acosf16:
2660 case Builtin::BI__builtin_acosl:
2661 case Builtin::BI__builtin_acosf128:
2662 case Builtin::BI__builtin_elementwise_acos:
2663 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2664 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2665
2666 case Builtin::BIasin:
2667 case Builtin::BIasinf:
2668 case Builtin::BIasinl:
2669 case Builtin::BI__builtin_asin:
2670 case Builtin::BI__builtin_asinf:
2671 case Builtin::BI__builtin_asinf16:
2672 case Builtin::BI__builtin_asinl:
2673 case Builtin::BI__builtin_asinf128:
2674 case Builtin::BI__builtin_elementwise_asin:
2675 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2676 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2677
2678 case Builtin::BIatan:
2679 case Builtin::BIatanf:
2680 case Builtin::BIatanl:
2681 case Builtin::BI__builtin_atan:
2682 case Builtin::BI__builtin_atanf:
2683 case Builtin::BI__builtin_atanf16:
2684 case Builtin::BI__builtin_atanl:
2685 case Builtin::BI__builtin_atanf128:
2686 case Builtin::BI__builtin_elementwise_atan:
2687 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2688 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2689
2690 case Builtin::BIatan2:
2691 case Builtin::BIatan2f:
2692 case Builtin::BIatan2l:
2693 case Builtin::BI__builtin_atan2:
2694 case Builtin::BI__builtin_atan2f:
2695 case Builtin::BI__builtin_atan2f16:
2696 case Builtin::BI__builtin_atan2l:
2697 case Builtin::BI__builtin_atan2f128:
2698 case Builtin::BI__builtin_elementwise_atan2:
2699 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(
2700 *this, E, Intrinsic::atan2,
2701 Intrinsic::experimental_constrained_atan2));
2702
2703 case Builtin::BIceil:
2704 case Builtin::BIceilf:
2705 case Builtin::BIceill:
2706 case Builtin::BI__builtin_ceil:
2707 case Builtin::BI__builtin_ceilf:
2708 case Builtin::BI__builtin_ceilf16:
2709 case Builtin::BI__builtin_ceill:
2710 case Builtin::BI__builtin_ceilf128:
2711 case Builtin::BI__builtin_elementwise_ceil:
2712 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2713 Intrinsic::ceil,
2714 Intrinsic::experimental_constrained_ceil));
2715
2716 case Builtin::BIcopysign:
2717 case Builtin::BIcopysignf:
2718 case Builtin::BIcopysignl:
2719 case Builtin::BI__builtin_copysign:
2720 case Builtin::BI__builtin_copysignf:
2721 case Builtin::BI__builtin_copysignf16:
2722 case Builtin::BI__builtin_copysignl:
2723 case Builtin::BI__builtin_copysignf128:
2724 return RValue::get(
2725 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2726
2727 case Builtin::BIcos:
2728 case Builtin::BIcosf:
2729 case Builtin::BIcosl:
2730 case Builtin::BI__builtin_cos:
2731 case Builtin::BI__builtin_cosf:
2732 case Builtin::BI__builtin_cosf16:
2733 case Builtin::BI__builtin_cosl:
2734 case Builtin::BI__builtin_cosf128:
2735 case Builtin::BI__builtin_elementwise_cos:
2736 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2737 Intrinsic::cos,
2738 Intrinsic::experimental_constrained_cos));
2739
2740 case Builtin::BIcosh:
2741 case Builtin::BIcoshf:
2742 case Builtin::BIcoshl:
2743 case Builtin::BI__builtin_cosh:
2744 case Builtin::BI__builtin_coshf:
2745 case Builtin::BI__builtin_coshf16:
2746 case Builtin::BI__builtin_coshl:
2747 case Builtin::BI__builtin_coshf128:
2748 case Builtin::BI__builtin_elementwise_cosh:
2749 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
2750 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2751
2752 case Builtin::BIexp:
2753 case Builtin::BIexpf:
2754 case Builtin::BIexpl:
2755 case Builtin::BI__builtin_exp:
2756 case Builtin::BI__builtin_expf:
2757 case Builtin::BI__builtin_expf16:
2758 case Builtin::BI__builtin_expl:
2759 case Builtin::BI__builtin_expf128:
2760 case Builtin::BI__builtin_elementwise_exp:
2761 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2762 Intrinsic::exp,
2763 Intrinsic::experimental_constrained_exp));
2764
2765 case Builtin::BIexp2:
2766 case Builtin::BIexp2f:
2767 case Builtin::BIexp2l:
2768 case Builtin::BI__builtin_exp2:
2769 case Builtin::BI__builtin_exp2f:
2770 case Builtin::BI__builtin_exp2f16:
2771 case Builtin::BI__builtin_exp2l:
2772 case Builtin::BI__builtin_exp2f128:
2773 case Builtin::BI__builtin_elementwise_exp2:
2774 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2775 Intrinsic::exp2,
2776 Intrinsic::experimental_constrained_exp2));
2777 case Builtin::BI__builtin_exp10:
2778 case Builtin::BI__builtin_exp10f:
2779 case Builtin::BI__builtin_exp10f16:
2780 case Builtin::BI__builtin_exp10l:
2781 case Builtin::BI__builtin_exp10f128:
2782 case Builtin::BI__builtin_elementwise_exp10: {
2783 // TODO: strictfp support
2784 if (Builder.getIsFPConstrained())
2785 break;
2786 return RValue::get(
2787 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2788 }
2789 case Builtin::BIfabs:
2790 case Builtin::BIfabsf:
2791 case Builtin::BIfabsl:
2792 case Builtin::BI__builtin_fabs:
2793 case Builtin::BI__builtin_fabsf:
2794 case Builtin::BI__builtin_fabsf16:
2795 case Builtin::BI__builtin_fabsl:
2796 case Builtin::BI__builtin_fabsf128:
2797 return RValue::get(
2798 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2799
2800 case Builtin::BIfloor:
2801 case Builtin::BIfloorf:
2802 case Builtin::BIfloorl:
2803 case Builtin::BI__builtin_floor:
2804 case Builtin::BI__builtin_floorf:
2805 case Builtin::BI__builtin_floorf16:
2806 case Builtin::BI__builtin_floorl:
2807 case Builtin::BI__builtin_floorf128:
2808 case Builtin::BI__builtin_elementwise_floor:
2809 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2810 Intrinsic::floor,
2811 Intrinsic::experimental_constrained_floor));
2812
2813 case Builtin::BIfma:
2814 case Builtin::BIfmaf:
2815 case Builtin::BIfmal:
2816 case Builtin::BI__builtin_fma:
2817 case Builtin::BI__builtin_fmaf:
2818 case Builtin::BI__builtin_fmaf16:
2819 case Builtin::BI__builtin_fmal:
2820 case Builtin::BI__builtin_fmaf128:
2821 case Builtin::BI__builtin_elementwise_fma:
2822 return RValue::get(emitTernaryMaybeConstrainedFPBuiltin(*this, E,
2823 Intrinsic::fma,
2824 Intrinsic::experimental_constrained_fma));
2825
2826 case Builtin::BIfmax:
2827 case Builtin::BIfmaxf:
2828 case Builtin::BIfmaxl:
2829 case Builtin::BI__builtin_fmax:
2830 case Builtin::BI__builtin_fmaxf:
2831 case Builtin::BI__builtin_fmaxf16:
2832 case Builtin::BI__builtin_fmaxl:
2833 case Builtin::BI__builtin_fmaxf128:
2834 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2835 Intrinsic::maxnum,
2836 Intrinsic::experimental_constrained_maxnum));
2837
2838 case Builtin::BIfmin:
2839 case Builtin::BIfminf:
2840 case Builtin::BIfminl:
2841 case Builtin::BI__builtin_fmin:
2842 case Builtin::BI__builtin_fminf:
2843 case Builtin::BI__builtin_fminf16:
2844 case Builtin::BI__builtin_fminl:
2845 case Builtin::BI__builtin_fminf128:
2846 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2847 Intrinsic::minnum,
2848 Intrinsic::experimental_constrained_minnum));
2849
2850 case Builtin::BIfmaximum_num:
2851 case Builtin::BIfmaximum_numf:
2852 case Builtin::BIfmaximum_numl:
2853 case Builtin::BI__builtin_fmaximum_num:
2854 case Builtin::BI__builtin_fmaximum_numf:
2855 case Builtin::BI__builtin_fmaximum_numf16:
2856 case Builtin::BI__builtin_fmaximum_numl:
2857 case Builtin::BI__builtin_fmaximum_numf128:
2858 return RValue::get(
2859 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2860
2861 case Builtin::BIfminimum_num:
2862 case Builtin::BIfminimum_numf:
2863 case Builtin::BIfminimum_numl:
2864 case Builtin::BI__builtin_fminimum_num:
2865 case Builtin::BI__builtin_fminimum_numf:
2866 case Builtin::BI__builtin_fminimum_numf16:
2867 case Builtin::BI__builtin_fminimum_numl:
2868 case Builtin::BI__builtin_fminimum_numf128:
2869 return RValue::get(
2870 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2871
2872 // fmod() is a special-case. It maps to the frem instruction rather than an
2873 // LLVM intrinsic.
2874 case Builtin::BIfmod:
2875 case Builtin::BIfmodf:
2876 case Builtin::BIfmodl:
2877 case Builtin::BI__builtin_fmod:
2878 case Builtin::BI__builtin_fmodf:
2879 case Builtin::BI__builtin_fmodf16:
2880 case Builtin::BI__builtin_fmodl:
2881 case Builtin::BI__builtin_fmodf128:
2882 case Builtin::BI__builtin_elementwise_fmod: {
2883 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2884 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2885 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2886 if (Builder.getIsFPConstrained()) {
2887 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2888 Arg1->getType());
2889 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2890 } else {
2891 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2892 }
2893 }
2894
2895 case Builtin::BIlog:
2896 case Builtin::BIlogf:
2897 case Builtin::BIlogl:
2898 case Builtin::BI__builtin_log:
2899 case Builtin::BI__builtin_logf:
2900 case Builtin::BI__builtin_logf16:
2901 case Builtin::BI__builtin_logl:
2902 case Builtin::BI__builtin_logf128:
2903 case Builtin::BI__builtin_elementwise_log:
2904 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2905 Intrinsic::log,
2906 Intrinsic::experimental_constrained_log));
2907
2908 case Builtin::BIlog10:
2909 case Builtin::BIlog10f:
2910 case Builtin::BIlog10l:
2911 case Builtin::BI__builtin_log10:
2912 case Builtin::BI__builtin_log10f:
2913 case Builtin::BI__builtin_log10f16:
2914 case Builtin::BI__builtin_log10l:
2915 case Builtin::BI__builtin_log10f128:
2916 case Builtin::BI__builtin_elementwise_log10:
2917 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2918 Intrinsic::log10,
2919 Intrinsic::experimental_constrained_log10));
2920
2921 case Builtin::BIlog2:
2922 case Builtin::BIlog2f:
2923 case Builtin::BIlog2l:
2924 case Builtin::BI__builtin_log2:
2925 case Builtin::BI__builtin_log2f:
2926 case Builtin::BI__builtin_log2f16:
2927 case Builtin::BI__builtin_log2l:
2928 case Builtin::BI__builtin_log2f128:
2929 case Builtin::BI__builtin_elementwise_log2:
2930 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2931 Intrinsic::log2,
2932 Intrinsic::experimental_constrained_log2));
2933
2934 case Builtin::BInearbyint:
2935 case Builtin::BInearbyintf:
2936 case Builtin::BInearbyintl:
2937 case Builtin::BI__builtin_nearbyint:
2938 case Builtin::BI__builtin_nearbyintf:
2939 case Builtin::BI__builtin_nearbyintl:
2940 case Builtin::BI__builtin_nearbyintf128:
2941 case Builtin::BI__builtin_elementwise_nearbyint:
2942 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2943 Intrinsic::nearbyint,
2944 Intrinsic::experimental_constrained_nearbyint));
2945
2946 case Builtin::BIpow:
2947 case Builtin::BIpowf:
2948 case Builtin::BIpowl:
2949 case Builtin::BI__builtin_pow:
2950 case Builtin::BI__builtin_powf:
2951 case Builtin::BI__builtin_powf16:
2952 case Builtin::BI__builtin_powl:
2953 case Builtin::BI__builtin_powf128:
2954 case Builtin::BI__builtin_elementwise_pow:
2955 return RValue::get(emitBinaryMaybeConstrainedFPBuiltin(*this, E,
2956 Intrinsic::pow,
2957 Intrinsic::experimental_constrained_pow));
2958
2959 case Builtin::BIrint:
2960 case Builtin::BIrintf:
2961 case Builtin::BIrintl:
2962 case Builtin::BI__builtin_rint:
2963 case Builtin::BI__builtin_rintf:
2964 case Builtin::BI__builtin_rintf16:
2965 case Builtin::BI__builtin_rintl:
2966 case Builtin::BI__builtin_rintf128:
2967 case Builtin::BI__builtin_elementwise_rint:
2968 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2969 Intrinsic::rint,
2970 Intrinsic::experimental_constrained_rint));
2971
2972 case Builtin::BIround:
2973 case Builtin::BIroundf:
2974 case Builtin::BIroundl:
2975 case Builtin::BI__builtin_round:
2976 case Builtin::BI__builtin_roundf:
2977 case Builtin::BI__builtin_roundf16:
2978 case Builtin::BI__builtin_roundl:
2979 case Builtin::BI__builtin_roundf128:
2980 case Builtin::BI__builtin_elementwise_round:
2981 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2982 Intrinsic::round,
2983 Intrinsic::experimental_constrained_round));
2984
2985 case Builtin::BIroundeven:
2986 case Builtin::BIroundevenf:
2987 case Builtin::BIroundevenl:
2988 case Builtin::BI__builtin_roundeven:
2989 case Builtin::BI__builtin_roundevenf:
2990 case Builtin::BI__builtin_roundevenf16:
2991 case Builtin::BI__builtin_roundevenl:
2992 case Builtin::BI__builtin_roundevenf128:
2993 case Builtin::BI__builtin_elementwise_roundeven:
2994 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
2995 Intrinsic::roundeven,
2996 Intrinsic::experimental_constrained_roundeven));
2997
2998 case Builtin::BIsin:
2999 case Builtin::BIsinf:
3000 case Builtin::BIsinl:
3001 case Builtin::BI__builtin_sin:
3002 case Builtin::BI__builtin_sinf:
3003 case Builtin::BI__builtin_sinf16:
3004 case Builtin::BI__builtin_sinl:
3005 case Builtin::BI__builtin_sinf128:
3006 case Builtin::BI__builtin_elementwise_sin:
3007 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3008 Intrinsic::sin,
3009 Intrinsic::experimental_constrained_sin));
3010
3011 case Builtin::BIsinh:
3012 case Builtin::BIsinhf:
3013 case Builtin::BIsinhl:
3014 case Builtin::BI__builtin_sinh:
3015 case Builtin::BI__builtin_sinhf:
3016 case Builtin::BI__builtin_sinhf16:
3017 case Builtin::BI__builtin_sinhl:
3018 case Builtin::BI__builtin_sinhf128:
3019 case Builtin::BI__builtin_elementwise_sinh:
3020 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3021 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3022
3023 case Builtin::BI__builtin_sincospi:
3024 case Builtin::BI__builtin_sincospif:
3025 case Builtin::BI__builtin_sincospil:
3026 if (Builder.getIsFPConstrained())
3027 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3028 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3029 return RValue::get(nullptr);
3030
3031 case Builtin::BIsincos:
3032 case Builtin::BIsincosf:
3033 case Builtin::BIsincosl:
3034 case Builtin::BI__builtin_sincos:
3035 case Builtin::BI__builtin_sincosf:
3036 case Builtin::BI__builtin_sincosf16:
3037 case Builtin::BI__builtin_sincosl:
3038 case Builtin::BI__builtin_sincosf128:
3039 if (Builder.getIsFPConstrained())
3040 break; // TODO: Emit constrained sincos intrinsic once one exists.
3041 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3042 return RValue::get(nullptr);
3043
3044 case Builtin::BIsqrt:
3045 case Builtin::BIsqrtf:
3046 case Builtin::BIsqrtl:
3047 case Builtin::BI__builtin_sqrt:
3048 case Builtin::BI__builtin_sqrtf:
3049 case Builtin::BI__builtin_sqrtf16:
3050 case Builtin::BI__builtin_sqrtl:
3051 case Builtin::BI__builtin_sqrtf128:
3052 case Builtin::BI__builtin_elementwise_sqrt: {
3053 llvm::Value *Call = emitUnaryMaybeConstrainedFPBuiltin(
3054 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3055 SetSqrtFPAccuracy(Call);
3056 return RValue::get(Call);
3057 }
3058
3059 case Builtin::BItan:
3060 case Builtin::BItanf:
3061 case Builtin::BItanl:
3062 case Builtin::BI__builtin_tan:
3063 case Builtin::BI__builtin_tanf:
3064 case Builtin::BI__builtin_tanf16:
3065 case Builtin::BI__builtin_tanl:
3066 case Builtin::BI__builtin_tanf128:
3067 case Builtin::BI__builtin_elementwise_tan:
3068 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3069 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3070
3071 case Builtin::BItanh:
3072 case Builtin::BItanhf:
3073 case Builtin::BItanhl:
3074 case Builtin::BI__builtin_tanh:
3075 case Builtin::BI__builtin_tanhf:
3076 case Builtin::BI__builtin_tanhf16:
3077 case Builtin::BI__builtin_tanhl:
3078 case Builtin::BI__builtin_tanhf128:
3079 case Builtin::BI__builtin_elementwise_tanh:
3080 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(
3081 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3082
3083 case Builtin::BItrunc:
3084 case Builtin::BItruncf:
3085 case Builtin::BItruncl:
3086 case Builtin::BI__builtin_trunc:
3087 case Builtin::BI__builtin_truncf:
3088 case Builtin::BI__builtin_truncf16:
3089 case Builtin::BI__builtin_truncl:
3090 case Builtin::BI__builtin_truncf128:
3091 case Builtin::BI__builtin_elementwise_trunc:
3092 return RValue::get(emitUnaryMaybeConstrainedFPBuiltin(*this, E,
3093 Intrinsic::trunc,
3094 Intrinsic::experimental_constrained_trunc));
3095
3096 case Builtin::BIlround:
3097 case Builtin::BIlroundf:
3098 case Builtin::BIlroundl:
3099 case Builtin::BI__builtin_lround:
3100 case Builtin::BI__builtin_lroundf:
3101 case Builtin::BI__builtin_lroundl:
3102 case Builtin::BI__builtin_lroundf128:
3103 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3104 *this, E, Intrinsic::lround,
3105 Intrinsic::experimental_constrained_lround));
3106
3107 case Builtin::BIllround:
3108 case Builtin::BIllroundf:
3109 case Builtin::BIllroundl:
3110 case Builtin::BI__builtin_llround:
3111 case Builtin::BI__builtin_llroundf:
3112 case Builtin::BI__builtin_llroundl:
3113 case Builtin::BI__builtin_llroundf128:
3114 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3115 *this, E, Intrinsic::llround,
3116 Intrinsic::experimental_constrained_llround));
3117
3118 case Builtin::BIlrint:
3119 case Builtin::BIlrintf:
3120 case Builtin::BIlrintl:
3121 case Builtin::BI__builtin_lrint:
3122 case Builtin::BI__builtin_lrintf:
3123 case Builtin::BI__builtin_lrintl:
3124 case Builtin::BI__builtin_lrintf128:
3125 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3126 *this, E, Intrinsic::lrint,
3127 Intrinsic::experimental_constrained_lrint));
3128
3129 case Builtin::BIllrint:
3130 case Builtin::BIllrintf:
3131 case Builtin::BIllrintl:
3132 case Builtin::BI__builtin_llrint:
3133 case Builtin::BI__builtin_llrintf:
3134 case Builtin::BI__builtin_llrintl:
3135 case Builtin::BI__builtin_llrintf128:
3136 return RValue::get(emitMaybeConstrainedFPToIntRoundBuiltin(
3137 *this, E, Intrinsic::llrint,
3138 Intrinsic::experimental_constrained_llrint));
3139 case Builtin::BI__builtin_ldexp:
3140 case Builtin::BI__builtin_ldexpf:
3141 case Builtin::BI__builtin_ldexpl:
3142 case Builtin::BI__builtin_ldexpf16:
3143 case Builtin::BI__builtin_ldexpf128:
3144 case Builtin::BI__builtin_elementwise_ldexp:
3145 return RValue::get(emitBinaryExpMaybeConstrainedFPBuiltin(
3146 *this, E, Intrinsic::ldexp,
3147 Intrinsic::experimental_constrained_ldexp));
3148 default:
3149 break;
3150 }
3151 }
3152
3153 // Check NonnullAttribute/NullabilityArg and Alignment.
3154 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3155 unsigned ParmNum) {
3156 Value *Val = A.emitRawPointer(*this);
3157 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3158 ParmNum);
3159
3160 if (SanOpts.has(SanitizerKind::Alignment)) {
3161 SanitizerSet SkippedChecks;
3162 SkippedChecks.set(SanitizerKind::All);
3163 SkippedChecks.clear(SanitizerKind::Alignment);
3164 SourceLocation Loc = Arg->getExprLoc();
3165 // Strip an implicit cast.
3166 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3167 if (CE->getCastKind() == CK_BitCast)
3168 Arg = CE->getSubExpr();
3169 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3170 SkippedChecks);
3171 }
3172 };
3173
3174 switch (BuiltinIDIfNoAsmLabel) {
3175 default: break;
3176 case Builtin::BI__builtin___CFStringMakeConstantString:
3177 case Builtin::BI__builtin___NSStringMakeConstantString:
3178 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3179 case Builtin::BI__builtin_stdarg_start:
3180 case Builtin::BI__builtin_va_start:
3181 case Builtin::BI__va_start:
3182 case Builtin::BI__builtin_c23_va_start:
3183 case Builtin::BI__builtin_va_end:
3184 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3185 ? EmitScalarExpr(E->getArg(0))
3186 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3187 BuiltinID != Builtin::BI__builtin_va_end);
3188 return RValue::get(nullptr);
3189 case Builtin::BI__builtin_va_copy: {
3190 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3191 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3192 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3193 {DstPtr, SrcPtr});
3194 return RValue::get(nullptr);
3195 }
3196 case Builtin::BIabs:
3197 case Builtin::BIlabs:
3198 case Builtin::BIllabs:
3199 case Builtin::BI__builtin_abs:
3200 case Builtin::BI__builtin_labs:
3201 case Builtin::BI__builtin_llabs: {
3202 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3203
3204 Value *Result;
3205 switch (getLangOpts().getSignedOverflowBehavior()) {
3206 case LangOptions::SOB_Defined:
3207 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3208 break;
3209 case LangOptions::SOB_Undefined:
3210 if (!SanitizeOverflow) {
3211 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3212 break;
3213 }
3214 [[fallthrough]];
3215 case LangOptions::SOB_Trapping:
3216 // TODO: Somehow handle the corner case when the address of abs is taken.
3217 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3218 break;
3219 }
3220 return RValue::get(Result);
3221 }
3222 case Builtin::BI__builtin_complex: {
3223 Value *Real = EmitScalarExpr(E->getArg(0));
3224 Value *Imag = EmitScalarExpr(E->getArg(1));
3225 return RValue::getComplex({Real, Imag});
3226 }
3227 case Builtin::BI__builtin_conj:
3228 case Builtin::BI__builtin_conjf:
3229 case Builtin::BI__builtin_conjl:
3230 case Builtin::BIconj:
3231 case Builtin::BIconjf:
3232 case Builtin::BIconjl: {
3233 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3234 Value *Real = ComplexVal.first;
3235 Value *Imag = ComplexVal.second;
3236 Imag = Builder.CreateFNeg(Imag, "neg");
3237 return RValue::getComplex(std::make_pair(Real, Imag));
3238 }
3239 case Builtin::BI__builtin_creal:
3240 case Builtin::BI__builtin_crealf:
3241 case Builtin::BI__builtin_creall:
3242 case Builtin::BIcreal:
3243 case Builtin::BIcrealf:
3244 case Builtin::BIcreall: {
3245 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3246 return RValue::get(ComplexVal.first);
3247 }
3248
3249 case Builtin::BI__builtin_preserve_access_index: {
3250 // Only enable the preserved access index region when debug info
3251 // is available, as debug info is needed to preserve the user-level
3252 // access pattern.
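// Illustrative use (hypothetical 'obj' with nested fields), e.g. for BPF
// CO-RE: int v = *__builtin_preserve_access_index(&obj->a.b); records the
// user-level access pattern in debug info rather than a fixed offset.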
3253 if (!getDebugInfo()) {
3254 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3255 return RValue::get(EmitScalarExpr(E->getArg(0)));
3256 }
3257
3258 // Nested builtin_preserve_access_index() not supported
3259 if (IsInPreservedAIRegion) {
3260 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3261 return RValue::get(EmitScalarExpr(E->getArg(0)));
3262 }
3263
3264 IsInPreservedAIRegion = true;
3265 Value *Res = EmitScalarExpr(E->getArg(0));
3266 IsInPreservedAIRegion = false;
3267 return RValue::get(Res);
3268 }
3269
3270 case Builtin::BI__builtin_cimag:
3271 case Builtin::BI__builtin_cimagf:
3272 case Builtin::BI__builtin_cimagl:
3273 case Builtin::BIcimag:
3274 case Builtin::BIcimagf:
3275 case Builtin::BIcimagl: {
3276 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3277 return RValue::get(ComplexVal.second);
3278 }
3279
3280 case Builtin::BI__builtin_clrsb:
3281 case Builtin::BI__builtin_clrsbl:
3282 case Builtin::BI__builtin_clrsbll: {
3283 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1
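// For example, on a 32-bit int: clrsb(0) == 31, clrsb(-1) == 31, and
// clrsb(1) == 30, i.e. the count of redundant sign bits.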
3284 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3285
3286 llvm::Type *ArgType = ArgValue->getType();
3287 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3288
3289 llvm::Type *ResultType = ConvertType(E->getType());
3290 Value *Zero = llvm::Constant::getNullValue(ArgType);
3291 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3292 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3293 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3294 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3295 Value *Result =
3296 Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3297 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3298 "cast");
3299 return RValue::get(Result);
3300 }
3301 case Builtin::BI__builtin_ctzs:
3302 case Builtin::BI__builtin_ctz:
3303 case Builtin::BI__builtin_ctzl:
3304 case Builtin::BI__builtin_ctzll:
3305 case Builtin::BI__builtin_ctzg:
3306 case Builtin::BI__builtin_elementwise_ctzg: {
3307 bool HasFallback =
3308 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3309 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3310 E->getNumArgs() > 1;
3311
3312 Value *ArgValue =
3313 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3314 : EmitScalarExpr(E->getArg(0));
3315
3316 llvm::Type *ArgType = ArgValue->getType();
3317 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3318
3319 llvm::Type *ResultType = ConvertType(E->getType());
3320 // The elementwise builtins always exhibit zero-is-undef behaviour
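// Illustrative lowering for __builtin_ctzg(x, f) with a 32-bit argument
// (a sketch of the IR built below, not taken from the source):
//   %0 = call i32 @llvm.cttz.i32(i32 %x, i1 true)
//   %iszero = icmp eq i32 %x, 0
//   %ctzg = select i1 %iszero, i32 %f, i32 %0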
3321 Value *ZeroUndef = Builder.getInt1(
3322 HasFallback || getTarget().isCLZForZeroUndef() ||
3323 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3324 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3325 if (Result->getType() != ResultType)
3326 Result =
3327 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3328 if (!HasFallback)
3329 return RValue::get(Result);
3330
3331 Value *Zero = Constant::getNullValue(ArgType);
3332 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3333 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3334 Value *ResultOrFallback =
3335 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3336 return RValue::get(ResultOrFallback);
3337 }
3338 case Builtin::BI__builtin_clzs:
3339 case Builtin::BI__builtin_clz:
3340 case Builtin::BI__builtin_clzl:
3341 case Builtin::BI__builtin_clzll:
3342 case Builtin::BI__builtin_clzg:
3343 case Builtin::BI__builtin_elementwise_clzg: {
3344 bool HasFallback =
3345 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3346 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3347 E->getNumArgs() > 1;
3348
3349 Value *ArgValue =
3350 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3351 : EmitScalarExpr(E->getArg(0));
3352
3353 llvm::Type *ArgType = ArgValue->getType();
3354 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3355
3356 llvm::Type *ResultType = ConvertType(E->getType());
3357 // The elementwise builtins always exhibit zero-is-undef behaviour
3358 Value *ZeroUndef = Builder.getInt1(
3359 HasFallback || getTarget().isCLZForZeroUndef() ||
3360 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3361 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3362 if (Result->getType() != ResultType)
3363 Result =
3364 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3365 if (!HasFallback)
3366 return RValue::get(Result);
3367
3368 Value *Zero = Constant::getNullValue(ArgType);
3369 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3370 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3371 Value *ResultOrFallback =
3372 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3373 return RValue::get(ResultOrFallback);
3374 }
3375 case Builtin::BI__builtin_ffs:
3376 case Builtin::BI__builtin_ffsl:
3377 case Builtin::BI__builtin_ffsll: {
3378 // ffs(x) -> x ? cttz(x) + 1 : 0
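// For example: ffs(0) == 0, ffs(1) == 1, ffs(8) == 4 (the 1-based index
// of the least significant set bit).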
3379 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3380
3381 llvm::Type *ArgType = ArgValue->getType();
3382 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3383
3384 llvm::Type *ResultType = ConvertType(E->getType());
3385 Value *Tmp =
3386 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3387 llvm::ConstantInt::get(ArgType, 1));
3388 Value *Zero = llvm::Constant::getNullValue(ArgType);
3389 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3390 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3391 if (Result->getType() != ResultType)
3392 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3393 "cast");
3394 return RValue::get(Result);
3395 }
3396 case Builtin::BI__builtin_parity:
3397 case Builtin::BI__builtin_parityl:
3398 case Builtin::BI__builtin_parityll: {
3399 // parity(x) -> ctpop(x) & 1
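// For example: parity(7) == 1 (three set bits), parity(3) == 0.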
3400 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3401
3402 llvm::Type *ArgType = ArgValue->getType();
3403 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3404
3405 llvm::Type *ResultType = ConvertType(E->getType());
3406 Value *Tmp = Builder.CreateCall(F, ArgValue);
3407 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3408 if (Result->getType() != ResultType)
3409 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3410 "cast");
3411 return RValue::get(Result);
3412 }
3413 case Builtin::BI__lzcnt16:
3414 case Builtin::BI__lzcnt:
3415 case Builtin::BI__lzcnt64: {
3416 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3417
3418 llvm::Type *ArgType = ArgValue->getType();
3419 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3420
3421 llvm::Type *ResultType = ConvertType(E->getType());
3422 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3423 if (Result->getType() != ResultType)
3424 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3425 "cast");
3426 return RValue::get(Result);
3427 }
3428 case Builtin::BI__popcnt16:
3429 case Builtin::BI__popcnt:
3430 case Builtin::BI__popcnt64:
3431 case Builtin::BI__builtin_popcount:
3432 case Builtin::BI__builtin_popcountl:
3433 case Builtin::BI__builtin_popcountll:
3434 case Builtin::BI__builtin_popcountg: {
3435 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3436
3437 llvm::Type *ArgType = ArgValue->getType();
3438 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3439
3440 llvm::Type *ResultType = ConvertType(E->getType());
3441 Value *Result = Builder.CreateCall(F, ArgValue);
3442 if (Result->getType() != ResultType)
3443 Result =
3444 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3445 return RValue::get(Result);
3446 }
3447 case Builtin::BI__builtin_unpredictable: {
3448 // Always return the argument of __builtin_unpredictable. LLVM does not
3449 // handle this builtin. Metadata for this builtin should be added directly
3450 // to instructions such as branches or switches that use it.
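// Illustrative use (hypothetical condition):
//   if (__builtin_unpredictable(x > 0)) { ... }
// The !unpredictable metadata is attached where the branch is emitted.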
3451 return RValue::get(EmitScalarExpr(E->getArg(0)));
3452 }
3453 case Builtin::BI__builtin_expect: {
3454 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3455 llvm::Type *ArgType = ArgValue->getType();
3456
3457 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3458 // Don't generate llvm.expect on -O0 as the backend won't use it for
3459 // anything.
3460 // Note, we still IRGen ExpectedValue because it could have side-effects.
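// Illustrative IR at -O1 and above, assuming a 'long' argument:
//   %expval = call i64 @llvm.expect.i64(i64 %x, i64 1)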
3461 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3462 return RValue::get(ArgValue);
3463
3464 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3465 Value *Result =
3466 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3467 return RValue::get(Result);
3468 }
3469 case Builtin::BI__builtin_expect_with_probability: {
3470 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3471 llvm::Type *ArgType = ArgValue->getType();
3472
3473 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3474 llvm::APFloat Probability(0.0);
3475 const Expr *ProbArg = E->getArg(2);
3476 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3477 assert(EvalSucceed && "probability should be able to evaluate as float");
3478 (void)EvalSucceed;
3479 bool LoseInfo = false;
3480 Probability.convert(llvm::APFloat::IEEEdouble(),
3481 llvm::RoundingMode::Dynamic, &LoseInfo);
3482 llvm::Type *Ty = ConvertType(ProbArg->getType());
3483 Constant *Confidence = ConstantFP::get(Ty, Probability);
3484 // Don't generate llvm.expect.with.probability on -O0 as the backend
3485 // won't use it for anything.
3486 // Note, we still IRGen ExpectedValue because it could have side-effects.
3487 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3488 return RValue::get(ArgValue);
3489
3490 Function *FnExpect =
3491 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3492 Value *Result = Builder.CreateCall(
3493 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3494 return RValue::get(Result);
3495 }
3496 case Builtin::BI__builtin_assume_aligned: {
3497 const Expr *Ptr = E->getArg(0);
3498 Value *PtrValue = EmitScalarExpr(Ptr);
3499 Value *OffsetValue =
3500 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3501
3502 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3503 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3504 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3505 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3506 llvm::Value::MaximumAlignment);
3507
3508 emitAlignmentAssumption(PtrValue, Ptr,
3509 /*The expr loc is sufficient.*/ SourceLocation(),
3510 AlignmentCI, OffsetValue);
3511 return RValue::get(PtrValue);
3512 }
3513 case Builtin::BI__builtin_assume_dereferenceable: {
3514 const Expr *Ptr = E->getArg(0);
3515 const Expr *Size = E->getArg(1);
3516 Value *PtrValue = EmitScalarExpr(Ptr);
3517 Value *SizeValue = EmitScalarExpr(Size);
3518 if (SizeValue->getType() != IntPtrTy)
3519 SizeValue =
3520 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3521 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3522 return RValue::get(nullptr);
3523 }
3524 case Builtin::BI__assume:
3525 case Builtin::BI__builtin_assume: {
3526 if (E->getArg(0)->HasSideEffects(getContext()))
3527 return RValue::get(nullptr);
3528
3529 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3530 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3531 Builder.CreateCall(FnAssume, ArgValue);
3532 return RValue::get(nullptr);
3533 }
3534 case Builtin::BI__builtin_assume_separate_storage: {
3535 const Expr *Arg0 = E->getArg(0);
3536 const Expr *Arg1 = E->getArg(1);
3537
3538 Value *Value0 = EmitScalarExpr(Arg0);
3539 Value *Value1 = EmitScalarExpr(Arg1);
3540
3541 Value *Values[] = {Value0, Value1};
3542 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3543 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3544 return RValue::get(nullptr);
3545 }
3546 case Builtin::BI__builtin_allow_runtime_check: {
3547 StringRef Kind =
3548 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3549 LLVMContext &Ctx = CGM.getLLVMContext();
3550 llvm::Value *Allow = Builder.CreateCall(
3551 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3552 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3553 return RValue::get(Allow);
3554 }
3555 case Builtin::BI__builtin_allow_sanitize_check: {
3556 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
3557 StringRef Name =
3558 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3559
3560 // We deliberately allow the use of kernel- and non-kernel names
3561 // interchangeably, even when one or the other is enabled. This is consistent
3562 // with the no_sanitize-attribute, which allows either kernel- or non-kernel
3563 // name to disable instrumentation (see CodeGenFunction::StartFunction).
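// Illustrative use (hypothetical guard):
//   if (__builtin_allow_sanitize_check("address")) { /* extra checks */ }
// This folds to false below when no matching sanitizer is enabled.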
3564 if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
3565 SanitizerKind::KernelAddress) &&
3566 (Name == "address" || Name == "kernel-address")) {
3567 IntrID = Intrinsic::allow_sanitize_address;
3568 } else if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
3569 Name == "thread") {
3570 IntrID = Intrinsic::allow_sanitize_thread;
3571 } else if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Memory |
3572 SanitizerKind::KernelMemory) &&
3573 (Name == "memory" || Name == "kernel-memory")) {
3574 IntrID = Intrinsic::allow_sanitize_memory;
3575 } else if (getLangOpts().Sanitize.hasOneOf(
3576 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress) &&
3577 (Name == "hwaddress" || Name == "kernel-hwaddress")) {
3578 IntrID = Intrinsic::allow_sanitize_hwaddress;
3579 }
3580
3581 if (IntrID != Intrinsic::not_intrinsic) {
3582 llvm::Value *Allow = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3583 return RValue::get(Allow);
3584 }
3585 // If the checked sanitizer is not enabled, we can safely lower to false
3586 // right away. This is also more efficient, since the LowerAllowCheckPass
3587 // need not run at all if none of the above sanitizers are enabled.
3588 return RValue::get(Builder.getFalse());
3589 }
3590 case Builtin::BI__arithmetic_fence: {
3591 // Create the builtin call if FastMath is selected, and the target
3592 // supports the builtin, otherwise just return the argument.
3593 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3594 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3595 bool isArithmeticFenceEnabled =
3596 FMF.allowReassoc() &&
3597 getContext().getTargetInfo().checkArithmeticFenceSupported();
3598 QualType ArgType = E->getArg(0)->getType();
3599 if (ArgType->isComplexType()) {
3600 if (isArithmeticFenceEnabled) {
3601 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3602 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3603 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3604 ConvertType(ElementType));
3605 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3606 ConvertType(ElementType));
3607 return RValue::getComplex(std::make_pair(Real, Imag));
3608 }
3609 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3610 Value *Real = ComplexVal.first;
3611 Value *Imag = ComplexVal.second;
3612 return RValue::getComplex(std::make_pair(Real, Imag));
3613 }
3614 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3615 if (isArithmeticFenceEnabled)
3616 return RValue::get(
3617 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3618 return RValue::get(ArgValue);
3619 }
3620 case Builtin::BI__builtin_bswapg: {
3621 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3622 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3623 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3624 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3625 return RValue::get(ArgValue);
3626 assert(((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0)) &&
3627 "LLVM's __builtin_bswapg only supports integer variants that has a "
3628 "multiple of 16 bits as well as a single byte");
3629 return RValue::get(
3630 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3631 }
3632 case Builtin::BI__builtin_bswap16:
3633 case Builtin::BI__builtin_bswap32:
3634 case Builtin::BI__builtin_bswap64:
3635 case Builtin::BI_byteswap_ushort:
3636 case Builtin::BI_byteswap_ulong:
3637 case Builtin::BI_byteswap_uint64: {
3638 return RValue::get(
3639 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3640 }
3641 case Builtin::BI__builtin_bitreverse8:
3642 case Builtin::BI__builtin_bitreverse16:
3643 case Builtin::BI__builtin_bitreverse32:
3644 case Builtin::BI__builtin_bitreverse64: {
3645 return RValue::get(
3646 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3647 }
3648 case Builtin::BI__builtin_rotateleft8:
3649 case Builtin::BI__builtin_rotateleft16:
3650 case Builtin::BI__builtin_rotateleft32:
3651 case Builtin::BI__builtin_rotateleft64:
3652 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3653 case Builtin::BI_rotl16:
3654 case Builtin::BI_rotl:
3655 case Builtin::BI_lrotl:
3656 case Builtin::BI_rotl64:
3657 return emitRotate(E, false);
3658
3659 case Builtin::BI__builtin_rotateright8:
3660 case Builtin::BI__builtin_rotateright16:
3661 case Builtin::BI__builtin_rotateright32:
3662 case Builtin::BI__builtin_rotateright64:
3663 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3664 case Builtin::BI_rotr16:
3665 case Builtin::BI_rotr:
3666 case Builtin::BI_lrotr:
3667 case Builtin::BI_rotr64:
3668 return emitRotate(E, true);
3669
3670 case Builtin::BI__builtin_constant_p: {
3671 llvm::Type *ResultType = ConvertType(E->getType());
3672
3673 const Expr *Arg = E->getArg(0);
3674 QualType ArgType = Arg->getType();
3675 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3676 // and likely a mistake.
3677 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3678 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3679 // Per the GCC documentation, only numeric constants are recognized after
3680 // inlining.
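// For example, __builtin_constant_p(42) folds to 1 here, while a runtime
// value is deferred via llvm.is.constant and typically folds to 0 later.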
3681 return RValue::get(ConstantInt::get(ResultType, 0));
3682
3683 if (Arg->HasSideEffects(getContext()))
3684 // The argument is unevaluated, so be conservative if it might have
3685 // side-effects.
3686 return RValue::get(ConstantInt::get(ResultType, 0));
3687
3688 Value *ArgValue = EmitScalarExpr(Arg);
3689 if (ArgType->isObjCObjectPointerType()) {
3690 // Convert Objective-C objects to id because we cannot distinguish between
3691 // LLVM types for Obj-C classes as they are opaque.
3692 ArgType = CGM.getContext().getObjCIdType();
3693 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3694 }
3695 Function *F =
3696 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3697 Value *Result = Builder.CreateCall(F, ArgValue);
3698 if (Result->getType() != ResultType)
3699 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3700 return RValue::get(Result);
3701 }
3702 case Builtin::BI__builtin_dynamic_object_size:
3703 case Builtin::BI__builtin_object_size: {
3704 unsigned Type =
3705 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3706 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3707
3708 // We pass this builtin onto the optimizer so that it can figure out the
3709 // object size in more complex cases.
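// Illustrative lowering (hypothetical pointer 'p', type 0), roughly:
//   %sz = call i64 @llvm.objectsize.i64.p0(ptr %p, i1 false, i1 true, i1 false)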
3710 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3711 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3712 /*EmittedE=*/nullptr, IsDynamic));
3713 }
3714 case Builtin::BI__builtin_counted_by_ref: {
3715 // Default to returning '(void *) 0'.
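// Illustrative use (hypothetical struct with a counted_by flexible array):
//   struct S { int count; int fam[] __attribute__((counted_by(count))); };
//   __builtin_counted_by_ref(s->fam) yields &s->count, or (void *)0 when
//   no counted_by attribute applies.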
3716 llvm::Value *Result = llvm::ConstantPointerNull::get(
3717 llvm::PointerType::getUnqual(getLLVMContext()));
3718
3719 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3720
3721 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3722 UO && UO->getOpcode() == UO_AddrOf) {
3723 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3724
3725 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3726 Arg = ASE->getBase()->IgnoreParenImpCasts();
3727 }
3728
3729 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3730 if (auto *CATy =
3731 ME->getMemberDecl()->getType()->getAs<CountAttributedType>();
3732 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3733 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
3734 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
3735 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
3736 else
3737 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3738 }
3739 }
3740
3741 return RValue::get(Result);
3742 }
3743 case Builtin::BI__builtin_prefetch: {
3744 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3745 // FIXME: Technically these constants should be of type 'int', yes?
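// For example, __builtin_prefetch(p) defaults to a read prefetch with
// maximal temporal locality: @llvm.prefetch(ptr %p, i32 0, i32 3, i32 1).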
3746 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3747 llvm::ConstantInt::get(Int32Ty, 0);
3748 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3749 llvm::ConstantInt::get(Int32Ty, 3);
3750 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3751 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3752 Builder.CreateCall(F, {Address, RW, Locality, Data});
3753 return RValue::get(nullptr);
3754 }
3755 case Builtin::BI__builtin_readcyclecounter: {
3756 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3757 return RValue::get(Builder.CreateCall(F));
3758 }
3759 case Builtin::BI__builtin_readsteadycounter: {
3760 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3761 return RValue::get(Builder.CreateCall(F));
3762 }
3763 case Builtin::BI__builtin___clear_cache: {
3764 Value *Begin = EmitScalarExpr(E->getArg(0));
3765 Value *End = EmitScalarExpr(E->getArg(1));
3766 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3767 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3768 }
3769 case Builtin::BI__builtin_trap:
3770 EmitTrapCall(Intrinsic::trap);
3771 return RValue::get(nullptr);
3772 case Builtin::BI__builtin_verbose_trap: {
3773 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3774 if (getDebugInfo()) {
3775 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3776 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3777 *E->getArg(1)->tryEvaluateString(getContext()));
3778 }
3779 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3780 // Currently no attempt is made to prevent traps from being merged.
3781 EmitTrapCall(Intrinsic::trap);
3782 return RValue::get(nullptr);
3783 }
3784 case Builtin::BI__debugbreak:
3785 EmitTrapCall(Intrinsic::debugtrap);
3786 return RValue::get(nullptr);
3787 case Builtin::BI__builtin_unreachable: {
3788 EmitUnreachable(E->getExprLoc());
3789
3790 // We do need to preserve an insertion point.
3791 EmitBlock(createBasicBlock("unreachable.cont"));
3792
3793 return RValue::get(nullptr);
3794 }
3795
3796 case Builtin::BI__builtin_powi:
3797 case Builtin::BI__builtin_powif:
3798 case Builtin::BI__builtin_powil: {
3799 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3800 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3801
3802 if (Builder.getIsFPConstrained()) {
3803 // FIXME: llvm.powi has 2 mangling types,
3804 // llvm.experimental.constrained.powi has one.
3805 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3806 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3807 Src0->getType());
3808 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3809 }
3810
3811 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3812 { Src0->getType(), Src1->getType() });
3813 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3814 }
3815 case Builtin::BI__builtin_frexpl: {
3816 // Linux PPC is not adding further PPCDoubleDouble support.
3817 // Work is in progress to switch the default to IEEE long double; emit a
3818 // libcall for frexpl instead of legalizing this type in the backend.
3819 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3820 break;
3821 [[fallthrough]];
3822 }
3823 case Builtin::BI__builtin_frexp:
3824 case Builtin::BI__builtin_frexpf:
3825 case Builtin::BI__builtin_frexpf128:
3826 case Builtin::BI__builtin_frexpf16:
3827 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3828 case Builtin::BImodf:
3829 case Builtin::BImodff:
3830 case Builtin::BImodfl:
3831 case Builtin::BI__builtin_modf:
3832 case Builtin::BI__builtin_modff:
3833 case Builtin::BI__builtin_modfl:
3834 if (Builder.getIsFPConstrained())
3835 break; // TODO: Emit constrained modf intrinsic once one exists.
3836 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3837 case Builtin::BI__builtin_isgreater:
3838 case Builtin::BI__builtin_isgreaterequal:
3839 case Builtin::BI__builtin_isless:
3840 case Builtin::BI__builtin_islessequal:
3841 case Builtin::BI__builtin_islessgreater:
3842 case Builtin::BI__builtin_isunordered: {
3843 // Ordered comparisons: we know the arguments to these are matching scalar
3844 // floating point values.
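// For example, isgreater(x, y) emits an ordered 'ogt' compare, so it
// yields 0 when either argument is a NaN.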
3845 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3846 Value *LHS = EmitScalarExpr(E->getArg(0));
3847 Value *RHS = EmitScalarExpr(E->getArg(1));
3848
3849 switch (BuiltinID) {
3850 default: llvm_unreachable("Unknown ordered comparison");
3851 case Builtin::BI__builtin_isgreater:
3852 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3853 break;
3854 case Builtin::BI__builtin_isgreaterequal:
3855 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3856 break;
3857 case Builtin::BI__builtin_isless:
3858 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3859 break;
3860 case Builtin::BI__builtin_islessequal:
3861 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3862 break;
3863 case Builtin::BI__builtin_islessgreater:
3864 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3865 break;
3866 case Builtin::BI__builtin_isunordered:
3867 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3868 break;
3869 }
3870 // ZExt bool to int type.
3871 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3872 }
3873
3874 case Builtin::BI__builtin_isnan: {
3875 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3876 Value *V = EmitScalarExpr(E->getArg(0));
3877 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3878 return RValue::get(Result);
3879 return RValue::get(
3880 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3881 ConvertType(E->getType())));
3882 }
3883
3884 case Builtin::BI__builtin_issignaling: {
3885 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3886 Value *V = EmitScalarExpr(E->getArg(0));
3887 return RValue::get(
3888 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3889 ConvertType(E->getType())));
3890 }
3891
3892 case Builtin::BI__builtin_isinf: {
3893 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3894 Value *V = EmitScalarExpr(E->getArg(0));
3895 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3896 return RValue::get(Result);
3897 return RValue::get(
3898 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3899 ConvertType(E->getType())));
3900 }
3901
3902 case Builtin::BIfinite:
3903 case Builtin::BI__finite:
3904 case Builtin::BIfinitef:
3905 case Builtin::BI__finitef:
3906 case Builtin::BIfinitel:
3907 case Builtin::BI__finitel:
3908 case Builtin::BI__builtin_isfinite: {
3909 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3910 Value *V = EmitScalarExpr(E->getArg(0));
3911 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3912 return RValue::get(Result);
3913 return RValue::get(
3914 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3915 ConvertType(E->getType())));
3916 }
3917
3918 case Builtin::BI__builtin_isnormal: {
3919 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3920 Value *V = EmitScalarExpr(E->getArg(0));
3921 return RValue::get(
3922 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3923 ConvertType(E->getType())));
3924 }
3925
3926 case Builtin::BI__builtin_issubnormal: {
3927 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3928 Value *V = EmitScalarExpr(E->getArg(0));
3929 return RValue::get(
3930 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3931 ConvertType(E->getType())));
3932 }
3933
3934 case Builtin::BI__builtin_iszero: {
3935 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3936 Value *V = EmitScalarExpr(E->getArg(0));
3937 return RValue::get(
3938 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
3939 ConvertType(E->getType())));
3940 }
3941
3942 case Builtin::BI__builtin_isfpclass: {
3943 Expr::EvalResult Result;
3944 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
3945 break;
3946 uint64_t Test = Result.Val.getInt().getLimitedValue();
3947 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3948 Value *V = EmitScalarExpr(E->getArg(0));
3949 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
3950 ConvertType(E->getType())));
3951 }
3952
3953 case Builtin::BI__builtin_nondeterministic_value: {
3954 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
3955
3956 Value *Result = PoisonValue::get(Ty);
3957 Result = Builder.CreateFreeze(Result);
3958
3959 return RValue::get(Result);
3960 }
3961
3962 case Builtin::BI__builtin_elementwise_abs: {
3963 Value *Result;
3964 QualType QT = E->getArg(0)->getType();
3965
3966 if (auto *VecTy = QT->getAs<VectorType>())
3967 QT = VecTy->getElementType();
3968 if (QT->isIntegerType())
3969 Result = Builder.CreateBinaryIntrinsic(
3970 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
3971 nullptr, "elt.abs");
3972 else
3973 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
3974 "elt.abs");
3975
3976 return RValue::get(Result);
3977 }
3978 case Builtin::BI__builtin_elementwise_bitreverse:
3979 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3980 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
3981 case Builtin::BI__builtin_elementwise_popcount:
3982 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3983 *this, E, Intrinsic::ctpop, "elt.ctpop"));
3984 case Builtin::BI__builtin_elementwise_canonicalize:
3985 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
3986 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
3987 case Builtin::BI__builtin_elementwise_copysign:
3988 return RValue::get(
3989 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
3990 case Builtin::BI__builtin_elementwise_fshl:
3991 return RValue::get(
3992 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
3993 case Builtin::BI__builtin_elementwise_fshr:
3994 return RValue::get(
3995 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
3996
3997 case Builtin::BI__builtin_elementwise_add_sat:
3998 case Builtin::BI__builtin_elementwise_sub_sat: {
3999 Value *Op0 = EmitScalarExpr(E->getArg(0));
4000 Value *Op1 = EmitScalarExpr(E->getArg(1));
4001 Value *Result;
4002 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4003 QualType Ty = E->getArg(0)->getType();
4004 if (auto *VecTy = Ty->getAs<VectorType>())
4005 Ty = VecTy->getElementType();
4006 bool IsSigned = Ty->isSignedIntegerType();
4007 unsigned Opc;
4008 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4009 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4010 else
4011 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4012 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4013 return RValue::get(Result);
4014 }
4015
4016 case Builtin::BI__builtin_elementwise_max: {
4017 Value *Op0 = EmitScalarExpr(E->getArg(0));
4018 Value *Op1 = EmitScalarExpr(E->getArg(1));
4019 Value *Result;
4020 if (Op0->getType()->isIntOrIntVectorTy()) {
4021 QualType Ty = E->getArg(0)->getType();
4022 if (auto *VecTy = Ty->getAs<VectorType>())
4023 Ty = VecTy->getElementType();
4024 Result = Builder.CreateBinaryIntrinsic(
4025 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4026 Op1, nullptr, "elt.max");
4027 } else
4028 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4029 return RValue::get(Result);
4030 }
4031 case Builtin::BI__builtin_elementwise_min: {
4032 Value *Op0 = EmitScalarExpr(E->getArg(0));
4033 Value *Op1 = EmitScalarExpr(E->getArg(1));
4034 Value *Result;
4035 if (Op0->getType()->isIntOrIntVectorTy()) {
4036 QualType Ty = E->getArg(0)->getType();
4037 if (auto *VecTy = Ty->getAs<VectorType>())
4038 Ty = VecTy->getElementType();
4039 Result = Builder.CreateBinaryIntrinsic(
4040 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4041 Op1, nullptr, "elt.min");
4042 } else
4043 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4044 return RValue::get(Result);
4045 }
4046
4047 case Builtin::BI__builtin_elementwise_maxnum: {
4048 Value *Op0 = EmitScalarExpr(E->getArg(0));
4049 Value *Op1 = EmitScalarExpr(E->getArg(1));
4050 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4051 Op1, nullptr, "elt.maxnum");
4052 return RValue::get(Result);
4053 }
4054
4055 case Builtin::BI__builtin_elementwise_minnum: {
4056 Value *Op0 = EmitScalarExpr(E->getArg(0));
4057 Value *Op1 = EmitScalarExpr(E->getArg(1));
4058 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4059 Op1, nullptr, "elt.minnum");
4060 return RValue::get(Result);
4061 }
4062
4063 case Builtin::BI__builtin_elementwise_maximum: {
4064 Value *Op0 = EmitScalarExpr(E->getArg(0));
4065 Value *Op1 = EmitScalarExpr(E->getArg(1));
4066 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4067 nullptr, "elt.maximum");
4068 return RValue::get(Result);
4069 }
4070
4071 case Builtin::BI__builtin_elementwise_minimum: {
4072 Value *Op0 = EmitScalarExpr(E->getArg(0));
4073 Value *Op1 = EmitScalarExpr(E->getArg(1));
4074 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4075 nullptr, "elt.minimum");
4076 return RValue::get(Result);
4077 }
4078
4079 case Builtin::BI__builtin_elementwise_maximumnum: {
4080 Value *Op0 = EmitScalarExpr(E->getArg(0));
4081 Value *Op1 = EmitScalarExpr(E->getArg(1));
4082 Value *Result = Builder.CreateBinaryIntrinsic(
4083 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4084 return RValue::get(Result);
4085 }
4086
4087 case Builtin::BI__builtin_elementwise_minimumnum: {
4088 Value *Op0 = EmitScalarExpr(E->getArg(0));
4089 Value *Op1 = EmitScalarExpr(E->getArg(1));
4090 Value *Result = Builder.CreateBinaryIntrinsic(
4091 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4092 return RValue::get(Result);
4093 }
4094
4095 case Builtin::BI__builtin_reduce_max: {
4096 auto GetIntrinsicID = [this](QualType QT) {
4097 if (auto *VecTy = QT->getAs<VectorType>())
4098 QT = VecTy->getElementType();
4099 else if (QT->isSizelessVectorType())
4100 QT = QT->getSizelessVectorEltType(CGM.getContext());
4101
4102 if (QT->isSignedIntegerType())
4103 return Intrinsic::vector_reduce_smax;
4104 if (QT->isUnsignedIntegerType())
4105 return Intrinsic::vector_reduce_umax;
4106 assert(QT->isFloatingType() && "must have a float here");
4107 return Intrinsic::vector_reduce_fmax;
4108 };
4109 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4110 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.max"));
4111 }
4112
4113 case Builtin::BI__builtin_reduce_min: {
4114 auto GetIntrinsicID = [this](QualType QT) {
4115 if (auto *VecTy = QT->getAs<VectorType>())
4116 QT = VecTy->getElementType();
4117 else if (QT->isSizelessVectorType())
4118 QT = QT->getSizelessVectorEltType(CGM.getContext());
4119
4120 if (QT->isSignedIntegerType())
4121 return Intrinsic::vector_reduce_smin;
4122 if (QT->isUnsignedIntegerType())
4123 return Intrinsic::vector_reduce_umin;
4124 assert(QT->isFloatingType() && "must have a float here");
4125 return Intrinsic::vector_reduce_fmin;
4126 };
4127
4128 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4129 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4130 }
4131
4132 case Builtin::BI__builtin_reduce_add:
4133 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4134 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4135 case Builtin::BI__builtin_reduce_mul:
4136 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4137 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4138 case Builtin::BI__builtin_reduce_xor:
4139 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4140 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4141 case Builtin::BI__builtin_reduce_or:
4142 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4143 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4144 case Builtin::BI__builtin_reduce_and:
4145 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4146 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4147 case Builtin::BI__builtin_reduce_maximum:
4148 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4149 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4150 case Builtin::BI__builtin_reduce_minimum:
4151 return RValue::get(emitBuiltinWithOneOverloadedType<1>(
4152 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4153
4154 case Builtin::BI__builtin_matrix_transpose: {
4155 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4156 Value *MatValue = EmitScalarExpr(E->getArg(0));
4157 MatrixBuilder MB(Builder);
4158 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4159 MatrixTy->getNumColumns());
4160 return RValue::get(Result);
4161 }
4162
4163 case Builtin::BI__builtin_matrix_column_major_load: {
4164 MatrixBuilder MB(Builder);
4165 // Emit everything that isn't dependent on the first parameter type
4166 Value *Stride = EmitScalarExpr(E->getArg(3));
4167 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4168 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4169 assert(PtrTy && "arg0 must be of pointer type");
4170 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4171
4174 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4175 0);
4176 Value *Result = MB.CreateColumnMajorLoad(
4177 Src.getElementType(), Src.emitRawPointer(*this),
4178 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4179 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4180 return RValue::get(Result);
4181 }
4182
4183 case Builtin::BI__builtin_matrix_column_major_store: {
4184 MatrixBuilder MB(Builder);
4185 Value *Matrix = EmitScalarExpr(E->getArg(0));
4186 Address Dst = EmitPointerWithAlignment(E->getArg(1));
4187 Value *Stride = EmitScalarExpr(E->getArg(2));
4188
4189 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4190 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4191 assert(PtrTy && "arg1 must be of pointer type");
4192 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4193
4194 EmitNonNullArgCheck(RValue::get(Dst.emitRawPointer(*this)),
4195 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4196 0);
4197 Value *Result = MB.CreateColumnMajorStore(
4198 Matrix, Dst.emitRawPointer(*this),
4199 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4200 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4202 return RValue::get(Result);
4203 }
4204
4205 case Builtin::BI__builtin_masked_load:
4206 case Builtin::BI__builtin_masked_expand_load: {
4207 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4208 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4209
4210 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4211 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4212 if (E->getNumArgs() > 2)
4213 PassThru = EmitScalarExpr(E->getArg(2));
4214
4215 CharUnits Align = CGM.getNaturalTypeAlignment(
4216 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4217
4218 llvm::Value *Result;
4219 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4220 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4221 PassThru, "masked_load");
4222 } else {
4223 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4224 Result =
4225 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4226 }
4227 return RValue::get(Result);
4228 }
4229 case Builtin::BI__builtin_masked_gather: {
4230 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4231 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4232 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4233
4234 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4235 CharUnits Align = CGM.getNaturalTypeAlignment(
4236 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4237
4238 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4239 if (E->getNumArgs() > 3)
4240 PassThru = EmitScalarExpr(E->getArg(3));
4241
4242 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4243 E->getType()->getAs<VectorType>()->getElementType());
4244 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4245
4246 llvm::Value *Result = Builder.CreateMaskedGather(
4247 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4248 return RValue::get(Result);
4249 }
4250 case Builtin::BI__builtin_masked_store:
4251 case Builtin::BI__builtin_masked_compress_store: {
4252 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4253 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4254 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4255
4256 QualType ValTy = E->getArg(1)->getType();
4257 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4258
4259 CharUnits Align = CGM.getNaturalTypeAlignment(
4260 ValTy->getAs<VectorType>()->getElementType(),
4261 nullptr);
4262
4263 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4264 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4265 } else {
4266 llvm::Function *F =
4267 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4268 Builder.CreateCall(F, {Val, Ptr, Mask});
4269 }
4270 return RValue::get(nullptr);
4271 }
4272 case Builtin::BI__builtin_masked_scatter: {
4273 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4274 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4275 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4276 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4277
4278 CharUnits Align = CGM.getNaturalTypeAlignment(
4279 E->getArg(2)->getType()->getAs<VectorType>()->getElementType(),
4280 nullptr);
4281
4282 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4283 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4284 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4285
4286 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4287 return RValue();
4288 }
4289 case Builtin::BI__builtin_isinf_sign: {
4290 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
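// For example: isinf_sign(-INFINITY) == -1, isinf_sign(INFINITY) == 1,
// and isinf_sign(0.0) == 0.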
4291 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4292 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4293 Value *Arg = EmitScalarExpr(E->getArg(0));
4294 Value *AbsArg = EmitFAbs(*this, Arg);
4295 Value *IsInf = Builder.CreateFCmpOEQ(
4296 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4297 Value *IsNeg = EmitSignBit(*this, Arg);
4298
4299 llvm::Type *IntTy = ConvertType(E->getType());
4300 Value *Zero = Constant::getNullValue(IntTy);
4301 Value *One = ConstantInt::get(IntTy, 1);
4302 Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
4303 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4304 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4305 return RValue::get(Result);
4306 }
4307
4308 case Builtin::BI__builtin_flt_rounds: {
4309 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4310
4311 llvm::Type *ResultType = ConvertType(E->getType());
4312 Value *Result = Builder.CreateCall(F);
4313 if (Result->getType() != ResultType)
4314 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4315 "cast");
4316 return RValue::get(Result);
4317 }
4318
4319 case Builtin::BI__builtin_set_flt_rounds: {
4320 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4321
4322 Value *V = EmitScalarExpr(E->getArg(0));
4323 Builder.CreateCall(F, V);
4324 return RValue::get(nullptr);
4325 }
4326
4327 case Builtin::BI__builtin_fpclassify: {
4328 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4329 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4330 Value *V = EmitScalarExpr(E->getArg(5));
4331 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4332
4333 // Create Result
4334 BasicBlock *Begin = Builder.GetInsertBlock();
4335 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4336 Builder.SetInsertPoint(End);
4337 PHINode *Result =
4338 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4339 "fpclassify_result");
4340
4341 // if (V==0) return FP_ZERO
4342 Builder.SetInsertPoint(Begin);
4343 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4344 "iszero");
4345 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4346 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4347 Builder.CreateCondBr(IsZero, End, NotZero);
4348 Result->addIncoming(ZeroLiteral, Begin);
4349
4350 // if (V != V) return FP_NAN
4351 Builder.SetInsertPoint(NotZero);
4352 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4353 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4354 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4355 Builder.CreateCondBr(IsNan, End, NotNan);
4356 Result->addIncoming(NanLiteral, NotZero);
4357
4358 // if (fabs(V) == infinity) return FP_INFINITY
4359 Builder.SetInsertPoint(NotNan);
4360 Value *VAbs = EmitFAbs(*this, V);
4361 Value *IsInf =
4362 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4363 "isinf");
4364 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4365 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4366 Builder.CreateCondBr(IsInf, End, NotInf);
4367 Result->addIncoming(InfLiteral, NotNan);
4368
4369 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4370 Builder.SetInsertPoint(NotInf);
4371 APFloat Smallest = APFloat::getSmallestNormalized(
4372 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4373 Value *IsNormal =
4374 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4375 "isnormal");
4376 Value *NormalResult =
4377 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4378 EmitScalarExpr(E->getArg(3)));
4379 Builder.CreateBr(End);
4380 Result->addIncoming(NormalResult, NotInf);
4381
4382 // return Result
4383 Builder.SetInsertPoint(End);
4384 return RValue::get(Result);
4385 }
4386
4387 // An alloca will always return a pointer to the alloca (stack) address
4388 // space. This address space need not be the same as the AST / Language
4389 // default (e.g. in C / C++ auto vars are in the generic address space). At
4390 // the AST level this is handled within CreateTempAlloca et al., but for the
4391 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4392 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
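// Illustrative consequence on a hypothetical target whose alloca address
// space differs from the generic one: 'char *p = __builtin_alloca(n);'
// emits an alloca in the stack AS plus an explicit addrspacecast to the
// pointer type 'p' expects.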
4393 case Builtin::BIalloca:
4394 case Builtin::BI_alloca:
4395 case Builtin::BI__builtin_alloca_uninitialized:
4396 case Builtin::BI__builtin_alloca: {
4397 Value *Size = EmitScalarExpr(E->getArg(0));
4398 const TargetInfo &TI = getContext().getTargetInfo();
4399 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4400 const Align SuitableAlignmentInBytes =
4401 CGM.getContext()
4402 .toCharUnitsFromBits(TI.getSuitableAlign())
4403 .getAsAlign();
4404 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4405 AI->setAlignment(SuitableAlignmentInBytes);
4406 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4407 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4408 LangAS AAS = getASTAllocaAddressSpace();
4409 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4410 if (AAS != EAS) {
4411 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4412 return RValue::get(
4413 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4414 }
4415 return RValue::get(AI);
4416 }
4417
4418 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4419 case Builtin::BI__builtin_alloca_with_align: {
4420 Value *Size = EmitScalarExpr(E->getArg(0));
4421 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4422 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4423 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4424 const Align AlignmentInBytes =
4425 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4426 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4427 AI->setAlignment(AlignmentInBytes);
4428 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4429 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4430 LangAS AAS = getASTAllocaAddressSpace();
4431 LangAS EAS = E->getType()->getPointeeType().getAddressSpace();
4432 if (AAS != EAS) {
4433 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4434 return RValue::get(
4435 getTargetHooks().performAddrSpaceCast(*this, AI, AAS, Ty));
4436 }
4437 return RValue::get(AI);
4438 }
4439
4440 case Builtin::BI__builtin_infer_alloc_token: {
4441 llvm::MDNode *MDN = buildAllocToken(E);
4442 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4443 llvm::Function *F =
4444 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4445 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4446 return RValue::get(TokenID);
4447 }
4448
4449 case Builtin::BIbzero:
4450 case Builtin::BI__builtin_bzero: {
4451 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4452 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4453 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4454 E->getArg(0)->getExprLoc(), FD, 0);
4455 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4456 addInstToNewSourceAtom(I, nullptr);
4457 return RValue::get(nullptr);
4458 }
4459
4460 case Builtin::BIbcopy:
4461 case Builtin::BI__builtin_bcopy: {
4462 Address Src = EmitPointerWithAlignment(E->getArg(0));
4463 Address Dest = EmitPointerWithAlignment(E->getArg(1));
4464 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4465 EmitNonNullArgCheck(RValue::get(Src.emitRawPointer(*this)),
4466 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4467 0);
4468 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4469 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4470 0);
4471 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4472 addInstToNewSourceAtom(I, nullptr);
4473 return RValue::get(nullptr);
4474 }
4475
4476 case Builtin::BImemcpy:
4477 case Builtin::BI__builtin_memcpy:
4478 case Builtin::BImempcpy:
4479 case Builtin::BI__builtin_mempcpy: {
4480 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4481 Address Src = EmitPointerWithAlignment(E->getArg(1));
4482 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4483 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4484 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4485 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4486 addInstToNewSourceAtom(I, nullptr);
4487 if (BuiltinID == Builtin::BImempcpy ||
4488 BuiltinID == Builtin::BI__builtin_mempcpy)
4489 return RValue::get(Builder.CreateInBoundsGEP(
4490 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4491 else
4492 return RValue::get(Dest, *this);
4493 }
4494
4495 case Builtin::BI__builtin_memcpy_inline: {
4496 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4497 Address Src = EmitPointerWithAlignment(E->getArg(1));
4498 uint64_t Size =
4499 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4500 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4501 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4502 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4503 addInstToNewSourceAtom(I, nullptr);
4504 return RValue::get(nullptr);
4505 }
4506
4507 case Builtin::BI__builtin_char_memchr:
4508 BuiltinID = Builtin::BI__builtin_memchr;
4509 break;
4510
4511 case Builtin::BI__builtin___memcpy_chk: {
4512 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
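// For example, __builtin___memcpy_chk(d, s, 4, 8) folds here to a plain
// 4-byte memcpy because the copy size (4) fits the object size (8).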
4513 Expr::EvalResult SizeResult, DstSizeResult;
4514 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4515 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4516 break;
4517 llvm::APSInt Size = SizeResult.Val.getInt();
4518 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4519 if (Size.ugt(DstSize))
4520 break;
4521 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4522 Address Src = EmitPointerWithAlignment(E->getArg(1));
4523 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4524 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4525 addInstToNewSourceAtom(I, nullptr);
4526 return RValue::get(Dest, *this);
4527 }
4528
4529 case Builtin::BI__builtin_objc_memmove_collectable: {
4530 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4531 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4532 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4533 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4534 DestAddr, SrcAddr, SizeVal);
4535 return RValue::get(DestAddr, *this);
4536 }
4537
4538 case Builtin::BI__builtin___memmove_chk: {
4539 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4540 Expr::EvalResult SizeResult, DstSizeResult;
4541 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4542 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4543 break;
4544 llvm::APSInt Size = SizeResult.Val.getInt();
4545 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4546 if (Size.ugt(DstSize))
4547 break;
4548 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4549 Address Src = EmitPointerWithAlignment(E->getArg(1));
4550 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4551 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4552 addInstToNewSourceAtom(I, nullptr);
4553 return RValue::get(Dest, *this);
4554 }
4555
4556 case Builtin::BI__builtin_trivially_relocate:
4557 case Builtin::BImemmove:
4558 case Builtin::BI__builtin_memmove: {
4559 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4560 Address Src = EmitPointerWithAlignment(E->getArg(1));
4561 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4562 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4563 SizeVal = Builder.CreateMul(
4564 SizeVal,
4565 ConstantInt::get(
4566 SizeVal->getType(),
4567 getContext()
4568 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4569 .getQuantity()));
4570 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4571 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4572 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4573 addInstToNewSourceAtom(I, nullptr);
4574 return RValue::get(Dest, *this);
4575 }
4576 case Builtin::BImemset:
4577 case Builtin::BI__builtin_memset: {
4578 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4579 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4580 Builder.getInt8Ty());
4581 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4582 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4583 E->getArg(0)->getExprLoc(), FD, 0);
4584 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4585 addInstToNewSourceAtom(I, ByteVal);
4586 return RValue::get(Dest, *this);
4587 }
4588 case Builtin::BI__builtin_memset_inline: {
4589 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4590 Value *ByteVal =
4591 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4592 uint64_t Size =
4593 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4594 EmitNonNullArgCheck(RValue::get(Dest.emitRawPointer(*this)),
4595 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4596 0);
4597 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4598 addInstToNewSourceAtom(I, nullptr);
4599 return RValue::get(nullptr);
4600 }
4601 case Builtin::BI__builtin___memset_chk: {
4602 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4603 Expr::EvalResult SizeResult, DstSizeResult;
4604 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4605 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4606 break;
4607 llvm::APSInt Size = SizeResult.Val.getInt();
4608 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4609 if (Size.ugt(DstSize))
4610 break;
4611 Address Dest = EmitPointerWithAlignment(E->getArg(0));
4612 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4613 Builder.getInt8Ty());
4614 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4615 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4616 addInstToNewSourceAtom(I, nullptr);
4617 return RValue::get(Dest, *this);
4618 }
4619 case Builtin::BI__builtin_wmemchr: {
4620 // The MSVC runtime library does not provide a definition of wmemchr, so we
4621 // need an inline implementation.
4622 if (!getTarget().getTriple().isOSMSVCRT())
4623 break;
4624
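// The blocks emitted below are roughly equivalent to:
//   while (size != 0) {
//     if (*str == chr) return (wchar_t *)str;
//     ++str; --size;
//   }
//   return nullptr;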
4625 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4626 Value *Str = EmitScalarExpr(E->getArg(0));
4627 Value *Chr = EmitScalarExpr(E->getArg(1));
4628 Value *Size = EmitScalarExpr(E->getArg(2));
4629
4630 BasicBlock *Entry = Builder.GetInsertBlock();
4631 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4632 BasicBlock *Next = createBasicBlock("wmemchr.next");
4633 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4634 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4635 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4636
4637 EmitBlock(CmpEq);
4638 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4639 StrPhi->addIncoming(Str, Entry);
4640 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4641 SizePhi->addIncoming(Size, Entry);
4642 CharUnits WCharAlign =
4643 getContext().getTypeAlignInChars(getContext().WCharTy);
4644 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4645 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4646 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4647 Builder.CreateCondBr(StrEqChr, Exit, Next);
4648
4649 EmitBlock(Next);
4650 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4651 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4652 Value *NextSizeEq0 =
4653 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4654 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4655 StrPhi->addIncoming(NextStr, Next);
4656 SizePhi->addIncoming(NextSize, Next);
4657
4658 EmitBlock(Exit);
4659 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4660 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4661 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4662 Ret->addIncoming(FoundChr, CmpEq);
4663 return RValue::get(Ret);
4664 }
4665 case Builtin::BI__builtin_wmemcmp: {
4666 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4667 // need an inline implementation.
4668 if (!getTarget().getTriple().isOSMSVCRT())
4669 break;
4670
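// The blocks emitted below are roughly equivalent to (note the unsigned
// element comparison, matching MSVC's wchar_t):
//   while (size != 0) {
//     if (*dst > *src) return 1;
//     if (*dst < *src) return -1;
//     ++dst; ++src; --size;
//   }
//   return 0;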
4671 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4672
4673 Value *Dst = EmitScalarExpr(E->getArg(0));
4674 Value *Src = EmitScalarExpr(E->getArg(1));
4675 Value *Size = EmitScalarExpr(E->getArg(2));
4676
4677 BasicBlock *Entry = Builder.GetInsertBlock();
4678 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4679 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4680 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4681 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4682 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4683 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4684
4685 EmitBlock(CmpGT);
4686 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4687 DstPhi->addIncoming(Dst, Entry);
4688 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4689 SrcPhi->addIncoming(Src, Entry);
4690 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4691 SizePhi->addIncoming(Size, Entry);
4692 CharUnits WCharAlign =
4693 getContext().getTypeAlignInChars(getContext().WCharTy);
4694 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4695 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4696 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4697 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4698
4699 EmitBlock(CmpLT);
4700 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4701 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4702
4703 EmitBlock(Next);
4704 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4705 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4706 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4707 Value *NextSizeEq0 =
4708 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4709 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4710 DstPhi->addIncoming(NextDst, Next);
4711 SrcPhi->addIncoming(NextSrc, Next);
4712 SizePhi->addIncoming(NextSize, Next);
4713
4714 EmitBlock(Exit);
4715 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4716 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4717 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4718 Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
4719 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4720 return RValue::get(Ret);
4721 }
4722 case Builtin::BI__builtin_dwarf_cfa: {
4723 // The offset in bytes from the first argument to the CFA.
4724 //
4725 // Why on earth is this in the frontend? Is there any reason at
4726 // all that the backend can't reasonably determine this while
4727 // lowering llvm.eh.dwarf.cfa()?
4728 //
4729 // TODO: If there's a satisfactory reason, add a target hook for
4730 // this instead of hard-coding 0, which is correct for most targets.
4731 int32_t Offset = 0;
4732
4733 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4734 return RValue::get(Builder.CreateCall(F,
4735 llvm::ConstantInt::get(Int32Ty, Offset)));
4736 }
4737 case Builtin::BI__builtin_return_address: {
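// e.g. __builtin_return_address(0) lowers to:
//   %ra = call ptr @llvm.returnaddress(i32 0)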
4738 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4739 getContext().UnsignedIntTy);
4740 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4741 return RValue::get(Builder.CreateCall(F, Depth));
4742 }
4743 case Builtin::BI_ReturnAddress: {
4744 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4745 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4746 }
4747 case Builtin::BI__builtin_frame_address: {
4748 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4749 getContext().UnsignedIntTy);
4750 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4751 return RValue::get(Builder.CreateCall(F, Depth));
4752 }
4753 case Builtin::BI__builtin_extract_return_addr: {
4754 Value *Address = EmitScalarExpr(E->getArg(0));
4755 Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
4756 return RValue::get(Result);
4757 }
4758 case Builtin::BI__builtin_frob_return_addr: {
4759 Value *Address = EmitScalarExpr(E->getArg(0));
4760 Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
4761 return RValue::get(Result);
4762 }
4763 case Builtin::BI__builtin_dwarf_sp_column: {
4764 llvm::IntegerType *Ty
4765 = cast<llvm::IntegerType>(ConvertType(E->getType()));
4766 int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
4767 if (Column == -1) {
4768 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4769 return RValue::get(llvm::UndefValue::get(Ty));
4770 }
4771 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4772 }
4773 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4774 Value *Address = EmitScalarExpr(E->getArg(0));
4775 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4776 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4777 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4778 }
4779 case Builtin::BI__builtin_eh_return: {
4780 Value *Int = EmitScalarExpr(E->getArg(0));
4781 Value *Ptr = EmitScalarExpr(E->getArg(1));
4782
4783 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4784 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4785 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4786 Function *F =
4787 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4788 : Intrinsic::eh_return_i64);
4789 Builder.CreateCall(F, {Int, Ptr});
4790 Builder.CreateUnreachable();
4791
4792 // We do need to preserve an insertion point.
4793 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4794
4795 return RValue::get(nullptr);
4796 }
4797 case Builtin::BI__builtin_unwind_init: {
4798 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4799 Builder.CreateCall(F);
4800 return RValue::get(nullptr);
4801 }
4802 case Builtin::BI__builtin_extend_pointer: {
4803 // Extends a pointer to the size of an _Unwind_Word, which is
4804 // uint64_t on all platforms. Generally this gets poked into a
4805 // register and eventually used as an address, so if the
4806 // addressing registers are wider than pointers and the platform
4807 // doesn't implicitly ignore high-order bits when doing
4808 // addressing, we need to make sure we zext / sext based on
4809 // the platform's expectations.
4810 //
4811 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4812
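// On a 32-bit target this emits, for example:
//   %extend.cast = ptrtoint ptr %p to i32
//   %extend.zext = zext i32 %extend.cast to i64   ; or %extend.sext, per
//                                                 ; the target hook below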
4813 // Cast the pointer to intptr_t.
4814 Value *Ptr = EmitScalarExpr(E->getArg(0));
4815 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4816
4817 // If that's 64 bits, we're done.
4818 if (IntPtrTy->getBitWidth() == 64)
4819 return RValue::get(Result);
4820
4821 // Otherwise, ask the codegen data what to do.
4822 if (getTargetHooks().extendPointerWithSExt())
4823 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4824 else
4825 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4826 }
4827 case Builtin::BI__builtin_setjmp: {
4828 // Buffer is a void**.
4829 Address Buf = EmitPointerWithAlignment(E->getArg(0));
4830
4831 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4832 // On this target, the back end fills in the context buffer completely.
4833 // It doesn't really matter if the frontend stores to the buffer before
4834 // calling setjmp; the back end is going to overwrite it anyway.
4835 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4836 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4837 }
4838
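// On this generic path the buffer layout is, roughly: slot 0 holds the
// frame pointer, slot 1 is reserved for the target (llvm.eh.sjlj.setjmp
// stashes the resume address there), and slot 2 holds the saved stack
// pointer.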
4839 // Store the frame pointer to the setjmp buffer.
4840 Value *FrameAddr = Builder.CreateCall(
4841 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4842 ConstantInt::get(Int32Ty, 0));
4843 Builder.CreateStore(FrameAddr, Buf);
4844
4845 // Store the stack pointer to the setjmp buffer.
4846 Value *StackAddr = Builder.CreateStackSave();
4847 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4848
4849 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4850 Builder.CreateStore(StackAddr, StackSaveSlot);
4851
4852 // Call LLVM's EH setjmp, which is lightweight.
4853 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4854 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4855 }
4856 case Builtin::BI__builtin_longjmp: {
4857 Value *Buf = EmitScalarExpr(E->getArg(0));
4858
4859 // Call LLVM's EH longjmp, which is lightweight.
4860 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4861
4862 // longjmp doesn't return; mark this as unreachable.
4863 Builder.CreateUnreachable();
4864
4865 // We do need to preserve an insertion point.
4866 EmitBlock(createBasicBlock("longjmp.cont"));
4867
4868 return RValue::get(nullptr);
4869 }
4870 case Builtin::BI__builtin_launder: {
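// Unless -fstrict-vtable-pointers is enabled (and the pointee type carries
// vtable pointers), this is a no-op; otherwise it emits roughly:
//   %result = call ptr @llvm.launder.invariant.group.p0(ptr %arg)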
4871 const Expr *Arg = E->getArg(0);
4872 QualType ArgTy = Arg->getType()->getPointeeType();
4873 Value *Ptr = EmitScalarExpr(Arg);
4874 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4875 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4876
4877 return RValue::get(Ptr);
4878 }
4879 case Builtin::BI__sync_fetch_and_add:
4880 case Builtin::BI__sync_fetch_and_sub:
4881 case Builtin::BI__sync_fetch_and_or:
4882 case Builtin::BI__sync_fetch_and_and:
4883 case Builtin::BI__sync_fetch_and_xor:
4884 case Builtin::BI__sync_fetch_and_nand:
4885 case Builtin::BI__sync_add_and_fetch:
4886 case Builtin::BI__sync_sub_and_fetch:
4887 case Builtin::BI__sync_and_and_fetch:
4888 case Builtin::BI__sync_or_and_fetch:
4889 case Builtin::BI__sync_xor_and_fetch:
4890 case Builtin::BI__sync_nand_and_fetch:
4891 case Builtin::BI__sync_val_compare_and_swap:
4892 case Builtin::BI__sync_bool_compare_and_swap:
4893 case Builtin::BI__sync_lock_test_and_set:
4894 case Builtin::BI__sync_lock_release:
4895 case Builtin::BI__sync_swap:
4896 llvm_unreachable("Shouldn't make it through sema");
4897 case Builtin::BI__sync_fetch_and_add_1:
4898 case Builtin::BI__sync_fetch_and_add_2:
4899 case Builtin::BI__sync_fetch_and_add_4:
4900 case Builtin::BI__sync_fetch_and_add_8:
4901 case Builtin::BI__sync_fetch_and_add_16:
4902 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4903 case Builtin::BI__sync_fetch_and_sub_1:
4904 case Builtin::BI__sync_fetch_and_sub_2:
4905 case Builtin::BI__sync_fetch_and_sub_4:
4906 case Builtin::BI__sync_fetch_and_sub_8:
4907 case Builtin::BI__sync_fetch_and_sub_16:
4908 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
4909 case Builtin::BI__sync_fetch_and_or_1:
4910 case Builtin::BI__sync_fetch_and_or_2:
4911 case Builtin::BI__sync_fetch_and_or_4:
4912 case Builtin::BI__sync_fetch_and_or_8:
4913 case Builtin::BI__sync_fetch_and_or_16:
4914 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
4915 case Builtin::BI__sync_fetch_and_and_1:
4916 case Builtin::BI__sync_fetch_and_and_2:
4917 case Builtin::BI__sync_fetch_and_and_4:
4918 case Builtin::BI__sync_fetch_and_and_8:
4919 case Builtin::BI__sync_fetch_and_and_16:
4920 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
4921 case Builtin::BI__sync_fetch_and_xor_1:
4922 case Builtin::BI__sync_fetch_and_xor_2:
4923 case Builtin::BI__sync_fetch_and_xor_4:
4924 case Builtin::BI__sync_fetch_and_xor_8:
4925 case Builtin::BI__sync_fetch_and_xor_16:
4926 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
4927 case Builtin::BI__sync_fetch_and_nand_1:
4928 case Builtin::BI__sync_fetch_and_nand_2:
4929 case Builtin::BI__sync_fetch_and_nand_4:
4930 case Builtin::BI__sync_fetch_and_nand_8:
4931 case Builtin::BI__sync_fetch_and_nand_16:
4932 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
4933
4934 // Clang extensions: not overloaded yet.
4935 case Builtin::BI__sync_fetch_and_min:
4936 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
4937 case Builtin::BI__sync_fetch_and_max:
4938 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
4939 case Builtin::BI__sync_fetch_and_umin:
4940 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
4941 case Builtin::BI__sync_fetch_and_umax:
4942 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
4943
4944 case Builtin::BI__sync_add_and_fetch_1:
4945 case Builtin::BI__sync_add_and_fetch_2:
4946 case Builtin::BI__sync_add_and_fetch_4:
4947 case Builtin::BI__sync_add_and_fetch_8:
4948 case Builtin::BI__sync_add_and_fetch_16:
4949 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
4950 llvm::Instruction::Add);
4951 case Builtin::BI__sync_sub_and_fetch_1:
4952 case Builtin::BI__sync_sub_and_fetch_2:
4953 case Builtin::BI__sync_sub_and_fetch_4:
4954 case Builtin::BI__sync_sub_and_fetch_8:
4955 case Builtin::BI__sync_sub_and_fetch_16:
4956 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
4957 llvm::Instruction::Sub);
4958 case Builtin::BI__sync_and_and_fetch_1:
4959 case Builtin::BI__sync_and_and_fetch_2:
4960 case Builtin::BI__sync_and_and_fetch_4:
4961 case Builtin::BI__sync_and_and_fetch_8:
4962 case Builtin::BI__sync_and_and_fetch_16:
4963 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
4964 llvm::Instruction::And);
4965 case Builtin::BI__sync_or_and_fetch_1:
4966 case Builtin::BI__sync_or_and_fetch_2:
4967 case Builtin::BI__sync_or_and_fetch_4:
4968 case Builtin::BI__sync_or_and_fetch_8:
4969 case Builtin::BI__sync_or_and_fetch_16:
4970 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
4971 llvm::Instruction::Or);
4972 case Builtin::BI__sync_xor_and_fetch_1:
4973 case Builtin::BI__sync_xor_and_fetch_2:
4974 case Builtin::BI__sync_xor_and_fetch_4:
4975 case Builtin::BI__sync_xor_and_fetch_8:
4976 case Builtin::BI__sync_xor_and_fetch_16:
4977 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
4978 llvm::Instruction::Xor);
4979 case Builtin::BI__sync_nand_and_fetch_1:
4980 case Builtin::BI__sync_nand_and_fetch_2:
4981 case Builtin::BI__sync_nand_and_fetch_4:
4982 case Builtin::BI__sync_nand_and_fetch_8:
4983 case Builtin::BI__sync_nand_and_fetch_16:
4984 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
4985 llvm::Instruction::And, true);
4986
4987 case Builtin::BI__sync_val_compare_and_swap_1:
4988 case Builtin::BI__sync_val_compare_and_swap_2:
4989 case Builtin::BI__sync_val_compare_and_swap_4:
4990 case Builtin::BI__sync_val_compare_and_swap_8:
4991 case Builtin::BI__sync_val_compare_and_swap_16:
4992 return RValue::get(MakeAtomicCmpXchgValue(*this, E, false));
4993
4994 case Builtin::BI__sync_bool_compare_and_swap_1:
4995 case Builtin::BI__sync_bool_compare_and_swap_2:
4996 case Builtin::BI__sync_bool_compare_and_swap_4:
4997 case Builtin::BI__sync_bool_compare_and_swap_8:
4998 case Builtin::BI__sync_bool_compare_and_swap_16:
4999 return RValue::get(MakeAtomicCmpXchgValue(*this, E, true));
5000
5001 case Builtin::BI__sync_swap_1:
5002 case Builtin::BI__sync_swap_2:
5003 case Builtin::BI__sync_swap_4:
5004 case Builtin::BI__sync_swap_8:
5005 case Builtin::BI__sync_swap_16:
5006 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5007
5008 case Builtin::BI__sync_lock_test_and_set_1:
5009 case Builtin::BI__sync_lock_test_and_set_2:
5010 case Builtin::BI__sync_lock_test_and_set_4:
5011 case Builtin::BI__sync_lock_test_and_set_8:
5012 case Builtin::BI__sync_lock_test_and_set_16:
5013 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5014
5015 case Builtin::BI__sync_lock_release_1:
5016 case Builtin::BI__sync_lock_release_2:
5017 case Builtin::BI__sync_lock_release_4:
5018 case Builtin::BI__sync_lock_release_8:
5019 case Builtin::BI__sync_lock_release_16: {
5020 Address Ptr = CheckAtomicAlignment(*this, E);
5021 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
5022
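// e.g. __sync_lock_release(&flag) on an int lowers to:
//   store atomic i32 0, ptr %flag release, align 4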
5023 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
5024 getContext().getTypeSize(ElTy));
5025 llvm::StoreInst *Store =
5026 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
5027 Store->setAtomic(llvm::AtomicOrdering::Release);
5028 return RValue::get(nullptr);
5029 }
5030
5031 case Builtin::BI__sync_synchronize: {
5032 // We assume this is supposed to correspond to a C++0x-style
5033 // sequentially-consistent fence (i.e. this is only usable for
5034 // synchronization, not device I/O or anything like that). This intrinsic
5035 // is really badly designed in the sense that in theory, there isn't
5036 // any way to safely use it... but in practice, it mostly works
5037 // to use it with non-atomic loads and stores to get acquire/release
5038 // semantics.
5039 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5040 return RValue::get(nullptr);
5041 }
5042
5043 case Builtin::BI__builtin_nontemporal_load:
5044 return RValue::get(EmitNontemporalLoad(*this, E));
5045 case Builtin::BI__builtin_nontemporal_store:
5046 return RValue::get(EmitNontemporalStore(*this, E));
5047 case Builtin::BI__c11_atomic_is_lock_free:
5048 case Builtin::BI__atomic_is_lock_free: {
5049 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5050 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5051 // _Atomic(T) is always properly-aligned.
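// e.g. the __c11 form with a 4-byte size becomes, on a 64-bit target,
// a call like:
//   %r = call zeroext i1 @__atomic_is_lock_free(i64 4, ptr null)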
5052 const char *LibCallName = "__atomic_is_lock_free";
5053 CallArgList Args;
5054 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5055 getContext().getSizeType());
5056 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5057 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5058 getContext().VoidPtrTy);
5059 else
5060 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5061 getContext().VoidPtrTy);
5062 const CGFunctionInfo &FuncInfo =
5063 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5064 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5065 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5066 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5067 ReturnValueSlot(), Args);
5068 }
5069
5070 case Builtin::BI__atomic_thread_fence:
5071 case Builtin::BI__atomic_signal_fence:
5072 case Builtin::BI__c11_atomic_thread_fence:
5073 case Builtin::BI__c11_atomic_signal_fence: {
5074 llvm::SyncScope::ID SSID;
5075 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5076 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5077 SSID = llvm::SyncScope::SingleThread;
5078 else
5079 SSID = llvm::SyncScope::System;
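// With a constant order the fence folds to a single instruction, e.g.
//   __atomic_thread_fence(__ATOMIC_ACQUIRE)  -->  fence acquire
// A dynamic order instead expands into the switch emitted further below.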
5080 Value *Order = EmitScalarExpr(E->getArg(0));
5081 if (isa<llvm::ConstantInt>(Order)) {
5082 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5083 switch (ord) {
5084 case 0: // memory_order_relaxed
5085 default: // invalid order
5086 break;
5087 case 1: // memory_order_consume
5088 case 2: // memory_order_acquire
5089 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5090 break;
5091 case 3: // memory_order_release
5092 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5093 break;
5094 case 4: // memory_order_acq_rel
5095 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5096 break;
5097 case 5: // memory_order_seq_cst
5098 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5099 break;
5100 }
5101 return RValue::get(nullptr);
5102 }
5103
5104 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5105 AcquireBB = createBasicBlock("acquire", CurFn);
5106 ReleaseBB = createBasicBlock("release", CurFn);
5107 AcqRelBB = createBasicBlock("acqrel", CurFn);
5108 SeqCstBB = createBasicBlock("seqcst", CurFn);
5109 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5110
5111 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5112 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5113
5114 Builder.SetInsertPoint(AcquireBB);
5115 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5116 Builder.CreateBr(ContBB);
5117 SI->addCase(Builder.getInt32(1), AcquireBB);
5118 SI->addCase(Builder.getInt32(2), AcquireBB);
5119
5120 Builder.SetInsertPoint(ReleaseBB);
5121 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5122 Builder.CreateBr(ContBB);
5123 SI->addCase(Builder.getInt32(3), ReleaseBB);
5124
5125 Builder.SetInsertPoint(AcqRelBB);
5126 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5127 Builder.CreateBr(ContBB);
5128 SI->addCase(Builder.getInt32(4), AcqRelBB);
5129
5130 Builder.SetInsertPoint(SeqCstBB);
5131 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5132 Builder.CreateBr(ContBB);
5133 SI->addCase(Builder.getInt32(5), SeqCstBB);
5134
5135 Builder.SetInsertPoint(ContBB);
5136 return RValue::get(nullptr);
5137 }
5138 case Builtin::BI__scoped_atomic_thread_fence: {
5139 auto ScopeModel = AtomicScopeModel::create(AtomicScopeModelKind::Generic);
5140
5141 Value *Order = EmitScalarExpr(E->getArg(0));
5142 Value *Scope = EmitScalarExpr(E->getArg(1));
5143 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5144 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
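// When both order and scope are constants this folds to one fence, e.g.
// (on AMDGPU, where device scope maps to the "agent" sync scope):
//   __scoped_atomic_thread_fence(__ATOMIC_RELEASE, __MEMORY_SCOPE_DEVICE)
//     -->  fence syncscope("agent") release
// Dynamic values expand into the nested switches further below.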
5145 if (Ord && Scp) {
5146 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5147 ? ScopeModel->map(Scp->getZExtValue())
5148 : ScopeModel->map(ScopeModel->getFallBackValue());
5149 switch (Ord->getZExtValue()) {
5150 case 0: // memory_order_relaxed
5151 default: // invalid order
5152 break;
5153 case 1: // memory_order_consume
5154 case 2: // memory_order_acquire
5155 Builder.CreateFence(
5156 llvm::AtomicOrdering::Acquire,
5157 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5158 llvm::AtomicOrdering::Acquire,
5159 getLLVMContext()));
5160 break;
5161 case 3: // memory_order_release
5162 Builder.CreateFence(
5163 llvm::AtomicOrdering::Release,
5164 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5165 llvm::AtomicOrdering::Release,
5166 getLLVMContext()));
5167 break;
5168 case 4: // memory_order_acq_rel
5169 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5170 getTargetHooks().getLLVMSyncScopeID(
5171 getLangOpts(), SS,
5172 llvm::AtomicOrdering::AcquireRelease,
5173 getLLVMContext()));
5174 break;
5175 case 5: // memory_order_seq_cst
5176 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5177 getTargetHooks().getLLVMSyncScopeID(
5178 getLangOpts(), SS,
5179 llvm::AtomicOrdering::SequentiallyConsistent,
5180 getLLVMContext()));
5181 break;
5182 }
5183 return RValue::get(nullptr);
5184 }
5185
5186 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5187
5188 llvm::SmallVector<std::pair<llvm::BasicBlock *, llvm::AtomicOrdering>>
5189 OrderBBs;
5190 if (Ord) {
5191 switch (Ord->getZExtValue()) {
5192 case 0: // memory_order_relaxed
5193 default: // invalid order
5194 ContBB->eraseFromParent();
5195 return RValue::get(nullptr);
5196 case 1: // memory_order_consume
5197 case 2: // memory_order_acquire
5198 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5199 llvm::AtomicOrdering::Acquire);
5200 break;
5201 case 3: // memory_order_release
5202 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5203 llvm::AtomicOrdering::Release);
5204 break;
5205 case 4: // memory_order_acq_rel
5206 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5207 llvm::AtomicOrdering::AcquireRelease);
5208 break;
5209 case 5: // memory_order_seq_cst
5210 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5211 llvm::AtomicOrdering::SequentiallyConsistent);
5212 break;
5213 }
5214 } else {
5215 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5216 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5217 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5218 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5219
5220 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5221 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5222 SI->addCase(Builder.getInt32(1), AcquireBB);
5223 SI->addCase(Builder.getInt32(2), AcquireBB);
5224 SI->addCase(Builder.getInt32(3), ReleaseBB);
5225 SI->addCase(Builder.getInt32(4), AcqRelBB);
5226 SI->addCase(Builder.getInt32(5), SeqCstBB);
5227
5228 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5229 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5230 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5231 OrderBBs.emplace_back(SeqCstBB,
5232 llvm::AtomicOrdering::SequentiallyConsistent);
5233 }
5234
5235 for (auto &[OrderBB, Ordering] : OrderBBs) {
5236 Builder.SetInsertPoint(OrderBB);
5237 if (Scp) {
5238 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5239 ? ScopeModel->map(Scp->getZExtValue())
5240 : ScopeModel->map(ScopeModel->getFallBackValue());
5241 Builder.CreateFence(Ordering,
5242 getTargetHooks().getLLVMSyncScopeID(
5243 getLangOpts(), SS, Ordering, getLLVMContext()));
5244 Builder.CreateBr(ContBB);
5245 } else {
5246 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5247 for (unsigned Scp : ScopeModel->getRuntimeValues())
5248 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5249
5250 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5251 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5252 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5253 auto *B = BBs[Scp];
5254 SI->addCase(Builder.getInt32(Scp), B);
5255
5256 Builder.SetInsertPoint(B);
5257 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5258 getLangOpts(), ScopeModel->map(Scp),
5259 Ordering, getLLVMContext()));
5260 Builder.CreateBr(ContBB);
5261 }
5262 }
5263 }
5264
5265 Builder.SetInsertPoint(ContBB);
5266 return RValue::get(nullptr);
5267 }
5268
5269 case Builtin::BI__builtin_signbit:
5270 case Builtin::BI__builtin_signbitf:
5271 case Builtin::BI__builtin_signbitl: {
5272 return RValue::get(
5273 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5274 ConvertType(E->getType())));
5275 }
5276 case Builtin::BI__warn_memset_zero_len:
5277 return RValue::getIgnored();
5278 case Builtin::BI__annotation: {
5279 // Re-encode each wide string to UTF8 and make an MDString.
5280 SmallVector<Metadata *, 1> Strings;
5281 for (const Expr *Arg : E->arguments()) {
5282 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5283 assert(Str->getCharByteWidth() == 2);
5284 StringRef WideBytes = Str->getBytes();
5285 std::string StrUtf8;
5286 if (!convertUTF16ToUTF8String(
5287 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5288 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5289 continue;
5290 }
5291 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5292 }
5293
5294 // Build an MDTuple of MDStrings and emit the intrinsic call.
5295 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5296 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5297 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5298 return RValue::getIgnored();
5299 }
5300 case Builtin::BI__builtin_annotation: {
5301 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5302 llvm::Function *F = CGM.getIntrinsic(
5303 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5304
5305 // Get the annotation string, looking through casts. Sema requires this to
5306 // be a non-wide string literal, potentially cast, so the cast<> is safe.
5307 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5308 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5309 return RValue::get(
5310 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5311 }
5312 case Builtin::BI__builtin_addcb:
5313 case Builtin::BI__builtin_addcs:
5314 case Builtin::BI__builtin_addc:
5315 case Builtin::BI__builtin_addcl:
5316 case Builtin::BI__builtin_addcll:
5317 case Builtin::BI__builtin_subcb:
5318 case Builtin::BI__builtin_subcs:
5319 case Builtin::BI__builtin_subc:
5320 case Builtin::BI__builtin_subcl:
5321 case Builtin::BI__builtin_subcll: {
5322
5323 // We translate all of these builtins from expressions of the form:
5324 // int x = ..., y = ..., carryin = ..., carryout, result;
5325 // result = __builtin_addc(x, y, carryin, &carryout);
5326 //
5327 // to LLVM IR of the form:
5328 //
5329 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5330 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5331 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5332 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5333 // i32 %carryin)
5334 // %result = extractvalue {i32, i1} %tmp2, 0
5335 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5336 // %tmp3 = or i1 %carry1, %carry2
5337 // %tmp4 = zext i1 %tmp3 to i32
5338 // store i32 %tmp4, i32* %carryout
5339
5340 // Scalarize our inputs.
5341 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5342 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5343 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5344 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5345
5346 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5347 Intrinsic::ID IntrinsicId;
5348 switch (BuiltinID) {
5349 default: llvm_unreachable("Unknown multiprecision builtin id.");
5350 case Builtin::BI__builtin_addcb:
5351 case Builtin::BI__builtin_addcs:
5352 case Builtin::BI__builtin_addc:
5353 case Builtin::BI__builtin_addcl:
5354 case Builtin::BI__builtin_addcll:
5355 IntrinsicId = Intrinsic::uadd_with_overflow;
5356 break;
5357 case Builtin::BI__builtin_subcb:
5358 case Builtin::BI__builtin_subcs:
5359 case Builtin::BI__builtin_subc:
5360 case Builtin::BI__builtin_subcl:
5361 case Builtin::BI__builtin_subcll:
5362 IntrinsicId = Intrinsic::usub_with_overflow;
5363 break;
5364 }
5365
5366 // Construct our resulting LLVM IR expression.
5367 llvm::Value *Carry1;
5368 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5369 X, Y, Carry1);
5370 llvm::Value *Carry2;
5371 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5372 Sum1, Carryin, Carry2);
5373 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5374 X->getType());
5375 Builder.CreateStore(CarryOut, CarryOutPtr);
5376 return RValue::get(Sum2);
5377 }
5378
5379 case Builtin::BI__builtin_add_overflow:
5380 case Builtin::BI__builtin_sub_overflow:
5381 case Builtin::BI__builtin_mul_overflow: {
5382 const clang::Expr *LeftArg = E->getArg(0);
5383 const clang::Expr *RightArg = E->getArg(1);
5384 const clang::Expr *ResultArg = E->getArg(2);
5385
5386 clang::QualType ResultQTy =
5387 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5388
5389 WidthAndSignedness LeftInfo =
5390 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5391 WidthAndSignedness RightInfo =
5392 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5393 WidthAndSignedness ResultInfo =
5394 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5395
5396 // Handle mixed-sign multiplication as a special case, because adding
5397 // runtime or backend support for our generic irgen would be too expensive.
5398 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5399 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5400 RightInfo, ResultArg, ResultQTy,
5401 ResultInfo);
5402
5403 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5404 ResultInfo))
5405 return EmitCheckedUnsignedMultiplySignedResult(
5406 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5407 ResultInfo);
5408
5409 WidthAndSignedness EncompassingInfo =
5410 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5411
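// For instance, __builtin_add_overflow on (int, unsigned, short *) uses
// the encompassing type i33 (signed, and one bit wider than unsigned so it
// can hold both operand ranges), calls llvm.sadd.with.overflow.i33, then
// truncates to i16, also reporting overflow if the truncation changed the
// value.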
5412 llvm::Type *EncompassingLLVMTy =
5413 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5414
5415 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5416
5417 Intrinsic::ID IntrinsicId;
5418 switch (BuiltinID) {
5419 default:
5420 llvm_unreachable("Unknown overflow builtin id.");
5421 case Builtin::BI__builtin_add_overflow:
5422 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5423 : Intrinsic::uadd_with_overflow;
5424 break;
5425 case Builtin::BI__builtin_sub_overflow:
5426 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5427 : Intrinsic::usub_with_overflow;
5428 break;
5429 case Builtin::BI__builtin_mul_overflow:
5430 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5431 : Intrinsic::umul_with_overflow;
5432 break;
5433 }
5434
5435 llvm::Value *Left = EmitScalarExpr(LeftArg);
5436 llvm::Value *Right = EmitScalarExpr(RightArg);
5437 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5438
5439 // Extend each operand to the encompassing type.
5440 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5441 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5442
5443 // Perform the operation on the extended values.
5444 llvm::Value *Overflow, *Result;
5445 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5446
5447 if (EncompassingInfo.Width > ResultInfo.Width) {
5448 // The encompassing type is wider than the result type, so we need to
5449 // truncate it.
5450 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5451
5452 // To see if the truncation caused an overflow, we will extend
5453 // the result and then compare it to the original result.
5454 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5455 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5456 llvm::Value *TruncationOverflow =
5457 Builder.CreateICmpNE(Result, ResultTruncExt);
5458
5459 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5460 Result = ResultTrunc;
5461 }
5462
5463 // Finally, store the result using the pointer.
5464 bool isVolatile =
5465 ResultArg->getType()->getPointeeType().isVolatileQualified();
5466 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5467
5468 return RValue::get(Overflow);
5469 }
5470
5471 case Builtin::BI__builtin_uadd_overflow:
5472 case Builtin::BI__builtin_uaddl_overflow:
5473 case Builtin::BI__builtin_uaddll_overflow:
5474 case Builtin::BI__builtin_usub_overflow:
5475 case Builtin::BI__builtin_usubl_overflow:
5476 case Builtin::BI__builtin_usubll_overflow:
5477 case Builtin::BI__builtin_umul_overflow:
5478 case Builtin::BI__builtin_umull_overflow:
5479 case Builtin::BI__builtin_umulll_overflow:
5480 case Builtin::BI__builtin_sadd_overflow:
5481 case Builtin::BI__builtin_saddl_overflow:
5482 case Builtin::BI__builtin_saddll_overflow:
5483 case Builtin::BI__builtin_ssub_overflow:
5484 case Builtin::BI__builtin_ssubl_overflow:
5485 case Builtin::BI__builtin_ssubll_overflow:
5486 case Builtin::BI__builtin_smul_overflow:
5487 case Builtin::BI__builtin_smull_overflow:
5488 case Builtin::BI__builtin_smulll_overflow: {
5489
5490 // We translate all of these builtins directly to the relevant LLVM IR node.
5491
5492 // Scalarize our inputs.
5493 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5494 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5495 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5496
5497 // Decide which of the overflow intrinsics we are lowering to:
5498 Intrinsic::ID IntrinsicId;
5499 switch (BuiltinID) {
5500 default: llvm_unreachable("Unknown overflow builtin id.");
5501 case Builtin::BI__builtin_uadd_overflow:
5502 case Builtin::BI__builtin_uaddl_overflow:
5503 case Builtin::BI__builtin_uaddll_overflow:
5504 IntrinsicId = Intrinsic::uadd_with_overflow;
5505 break;
5506 case Builtin::BI__builtin_usub_overflow:
5507 case Builtin::BI__builtin_usubl_overflow:
5508 case Builtin::BI__builtin_usubll_overflow:
5509 IntrinsicId = Intrinsic::usub_with_overflow;
5510 break;
5511 case Builtin::BI__builtin_umul_overflow:
5512 case Builtin::BI__builtin_umull_overflow:
5513 case Builtin::BI__builtin_umulll_overflow:
5514 IntrinsicId = Intrinsic::umul_with_overflow;
5515 break;
5516 case Builtin::BI__builtin_sadd_overflow:
5517 case Builtin::BI__builtin_saddl_overflow:
5518 case Builtin::BI__builtin_saddll_overflow:
5519 IntrinsicId = Intrinsic::sadd_with_overflow;
5520 break;
5521 case Builtin::BI__builtin_ssub_overflow:
5522 case Builtin::BI__builtin_ssubl_overflow:
5523 case Builtin::BI__builtin_ssubll_overflow:
5524 IntrinsicId = Intrinsic::ssub_with_overflow;
5525 break;
5526 case Builtin::BI__builtin_smul_overflow:
5527 case Builtin::BI__builtin_smull_overflow:
5528 case Builtin::BI__builtin_smulll_overflow:
5529 IntrinsicId = Intrinsic::smul_with_overflow;
5530 break;
5531 }
5532
5533
5534 llvm::Value *Carry;
5535 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5536 Builder.CreateStore(Sum, SumOutPtr);
5537
5538 return RValue::get(Carry);
5539 }
5540 case Builtin::BIaddressof:
5541 case Builtin::BI__addressof:
5542 case Builtin::BI__builtin_addressof:
5543 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5544 case Builtin::BI__builtin_function_start:
5545 return RValue::get(CGM.GetFunctionStart(
5546 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5547 case Builtin::BI__builtin_operator_new:
5548 return EmitBuiltinNewDeleteCall(
5549 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5550 case Builtin::BI__builtin_operator_delete:
5551 EmitBuiltinNewDeleteCall(
5552 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5553 return RValue::get(nullptr);
5554
5555 case Builtin::BI__builtin_is_aligned:
5556 return EmitBuiltinIsAligned(E);
5557 case Builtin::BI__builtin_align_up:
5558 return EmitBuiltinAlignTo(E, true);
5559 case Builtin::BI__builtin_align_down:
5560 return EmitBuiltinAlignTo(E, false);
5561
5562 case Builtin::BI__noop:
5563 // __noop always evaluates to an integer literal zero.
5564 return RValue::get(ConstantInt::get(IntTy, 0));
5565 case Builtin::BI__builtin_call_with_static_chain: {
5566 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5567 const Expr *Chain = E->getArg(1);
5568 return EmitCall(Call->getCallee()->getType(),
5569 EmitCallee(Call->getCallee()), Call, ReturnValue,
5570 EmitScalarExpr(Chain));
5571 }
5572 case Builtin::BI_InterlockedExchange8:
5573 case Builtin::BI_InterlockedExchange16:
5574 case Builtin::BI_InterlockedExchange:
5575 case Builtin::BI_InterlockedExchangePointer:
5576 return RValue::get(
5577 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
5578 case Builtin::BI_InterlockedCompareExchangePointer:
5579 return RValue::get(
5580 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange, E));
5581 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5582 return RValue::get(
5583 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedCompareExchange_nf, E));
5584 case Builtin::BI_InterlockedCompareExchange8:
5585 case Builtin::BI_InterlockedCompareExchange16:
5586 case Builtin::BI_InterlockedCompareExchange:
5587 case Builtin::BI_InterlockedCompareExchange64:
5588 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5589 case Builtin::BI_InterlockedIncrement16:
5590 case Builtin::BI_InterlockedIncrement:
5591 return RValue::get(
5592 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
5593 case Builtin::BI_InterlockedDecrement16:
5594 case Builtin::BI_InterlockedDecrement:
5595 return RValue::get(
5596 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
5597 case Builtin::BI_InterlockedAnd8:
5598 case Builtin::BI_InterlockedAnd16:
5599 case Builtin::BI_InterlockedAnd:
5600 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
5601 case Builtin::BI_InterlockedExchangeAdd8:
5602 case Builtin::BI_InterlockedExchangeAdd16:
5603 case Builtin::BI_InterlockedExchangeAdd:
5604 return RValue::get(
5605 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
5606 case Builtin::BI_InterlockedExchangeSub8:
5607 case Builtin::BI_InterlockedExchangeSub16:
5608 case Builtin::BI_InterlockedExchangeSub:
5609 return RValue::get(
5610 EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
5611 case Builtin::BI_InterlockedOr8:
5612 case Builtin::BI_InterlockedOr16:
5613 case Builtin::BI_InterlockedOr:
5614 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
5615 case Builtin::BI_InterlockedXor8:
5616 case Builtin::BI_InterlockedXor16:
5617 case Builtin::BI_InterlockedXor:
5618 return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
5619
5620 case Builtin::BI_bittest64:
5621 case Builtin::BI_bittest:
5622 case Builtin::BI_bittestandcomplement64:
5623 case Builtin::BI_bittestandcomplement:
5624 case Builtin::BI_bittestandreset64:
5625 case Builtin::BI_bittestandreset:
5626 case Builtin::BI_bittestandset64:
5627 case Builtin::BI_bittestandset:
5628 case Builtin::BI_interlockedbittestandreset:
5629 case Builtin::BI_interlockedbittestandreset64:
5630 case Builtin::BI_interlockedbittestandreset64_acq:
5631 case Builtin::BI_interlockedbittestandreset64_rel:
5632 case Builtin::BI_interlockedbittestandreset64_nf:
5633 case Builtin::BI_interlockedbittestandset64:
5634 case Builtin::BI_interlockedbittestandset64_acq:
5635 case Builtin::BI_interlockedbittestandset64_rel:
5636 case Builtin::BI_interlockedbittestandset64_nf:
5637 case Builtin::BI_interlockedbittestandset:
5638 case Builtin::BI_interlockedbittestandset_acq:
5639 case Builtin::BI_interlockedbittestandset_rel:
5640 case Builtin::BI_interlockedbittestandset_nf:
5641 case Builtin::BI_interlockedbittestandreset_acq:
5642 case Builtin::BI_interlockedbittestandreset_rel:
5643 case Builtin::BI_interlockedbittestandreset_nf:
5644 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5645
5646 // These builtins exist to emit regular volatile loads and stores not
5647 // affected by the -fms-volatile setting.
5648 case Builtin::BI__iso_volatile_load8:
5649 case Builtin::BI__iso_volatile_load16:
5650 case Builtin::BI__iso_volatile_load32:
5651 case Builtin::BI__iso_volatile_load64:
5652 return RValue::get(EmitISOVolatileLoad(*this, E));
5653 case Builtin::BI__iso_volatile_store8:
5654 case Builtin::BI__iso_volatile_store16:
5655 case Builtin::BI__iso_volatile_store32:
5656 case Builtin::BI__iso_volatile_store64:
5657 return RValue::get(EmitISOVolatileStore(*this, E));
5658
5659 case Builtin::BI__builtin_ptrauth_sign_constant:
5660 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5661
5662 case Builtin::BI__builtin_ptrauth_auth:
5663 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5664 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5665 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5666 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5667 case Builtin::BI__builtin_ptrauth_strip: {
5668 // Emit the arguments.
5669 SmallVector<llvm::Value *, 5> Args;
5670 for (auto argExpr : E->arguments())
5671 Args.push_back(EmitScalarExpr(argExpr));
5672
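// e.g. __builtin_ptrauth_strip(fn, ptrauth_key_asia) lowers to roughly:
//   %0 = ptrtoint ptr %fn to i64
//   %1 = call i64 @llvm.ptrauth.strip(i64 %0, i32 0)
//   %2 = inttoptr i64 %1 to ptr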
5673 // Cast the value to intptr_t, saving its original type.
5674 llvm::Type *OrigValueType = Args[0]->getType();
5675 if (OrigValueType->isPointerTy())
5676 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5677
5678 switch (BuiltinID) {
5679 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5680 if (Args[4]->getType()->isPointerTy())
5681 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5682 [[fallthrough]];
5683
5684 case Builtin::BI__builtin_ptrauth_auth:
5685 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5686 if (Args[2]->getType()->isPointerTy())
5687 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5688 break;
5689
5690 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5691 if (Args[1]->getType()->isPointerTy())
5692 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5693 break;
5694
5695 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5696 case Builtin::BI__builtin_ptrauth_strip:
5697 break;
5698 }
5699
5700 // Call the intrinsic.
5701 auto IntrinsicID = [&]() -> unsigned {
5702 switch (BuiltinID) {
5703 case Builtin::BI__builtin_ptrauth_auth:
5704 return Intrinsic::ptrauth_auth;
5705 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5706 return Intrinsic::ptrauth_resign;
5707 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5708 return Intrinsic::ptrauth_blend;
5709 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5710 return Intrinsic::ptrauth_sign_generic;
5711 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5712 return Intrinsic::ptrauth_sign;
5713 case Builtin::BI__builtin_ptrauth_strip:
5714 return Intrinsic::ptrauth_strip;
5715 }
5716 llvm_unreachable("bad ptrauth intrinsic");
5717 }();
5718 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5719 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5720
5721 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5722 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5723 OrigValueType->isPointerTy()) {
5724 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5725 }
5726 return RValue::get(Result);
5727 }
5728
5729 case Builtin::BI__builtin_get_vtable_pointer: {
5730 const Expr *Target = E->getArg(0);
5731 QualType TargetType = Target->getType();
5732 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5733 assert(Decl);
5734 auto ThisAddress = EmitPointerWithAlignment(Target);
5735 assert(ThisAddress.isValid());
5736 llvm::Value *VTablePointer =
5737 GetVTablePtr(ThisAddress, Int8PtrTy, Decl, VTableAuthMode::MustTrap);
5738 return RValue::get(VTablePointer);
5739 }
5740
5741 case Builtin::BI__exception_code:
5742 case Builtin::BI_exception_code:
5743 return RValue::get(EmitSEHExceptionCode());
5744 case Builtin::BI__exception_info:
5745 case Builtin::BI_exception_info:
5746 return RValue::get(EmitSEHExceptionInfo());
5747 case Builtin::BI__abnormal_termination:
5748 case Builtin::BI_abnormal_termination:
5749 return RValue::get(EmitSEHAbnormalTermination());
5750 case Builtin::BI_setjmpex:
5751 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5752 E->getArg(0)->getType()->isPointerType())
5753 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5754 break;
5755 case Builtin::BI_setjmp:
5756 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5757 E->getArg(0)->getType()->isPointerType()) {
5758 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5759 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5760 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5761 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5762 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5763 }
5764 break;
5765
5766 // C++ std:: builtins.
5767 case Builtin::BImove:
5768 case Builtin::BImove_if_noexcept:
5769 case Builtin::BIforward:
5770 case Builtin::BIforward_like:
5771 case Builtin::BIas_const:
5772 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5773 case Builtin::BI__GetExceptionInfo: {
5774 if (llvm::GlobalVariable *GV =
5775 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5776 return RValue::get(GV);
5777 break;
5778 }
5779
5780 case Builtin::BI__fastfail:
5781 return EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E);
5782
5783 case Builtin::BI__builtin_coro_id:
5784 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5785 case Builtin::BI__builtin_coro_promise:
5786 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5787 case Builtin::BI__builtin_coro_resume:
5788 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5789 return RValue::get(nullptr);
5790 case Builtin::BI__builtin_coro_frame:
5791 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5792 case Builtin::BI__builtin_coro_noop:
5793 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5794 case Builtin::BI__builtin_coro_free:
5795 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5796 case Builtin::BI__builtin_coro_destroy:
5797 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5798 return RValue::get(nullptr);
5799 case Builtin::BI__builtin_coro_done:
5800 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5801 case Builtin::BI__builtin_coro_alloc:
5802 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5803 case Builtin::BI__builtin_coro_begin:
5804 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5805 case Builtin::BI__builtin_coro_end:
5806 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5807 case Builtin::BI__builtin_coro_suspend:
5808 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5809 case Builtin::BI__builtin_coro_size:
5810 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5811 case Builtin::BI__builtin_coro_align:
5812 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5813
5814 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5815 case Builtin::BIread_pipe:
5816 case Builtin::BIwrite_pipe: {
5817 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5818 *Arg1 = EmitScalarExpr(E->getArg(1));
5819 CGOpenCLRuntime OpenCLRT(CGM);
5820 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5821 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5822
5823 // Type of the generic packet parameter.
5824 unsigned GenericAS =
5825 getContext().getTargetAddressSpace(LangAS::opencl_generic);
5826 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5827
5828 // Determine which overloaded version we should generate the call for.
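// e.g. read_pipe(p, &val) takes the two-argument path and becomes roughly
// (with the generic AS shown as addrspace(4), as on SPIR targets):
//   %r = call i32 @__read_pipe_2(%pipe %p, ptr addrspace(4) %val,
//                                i32 <elem size>, i32 <elem align>)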
5829 if (2U == E->getNumArgs()) {
5830 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5831 : "__write_pipe_2";
5832 // Create a generic function type so the call can take any builtin or
5833 // user-defined type.
5834 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5835 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5836 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5837 return RValue::get(
5838 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5839 {Arg0, ACast, PacketSize, PacketAlign}));
5840 } else {
5841 assert(4 == E->getNumArgs() &&
5842 "Illegal number of parameters to pipe function");
5843 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5844 : "__write_pipe_4";
5845
5846 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5847 Int32Ty, Int32Ty};
5848 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5849 *Arg3 = EmitScalarExpr(E->getArg(3));
5850 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5851 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5852 // We know the third argument is an integer type, but we may need to cast
5853 // it to i32.
5854 if (Arg2->getType() != Int32Ty)
5855 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5856 return RValue::get(
5857 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5858 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5859 }
5860 }
5861 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
5862 // functions
5863 case Builtin::BIreserve_read_pipe:
5864 case Builtin::BIreserve_write_pipe:
5865 case Builtin::BIwork_group_reserve_read_pipe:
5866 case Builtin::BIwork_group_reserve_write_pipe:
5867 case Builtin::BIsub_group_reserve_read_pipe:
5868 case Builtin::BIsub_group_reserve_write_pipe: {
5869 // Compose the mangled name for the function.
5870 const char *Name;
5871 if (BuiltinID == Builtin::BIreserve_read_pipe)
5872 Name = "__reserve_read_pipe";
5873 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5874 Name = "__reserve_write_pipe";
5875 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5876 Name = "__work_group_reserve_read_pipe";
5877 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5878 Name = "__work_group_reserve_write_pipe";
5879 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5880 Name = "__sub_group_reserve_read_pipe";
5881 else
5882 Name = "__sub_group_reserve_write_pipe";
5883
5884 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5885 *Arg1 = EmitScalarExpr(E->getArg(1));
5886 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5887 CGOpenCLRuntime OpenCLRT(CGM);
5888 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5889 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5890
5891 // Building the generic function prototype.
5892 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5893 llvm::FunctionType *FTy =
5894 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5895 // We know the second argument is an integer type, but we may need to cast
5896 // it to i32.
5897 if (Arg1->getType() != Int32Ty)
5898 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5899 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5900 {Arg0, Arg1, PacketSize, PacketAlign}));
5901 }
5902 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
5903 // functions
5904 case Builtin::BIcommit_read_pipe:
5905 case Builtin::BIcommit_write_pipe:
5906 case Builtin::BIwork_group_commit_read_pipe:
5907 case Builtin::BIwork_group_commit_write_pipe:
5908 case Builtin::BIsub_group_commit_read_pipe:
5909 case Builtin::BIsub_group_commit_write_pipe: {
5910 const char *Name;
5911 if (BuiltinID == Builtin::BIcommit_read_pipe)
5912 Name = "__commit_read_pipe";
5913 else if (BuiltinID == Builtin::BIcommit_write_pipe)
5914 Name = "__commit_write_pipe";
5915 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
5916 Name = "__work_group_commit_read_pipe";
5917 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
5918 Name = "__work_group_commit_write_pipe";
5919 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
5920 Name = "__sub_group_commit_read_pipe";
5921 else
5922 Name = "__sub_group_commit_write_pipe";
5923
5924 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5925 *Arg1 = EmitScalarExpr(E->getArg(1));
5926 CGOpenCLRuntime OpenCLRT(CGM);
5927 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5928 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5929
5930 // Building the generic function prototype.
5931 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
5932 llvm::FunctionType *FTy = llvm::FunctionType::get(
5933 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
5934
5935 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5936 {Arg0, Arg1, PacketSize, PacketAlign}));
5937 }
5938 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
5939 case Builtin::BIget_pipe_num_packets:
5940 case Builtin::BIget_pipe_max_packets: {
5941 const char *BaseName;
5942 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
5943 if (BuiltinID == Builtin::BIget_pipe_num_packets)
5944 BaseName = "__get_pipe_num_packets";
5945 else
5946 BaseName = "__get_pipe_max_packets";
5947 std::string Name = std::string(BaseName) +
5948 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
5949
5950 // Building the generic function prototype.
5951 Value *Arg0 = EmitScalarExpr(E->getArg(0));
5952 CGOpenCLRuntime OpenCLRT(CGM);
5953 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5954 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5955 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
5956 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5957
5958 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5959 {Arg0, PacketSize, PacketAlign}));
5960 }
5961
5962 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
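// e.g. to_global(p) lowers to a runtime call plus pointer casts, roughly
// (SPIR numbering: generic is AS 4, global is AS 1):
//   %g = call ptr addrspace(1) @__to_global(ptr addrspace(4) %p)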
5963 case Builtin::BIto_global:
5964 case Builtin::BIto_local:
5965 case Builtin::BIto_private: {
5966 auto Arg0 = EmitScalarExpr(E->getArg(0));
5967 auto NewArgT = llvm::PointerType::get(
5968 getLLVMContext(),
5969 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
5970 auto NewRetT = llvm::PointerType::get(
5971 getLLVMContext(),
5972 CGM.getContext().getTargetAddressSpace(
5973 E->getType()->getPointeeType().getAddressSpace()));
5974 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
5975 llvm::Value *NewArg;
5976 if (Arg0->getType()->getPointerAddressSpace() !=
5977 NewArgT->getPointerAddressSpace())
5978 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
5979 else
5980 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
5981 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
5982 auto NewCall =
5983 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
5984 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
5985 ConvertType(E->getType())));
5986 }
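// [Editor's note] For example, to_global(p) becomes a call to __to_global
// taking p cast to the generic address space; the returned pointer is then
// cast back to the source-level result type of the builtin.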
5987
5988 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
5989 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
5990 // The code below expands the builtin call to a call to one of the following
5991 // functions that an OpenCL runtime library will have to provide:
5992 // __enqueue_kernel_basic
5993 // __enqueue_kernel_varargs
5994 // __enqueue_kernel_basic_events
5995 // __enqueue_kernel_events_varargs
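// [Editor's sketch, not in the original source] For reference, the first two
// would have C-level prototypes roughly as follows; the parameter shapes
// mirror the ArgTys arrays built below, but the actual ABI is up to the
// runtime:
//   int __enqueue_kernel_basic(queue_t q, int flags, ndrange_t *nd,
//                              void *kernel, void *block);
//   int __enqueue_kernel_varargs(queue_t q, int flags, ndrange_t *nd,
//                                void *kernel, void *block,
//                                int num_sizes, size_t *local_sizes);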
5996 case Builtin::BIenqueue_kernel: {
5997 StringRef Name; // Generated function call name
5998 unsigned NumArgs = E->getNumArgs();
5999
6000 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6001 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6002 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6003
6004 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6005 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6006 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6007 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6008
6009 // FIXME: Look through the addrspacecast which may exist to the stack
6010 // temporary as a hack.
6011 //
6012 // This is hardcoding the assumed ABI of the target function. It assumes
6013 // direct passing for every argument except NDRange, which is assumed to
6014 // be passed indirectly (byval or byref).
6015 //
6016 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6017 // through EmitCallArgs to get the correct target ABI.
6018 Range = Range->stripPointerCasts();
6019
6020 llvm::Type *RangePtrTy = Range->getType();
6021
6022 if (NumArgs == 4) {
6023 // The most basic form of the call with parameters:
6024 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6025 Name = "__enqueue_kernel_basic";
6026 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6027 GenericVoidPtrTy};
6028 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6029
6030 auto Info =
6031 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6032 llvm::Value *Kernel =
6033 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6034 llvm::Value *Block =
6035 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6036
6037 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6038 {Queue, Flags, Range, Kernel, Block});
6039 return RValue::get(RTCall);
6040 }
6041 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6042
6043 // Create a temporary array to hold the sizes of local pointer arguments
6044 // for the block. \p First is the position of the first size argument.
6045 auto CreateArrayForSizeVar =
6046 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6047 llvm::APInt ArraySize(32, NumArgs - First);
6048 QualType SizeArrayTy = getContext().getConstantArrayType(
6049 getContext().getSizeType(), ArraySize, nullptr,
6050 ArraySizeModifier::Normal,
6051 /*IndexTypeQuals=*/0);
6052 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6053 llvm::Value *TmpPtr = Tmp.getPointer();
6054 // The EmitLifetime* pair expects a naked Alloca as its last argument;
6055 // however, for cases where the default AS is not the Alloca AS, Tmp is
6056 // actually the Alloca addrspacecasted to the default AS, hence the
6057 // stripPointerCasts().
6058 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6059 llvm::Value *ElemPtr;
6060 EmitLifetimeStart(Alloca);
6061 // Each of the following arguments specifies the size of the corresponding
6062 // argument passed to the enqueued block.
6063 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6064 for (unsigned I = First; I < NumArgs; ++I) {
6065 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6066 auto *GEP =
6067 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6068 if (I == First)
6069 ElemPtr = GEP;
6070 auto *V =
6071 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6072 Builder.CreateAlignedStore(
6073 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6074 }
6075 // Return the Alloca itself rather than a potential addrspacecast, as it
6076 // is only used by the paired EmitLifetimeEnd.
6077 return {ElemPtr, Alloca};
6078 };
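// [Editor's note] For example, for enqueue_kernel(q, f, nd, block, 8u, 16u),
// CreateArrayForSizeVar(4) materializes a size_t[2] "block_sizes" alloca
// holding {8, 16}, returning a pointer to its first element together with the
// raw alloca for the paired EmitLifetimeEnd.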
6079
6080 // Could have events and/or varargs.
6081 if (E->getArg(3)->getType()->isBlockPointerType()) {
6082 // No events passed, but has variadic arguments.
6083 Name = "__enqueue_kernel_varargs";
6084 auto Info =
6085 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6086 llvm::Value *Kernel =
6087 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6088 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6089 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6090
6091 // Create a vector of the arguments, as well as a constant value to
6092 // express to the runtime the number of variadic arguments.
6093 llvm::Value *const Args[] = {Queue, Flags,
6094 Range, Kernel,
6095 Block, ConstantInt::get(IntTy, NumArgs - 4),
6096 ElemPtr};
6097 llvm::Type *const ArgTys[] = {
6098 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6099 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6100
6101 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6102 auto Call = RValue::get(
6103 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6104 EmitLifetimeEnd(TmpPtr);
6105 return Call;
6106 }
6107 // All remaining overload forms pass event arguments.
6108 if (NumArgs >= 7) {
6109 llvm::PointerType *PtrTy = llvm::PointerType::get(
6110 CGM.getLLVMContext(),
6111 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6112
6113 llvm::Value *NumEvents =
6114 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6115
6116 // Since SemaOpenCLBuiltinEnqueueKernel allows the fifth and sixth
6117 // arguments to be null pointer constants (including the `0` literal), we
6118 // take that into account and emit a null pointer directly.
6119 llvm::Value *EventWaitList = nullptr;
6120 if (E->getArg(4)->isNullPointerConstant(
6121 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6122 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6123 } else {
6124 EventWaitList =
6125 E->getArg(4)->getType()->isArrayType()
6126 ? EmitArrayToPointerDecay(E->getArg(4)).emitRawPointer(*this)
6127 : EmitScalarExpr(E->getArg(4));
6128 // Convert to generic address space.
6129 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6130 }
6131 llvm::Value *EventRet = nullptr;
6132 if (E->getArg(5)->isNullPointerConstant(
6133 getContext(), Expr::NPC_ValueDependentIsNotNull)) {
6134 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6135 } else {
6136 EventRet =
6137 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6138 }
6139
6140 auto Info =
6141 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6142 llvm::Value *Kernel =
6143 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6144 llvm::Value *Block =
6145 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6146
6147 std::vector<llvm::Type *> ArgTys = {
6148 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6149 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6150
6151 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6152 NumEvents, EventWaitList, EventRet,
6153 Kernel, Block};
6154
6155 if (NumArgs == 7) {
6156 // Has events but no variadics.
6157 Name = "__enqueue_kernel_basic_events";
6158 llvm::FunctionType *FTy =
6159 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6160 return RValue::get(
6161 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6162 }
6163 // Has event info and variadics
6164 // Pass the number of variadics to the runtime function too.
6165 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6166 ArgTys.push_back(Int32Ty);
6167 Name = "__enqueue_kernel_events_varargs";
6168
6169 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6170 Args.push_back(ElemPtr);
6171 ArgTys.push_back(ElemPtr->getType());
6172
6173 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6174 auto Call = RValue::get(
6175 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6176 EmitLifetimeEnd(TmpPtr);
6177 return Call;
6178 }
6179 llvm_unreachable("Unexpected enqueue_kernel signature");
6180 }
6181 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6182 // parameter.
6183 case Builtin::BIget_kernel_work_group_size: {
6184 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6185 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6186 auto Info =
6187 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6188 Value *Kernel =
6189 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6190 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6191 return RValue::get(EmitRuntimeCall(
6192 CGM.CreateRuntimeFunction(
6193 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6194 false),
6195 "__get_kernel_work_group_size_impl"),
6196 {Kernel, Arg}));
6197 }
6198 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6199 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6200 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6201 auto Info =
6202 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6203 Value *Kernel =
6204 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6205 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6206 return RValue::get(EmitRuntimeCall(
6207 CGM.CreateRuntimeFunction(
6208 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6209 false),
6210 "__get_kernel_preferred_work_group_size_multiple_impl"),
6211 {Kernel, Arg}));
6212 }
6213 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6214 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6215 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6216 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6217 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6218 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6219 auto Info =
6220 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6221 Value *Kernel =
6222 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6223 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6224 const char *Name =
6225 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6226 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6227 : "__get_kernel_sub_group_count_for_ndrange_impl";
6229 CGM.CreateRuntimeFunction(
6230 llvm::FunctionType::get(
6231 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6232 false),
6233 Name),
6234 {NDRange, Kernel, Block}));
6235 }
6236 case Builtin::BI__builtin_store_half:
6237 case Builtin::BI__builtin_store_halff: {
6238 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
6239 Value *Val = EmitScalarExpr(E->getArg(0));
6240 Address Address = EmitPointerWithAlignment(E->getArg(1));
6241 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6242 Builder.CreateStore(HalfVal, Address);
6243 return RValue::get(nullptr);
6244 }
6245 case Builtin::BI__builtin_load_half: {
6246 Address Address = EmitPointerWithAlignment(E->getArg(0));
6247 Value *HalfVal = Builder.CreateLoad(Address);
6248 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6249 }
6250 case Builtin::BI__builtin_load_halff: {
6251 Address Address = EmitPointerWithAlignment(E->getArg(0));
6252 Value *HalfVal = Builder.CreateLoad(Address);
6253 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6254 }
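// [Editor's note] In effect, __builtin_store_halff(v, p) is the equivalent of
// "*(half *)p = (half)v" (fptrunc + store), and __builtin_load_halff(p) is
// "(float)*(half *)p" (load + fpext); the double-precision variants differ
// only in the source/destination type.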
6255 case Builtin::BI__builtin_printf:
6256 case Builtin::BIprintf:
6257 if (getTarget().getTriple().isNVPTX() ||
6258 getTarget().getTriple().isAMDGCN() ||
6259 (getTarget().getTriple().isSPIRV() &&
6260 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6261 if (getTarget().getTriple().isNVPTX())
6262 return EmitNVPTXDevicePrintfCallExpr(E);
6263 if ((getTarget().getTriple().isAMDGCN() ||
6264 getTarget().getTriple().isSPIRV()) &&
6265 getLangOpts().HIP)
6266 return EmitAMDGPUDevicePrintfCallExpr(E);
6267 }
6268
6269 break;
6270 case Builtin::BI__builtin_canonicalize:
6271 case Builtin::BI__builtin_canonicalizef:
6272 case Builtin::BI__builtin_canonicalizef16:
6273 case Builtin::BI__builtin_canonicalizel:
6274 return RValue::get(
6275 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6276
6277 case Builtin::BI__builtin_thread_pointer: {
6278 if (!getContext().getTargetInfo().isTLSSupported())
6279 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6280
6281 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6282 {GlobalsInt8PtrTy}, {}));
6283 }
6284 case Builtin::BI__builtin_os_log_format:
6285 return emitBuiltinOSLogFormat(*E);
6286
6287 case Builtin::BI__xray_customevent: {
6288 if (!ShouldXRayInstrumentFunction())
6289 return RValue::getIgnored();
6290
6291 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6292 XRayInstrKind::Custom))
6293 return RValue::getIgnored();
6294
6295 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6296 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6297 return RValue::getIgnored();
6298
6299 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6300 auto FTy = F->getFunctionType();
6301 auto Arg0 = E->getArg(0);
6302 auto Arg0Val = EmitScalarExpr(Arg0);
6303 auto Arg0Ty = Arg0->getType();
6304 auto PTy0 = FTy->getParamType(0);
6305 if (PTy0 != Arg0Val->getType()) {
6306 if (Arg0Ty->isArrayType())
6307 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6308 else
6309 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6310 }
6311 auto Arg1 = EmitScalarExpr(E->getArg(1));
6312 auto PTy1 = FTy->getParamType(1);
6313 if (PTy1 != Arg1->getType())
6314 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6315 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6316 }
6317
6318 case Builtin::BI__xray_typedevent: {
6319 // TODO: There should be a way to always emit events even if the current
6320 // function is not instrumented. Losing events in a stream can cripple
6321 // a trace.
6322 if (!ShouldXRayInstrumentFunction())
6323 return RValue::getIgnored();
6324
6325 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6326 XRayInstrKind::Typed))
6327 return RValue::getIgnored();
6328
6329 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6330 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6331 return RValue::getIgnored();
6332
6333 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6334 auto FTy = F->getFunctionType();
6335 auto Arg0 = EmitScalarExpr(E->getArg(0));
6336 auto PTy0 = FTy->getParamType(0);
6337 if (PTy0 != Arg0->getType())
6338 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6339 auto Arg1 = E->getArg(1);
6340 auto Arg1Val = EmitScalarExpr(Arg1);
6341 auto Arg1Ty = Arg1->getType();
6342 auto PTy1 = FTy->getParamType(1);
6343 if (PTy1 != Arg1Val->getType()) {
6344 if (Arg1Ty->isArrayType())
6345 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6346 else
6347 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6348 }
6349 auto Arg2 = EmitScalarExpr(E->getArg(2));
6350 auto PTy2 = FTy->getParamType(2);
6351 if (PTy2 != Arg2->getType())
6352 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6353 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6354 }
6355
6356 case Builtin::BI__builtin_ms_va_start:
6357 case Builtin::BI__builtin_ms_va_end:
6358 return RValue::get(
6359 EmitVAStartEnd(EmitMSVAListRef(E->getArg(0)).emitRawPointer(*this),
6360 BuiltinID == Builtin::BI__builtin_ms_va_start));
6361
6362 case Builtin::BI__builtin_ms_va_copy: {
6363 // Lower this manually. We can't reliably determine whether or not any
6364 // given va_copy() is for a Win64 va_list from the calling convention
6365 // alone, because it's legal to do this from a System V ABI function.
6366 // With opaque pointer types, we won't have enough information in LLVM
6367 // IR to determine this from the argument types, either. Best to do it
6368 // now, while we have enough information.
6369 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6370 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6371
6372 DestAddr = DestAddr.withElementType(Int8PtrTy);
6373 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6374
6375 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6376 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6377 }
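// [Editor's note] Because a Win64 va_list is a single char *, the emitted
// copy is just one pointer-sized load and store, the equivalent of
// "*(char **)dst = *(char **)src"; no target va_copy intrinsic is required.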
6378
6379 case Builtin::BI__builtin_get_device_side_mangled_name: {
6380 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6381 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6382 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6383 return RValue::get(Str.getPointer());
6384 }
6385 }
6386
6387 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6388 // the call using the normal call path, but using the unmangled
6389 // version of the function name.
6390 const auto &BI = getContext().BuiltinInfo;
6391 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6392 BI.isLibFunction(BuiltinID))
6393 return emitLibraryCall(*this, FD, E,
6394 CGM.getBuiltinLibFunction(FD, BuiltinID));
6395
6396 // If this is a predefined lib function (e.g. malloc), emit the call
6397 // using exactly the normal call path.
6398 if (BI.isPredefinedLibFunction(BuiltinID))
6399 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6400
6401 // Check that a call to a target specific builtin has the correct target
6402 // features.
6403 // This is down here to avoid checking features for non-target-specific
6404 // builtins; however, if generic builtins start to require generic target
6405 // features then we can move this up to the beginning of the function.
6406 checkTargetFeatures(E, FD);
6407
6408 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6409 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6410
6411 // See if we have a target specific intrinsic.
6412 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6413 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6414 StringRef Prefix =
6415 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6416 if (!Prefix.empty()) {
6417 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6418 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6419 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6420 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6421 // NOTE: we don't need to perform a compatibility-flag check here, since
6422 // the intrinsics are declared in Builtins*.def via LANGBUILTIN, which
6423 // filters MS builtins via ALL_MS_LANGUAGES, so they are rejected earlier.
6424 if (IntrinsicID == Intrinsic::not_intrinsic)
6425 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6426 }
6427
6428 if (IntrinsicID != Intrinsic::not_intrinsic) {
6429 SmallVector<Value *, 16> Args;
6430
6431 // Find out if any arguments are required to be integer constant
6432 // expressions.
6433 unsigned ICEArguments = 0;
6434 ASTContext::GetBuiltinTypeError Error;
6435 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6436 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6437
6438 Function *F = CGM.getIntrinsic(IntrinsicID);
6439 llvm::FunctionType *FTy = F->getFunctionType();
6440
6441 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6442 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6443 // If the intrinsic arg type is different from the builtin arg type
6444 // we need to do a bit cast.
6445 llvm::Type *PTy = FTy->getParamType(i);
6446 if (PTy != ArgValue->getType()) {
6447 // XXX - vector of pointers?
6448 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6449 if (PtrTy->getAddressSpace() !=
6450 ArgValue->getType()->getPointerAddressSpace()) {
6451 ArgValue = Builder.CreateAddrSpaceCast(
6452 ArgValue, llvm::PointerType::get(getLLVMContext(),
6453 PtrTy->getAddressSpace()));
6454 }
6455 }
6456
6457 // Cast vector type (e.g., v256i32) to x86_amx; this only happens
6458 // in AMX intrinsics.
6459 if (PTy->isX86_AMXTy())
6460 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6461 {ArgValue->getType()}, {ArgValue});
6462 else
6463 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6464 }
6465
6466 Args.push_back(ArgValue);
6467 }
6468
6469 Value *V = Builder.CreateCall(F, Args);
6470 QualType BuiltinRetType = E->getType();
6471
6472 llvm::Type *RetTy = VoidTy;
6473 if (!BuiltinRetType->isVoidType())
6474 RetTy = ConvertType(BuiltinRetType);
6475
6476 if (RetTy != V->getType()) {
6477 // XXX - vector of pointers?
6478 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6479 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6480 V = Builder.CreateAddrSpaceCast(
6481 V, llvm::PointerType::get(getLLVMContext(),
6482 PtrTy->getAddressSpace()));
6483 }
6484 }
6485
6486 // Cast x86_amx to vector type (e.g., v256i32); this only happens
6487 // in AMX intrinsics.
6488 if (V->getType()->isX86_AMXTy())
6489 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6490 {V});
6491 else
6492 V = Builder.CreateBitCast(V, RetTy);
6493 }
6494
6495 if (RetTy->isVoidTy())
6496 return RValue::get(nullptr);
6497
6498 return RValue::get(V);
6499 }
6500
6501 // Some target-specific builtins can have aggregate return values, e.g.
6502 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6503 // ReturnValue to be non-null, so that the target-specific emission code can
6504 // always just emit into it.
6505 TypeEvaluationKind EvalKind = getEvaluationKind(E->getType());
6506 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6507 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6508 ReturnValue = ReturnValueSlot(DestPtr, false);
6509 }
6510
6511 // Now see if we can emit a target-specific builtin.
6512 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6513 switch (EvalKind) {
6514 case TEK_Scalar:
6515 if (V->getType()->isVoidTy())
6516 return RValue::get(nullptr);
6517 return RValue::get(V);
6518 case TEK_Aggregate:
6519 return RValue::getAggregate(ReturnValue.getAddress(),
6520 ReturnValue.isVolatile());
6521 case TEK_Complex:
6522 llvm_unreachable("No current target builtin returns complex");
6523 }
6524 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6525 }
6526
6527 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6528 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6529 switch (EvalKind) {
6530 case TEK_Scalar:
6531 if (V->getType()->isVoidTy())
6532 return RValue::get(nullptr);
6533 return RValue::get(V);
6534 case TEK_Aggregate:
6535 return RValue::getAggregate(ReturnValue.getAddress(),
6536 ReturnValue.isVolatile());
6537 case TEK_Complex:
6538 llvm_unreachable("No current hlsl builtin returns complex");
6539 }
6540 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6541 }
6542
6543 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6544 return EmitHipStdParUnsupportedBuiltin(this, FD);
6545
6546 ErrorUnsupported(E, "builtin function");
6547
6548 // Unknown builtin; for now just diagnose it and return undef.
6549 return GetUndefRValue(E->getType());
6550}
6551
6552namespace {
6553struct BuiltinAlignArgs {
6554 llvm::Value *Src = nullptr;
6555 llvm::Type *SrcType = nullptr;
6556 llvm::Value *Alignment = nullptr;
6557 llvm::Value *Mask = nullptr;
6558 llvm::IntegerType *IntType = nullptr;
6559
6560 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6561 QualType AstType = E->getArg(0)->getType();
6562 if (AstType->isArrayType())
6563 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6564 else
6565 Src = CGF.EmitScalarExpr(E->getArg(0));
6566 SrcType = Src->getType();
6567 if (SrcType->isPointerTy()) {
6568 IntType = IntegerType::get(
6569 CGF.getLLVMContext(),
6570 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6571 } else {
6572 assert(SrcType->isIntegerTy());
6573 IntType = cast<llvm::IntegerType>(SrcType);
6574 }
6575 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6576 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6577 auto *One = llvm::ConstantInt::get(IntType, 1);
6578 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6579 }
6580};
6581} // namespace
6582
6583/// Generate (x & (y-1)) == 0.
6585 BuiltinAlignArgs Args(E, *this);
6586 llvm::Value *SrcAddress = Args.Src;
6587 if (Args.SrcType->isPointerTy())
6588 SrcAddress =
6589 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6590 return RValue::get(Builder.CreateICmpEQ(
6591 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6592 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6593}
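// [Editor's sketch] For a pointer argument, __builtin_is_aligned(p, a) is
// emitted as the equivalent of
//
//   ((uintptr_t)p & (a - 1)) == 0
//
// where the integer width matches the target's index type for p.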
6594
6595/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6596/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6597 /// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6598 RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
6599 BuiltinAlignArgs Args(E, *this);
6600 llvm::Value *SrcForMask = Args.Src;
6601 if (AlignUp) {
6602 // When aligning up we have to first add the mask to ensure we go over the
6603 // next alignment value and then align down to the next valid multiple.
6604 // By adding the mask, we ensure that align_up on an already aligned
6605 // value will not change the value.
6606 if (Args.Src->getType()->isPointerTy()) {
6607 if (getLangOpts().PointerOverflowDefined)
6608 SrcForMask =
6609 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6610 else
6611 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6612 /*SignedIndices=*/true,
6613 /*isSubtraction=*/false,
6614 E->getExprLoc(), "over_boundary");
6615 } else {
6616 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6617 }
6618 }
6619 // Invert the mask to only clear the lower bits.
6620 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6621 llvm::Value *Result = nullptr;
6622 if (Args.Src->getType()->isPointerTy()) {
6623 Result = Builder.CreateIntrinsic(
6624 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6625 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6626 } else {
6627 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6628 }
6629 assert(Result->getType() == Args.SrcType);
6630 return RValue::get(Result);
6631}
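// [Editor's sketch] For an integer x, __builtin_align_down(x, a) becomes
// x & ~(a - 1) and __builtin_align_up(x, a) becomes (x + (a - 1)) & ~(a - 1).
// For pointers the same mask is applied through llvm.ptrmask (preceded by a
// GEP of a - 1 bytes in the align_up case), avoiding a ptrtoint/inttoptr
// round-trip.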
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static Value * emitBinaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool)
Utility to insert an atomic cmpxchg instruction.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:489
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:790
Builtin::Context & BuiltinInfo
Definition ASTContext.h:792
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:909
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3735
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4141
Expr * getRHS() const
Definition Expr.h:4090
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2943
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3147
bool hasStoredFPFeatures() const
Definition Expr.h:3102
SourceLocation getBeginLoc() const
Definition Expr.h:3277
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3126
Expr * getCallee()
Definition Expr.h:3090
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3242
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3134
arg_range arguments()
Definition Expr.h:3195
CastKind getCastKind() const
Definition Expr.h:3720
Expr * getSubExpr()
Definition Expr.h:3726
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:140
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:147
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:184
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:402
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:173
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:112
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:132
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:350
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
virtual llvm::Value * getPipeElemAlign(const Expr *PipeArg)
virtual llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2856
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1188
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5092
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:411
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3855
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:6930
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3745
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4591
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2710
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6324
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7857
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:226
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4003
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1300
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2215
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5248
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside presereved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:5052
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4433
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2331
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1592
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:793
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1575
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignmen and cas...
Definition CGExpr.cpp:189
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4418
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4345
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2245
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1231
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:420
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4333
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1691
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:656
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
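A hedged sketch of declaring and calling a helper from a CodeGenFunction member; "__example_helper" is a hypothetical name, not a real runtime entry point:
  // Declare (or reuse) "i32 __example_helper(ptr)" and call it.
  llvm::FunctionType *FTy = llvm::FunctionType::get(
      Int32Ty, {llvm::PointerType::getUnqual(getLLVMContext())},
      /*isVarArg=*/false);
  llvm::FunctionCallee Fn = CGM.CreateRuntimeFunction(FTy, "__example_helper");
  llvm::CallInst *Call = Builder.CreateCall(Fn, {PtrArg}); // PtrArg assumed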
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
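A sketch of the usual pairing with DecorateInstructionWithTBAA above, assuming a CodeGenFunction member where Addr is an Address and PointeeTy its source-level QualType:
  // Tag the load so the optimizer knows the source-level access type.
  llvm::LoadInst *Load = Builder.CreateLoad(Addr, "val");
  CGM.DecorateInstructionWithTBAA(Load, CGM.getTBAAAccessInfo(PointeeTy));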
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
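A minimal sketch of calling an overloaded intrinsic (Op is an assumed double operand, emitted inside a CodeGenFunction member):
  // llvm.fabs is overloaded on its operand type, so pass DoubleTy explicitly.
  llvm::Function *Fabs = CGM.getIntrinsic(llvm::Intrinsic::fabs, {DoubleTy});
  llvm::Value *Res = Builder.CreateCall(Fabs, {Op});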
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition CGCall.cpp:1702
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue reference.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class represents the result of an evaluated expression.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
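A sketch of choosing the wrapper by evaluation kind (Scalar, Real, Imag, and SlotAddr are assumed locals inside a CodeGenFunction member):
  switch (getEvaluationKind(E->getType())) {
  case TEK_Scalar:    return RValue::get(Scalar);
  case TEK_Complex:   return RValue::getComplex(Real, Imag);
  case TEK_Aggregate: return RValue::getAggregate(SlotAddr);
  }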
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query whether the target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
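A sketch of how a builtin emitter can defer to this hook, mirroring the pattern used for the __builtin_isnan family (V and BuiltinID are assumed):
  // A null result means the target has no special lowering; fall through.
  if (llvm::Value *Res =
          CGM.getTargetCodeGenInfo().testFPKind(V, BuiltinID, Builder, CGM))
    return RValue::get(Res);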
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3276
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4388
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3437
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3467
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:459
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
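A minimal sketch of folding an argument that must be a compile-time integer (E is the assumed CallExpr):
  Expr::EvalResult Result;
  if (E->getArg(1)->EvaluateAsInt(Result, getContext())) {
    llvm::APSInt Val = Result.Val.getInt();
    // Val.getZExtValue() is now usable as an immediate.
  }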
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3116
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3094
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3089
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:835
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3669
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
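A sketch, assuming E is the CallExpr for an os_log-style builtin:
  // Succeeds only if the argument folds to a null-terminated constant string.
  if (std::optional<std::string> Str =
          E->getArg(0)->tryEvaluateString(getContext())) {
    // *Str holds the folded contents.
  }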
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a null pointer constant.
Definition Expr.cpp:4047
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:276
QualType getType() const
Definition Expr.h:144
bool tryEvaluateObjectSize(uint64_t &Result, ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:225
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3160
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4855
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3762
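A minimal sketch of the common guard (FD is an assumed FunctionDecl pointer):
  // Zero means "not a builtin"; otherwise the ID is one of Builtin::BI...
  if (unsigned BuiltinID = FD->getBuiltinID()) {
    // Dispatch to the matching emitter.
  }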
Represents a prototype with parameter type info, e.g. 'int foo(int)' or 'int foo(void)'.
Definition TypeBase.h:5269
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5597
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3364
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3447
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2199
PipeType - Represents an OpenCL 2.0 pipe type.
Definition TypeBase.h:8110
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3329
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8376
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8418
Represents a struct/union/class.
Definition Decl.h:4324
field_range fields() const
Definition Decl.h:4527
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:350
bool isUnion() const
Definition Decl.h:3925
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls whether __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:747
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1833
bool isBlockPointerType() const
Definition TypeBase.h:8549
bool isVoidType() const
Definition TypeBase.h:8891
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2206
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8628
bool isCountAttributedType() const
Definition Type.cpp:742
bool isPointerType() const
Definition TypeBase.h:8529
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8935
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9178
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1910
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9111
Expr * getSubExpr() const
Definition Expr.h:2285
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4176
QualType getElementType() const
Definition TypeBase.h:4190
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
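A sketch of querying the layout for a __builtin_os_log_format call (Ctx and E are assumed from the caller):
  analyze_os_log::OSLogBufferLayout Layout;
  if (analyze_os_log::computeOSLogBufferLayout(Ctx, E, Layout)) {
    unsigned char Summary = Layout.getSummaryByte();
    unsigned char NumArgs = Layout.getNumArgsByte();
    // Layout.Items lists each buffer item in serialization order.
  }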
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:350
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
This class represents a compute construct, representing a 'Kind' of 'parallel', 'serial',...
Expr * Cond
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
LangAS
Defines the address space values used by the address space qualifier of QualType.
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
unsigned long uint64_t
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742