clang 23.0.0git
CGBuiltin.cpp
Go to the documentation of this file.
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "ABIInfo.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenCLRuntime.h"
20#include "CGRecordLayout.h"
21#include "CGValue.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "PatternInit.h"
26#include "TargetInfo.h"
27#include "clang/AST/OSLog.h"
31#include "llvm/IR/InlineAsm.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/IntrinsicsX86.h"
35#include "llvm/IR/MatrixBuilder.h"
36#include "llvm/Support/ConvertUTF.h"
37#include "llvm/Support/ScopedPrinter.h"
38#include <optional>
39#include <utility>
40
41using namespace clang;
42using namespace CodeGen;
43using namespace llvm;
44
45/// Some builtins do not have library implementation on some targets and
46/// are instead emitted as LLVM IRs by some target builtin emitters.
47/// FIXME: Remove this when library support is added
48static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
49 const Builtin::Context &BI,
50 const CodeGenFunction &CGF) {
// Only when math-errno is off are the listed libm builtins candidates for
// direct IR emission (an errno-setting libcall cannot be replaced by IR).
51 if (!CGF.CGM.getLangOpts().MathErrno &&
// NOTE(review): the remainder of this condition and the opening brace
// (original lines 52-54) are missing from this capture -- confirm the
// dropped predicate against the upstream source before relying on this.
55 switch (BuiltinID) {
56 default:
57 return false;
// logb/scalbn (and their float variants) lack library implementations on
// some targets, so they are selected for IR emission instead.
58 case Builtin::BIlogbf:
59 case Builtin::BI__builtin_logbf:
60 case Builtin::BIlogb:
61 case Builtin::BI__builtin_logb:
62 case Builtin::BIscalbnf:
63 case Builtin::BI__builtin_scalbnf:
64 case Builtin::BIscalbn:
65 case Builtin::BI__builtin_scalbn:
66 return true;
67 }
68 }
69 return false;
70}
71
73 unsigned BuiltinID, const CallExpr *E,
74 ReturnValueSlot ReturnValue,
75 llvm::Triple::ArchType Arch) {
76 // When compiling in HipStdPar mode we have to be conservative in rejecting
77 // target specific features in the FE, and defer the possible error to the
78 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
79 // referenced by an accelerator executable function, we emit an error.
80 // Returning nullptr here leads to the builtin being handled in
81 // EmitStdParUnsupportedBuiltin.
82 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
83 Arch != CGF->getTarget().getTriple().getArch())
84 return nullptr;
85
86 switch (Arch) {
87 case llvm::Triple::arm:
88 case llvm::Triple::armeb:
89 case llvm::Triple::thumb:
90 case llvm::Triple::thumbeb:
91 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
92 case llvm::Triple::aarch64:
93 case llvm::Triple::aarch64_32:
94 case llvm::Triple::aarch64_be:
95 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
96 case llvm::Triple::bpfeb:
97 case llvm::Triple::bpfel:
98 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
99 case llvm::Triple::dxil:
100 return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
101 case llvm::Triple::x86:
102 case llvm::Triple::x86_64:
103 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
104 case llvm::Triple::ppc:
105 case llvm::Triple::ppcle:
106 case llvm::Triple::ppc64:
107 case llvm::Triple::ppc64le:
108 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
109 case llvm::Triple::r600:
110 case llvm::Triple::amdgcn:
111 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
112 case llvm::Triple::systemz:
113 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
114 case llvm::Triple::nvptx:
115 case llvm::Triple::nvptx64:
116 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
117 case llvm::Triple::wasm32:
118 case llvm::Triple::wasm64:
119 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
120 case llvm::Triple::hexagon:
121 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
122 case llvm::Triple::riscv32:
123 case llvm::Triple::riscv64:
124 case llvm::Triple::riscv32be:
125 case llvm::Triple::riscv64be:
126 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
127 case llvm::Triple::spirv32:
128 case llvm::Triple::spirv64:
129 if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
130 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
131 [[fallthrough]];
132 case llvm::Triple::spirv:
133 return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
134 default:
135 return nullptr;
136 }
137}
138
140 const CallExpr *E,
142 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
143 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
145 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
146 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
147 }
148
149 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
150 getTarget().getTriple().getArch());
151}
152
153static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
154 Align AlignmentInBytes) {
155 ConstantInt *Byte;
156 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
158 // Nothing to initialize.
159 return;
161 Byte = CGF.Builder.getInt8(0x00);
162 break;
164 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
165 Byte = llvm::dyn_cast<llvm::ConstantInt>(
166 initializationPatternFor(CGF.CGM, Int8));
167 break;
168 }
169 }
170 if (CGF.CGM.stopAutoInit())
171 return;
172 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
173 I->addAnnotationMetadata("auto-init");
174}
175
176/// getBuiltinLibFunction - Given a builtin id for a function like
177/// "__builtin_fabsf", return a Function* for "fabsf".
179 unsigned BuiltinID) {
180 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
181
182 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
183 // to build this up so provide a small stack buffer to handle the vast
184 // majority of names.
186 GlobalDecl D(FD);
187
188 // TODO: This list should be expanded or refactored after all GCC-compatible
189 // std libcall builtins are implemented.
190 static const SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
191 {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
192 {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
193 {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
194 {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
195 {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
196 {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
197 {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
198 {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
199 {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
200 {Builtin::BI__builtin_printf, "__printfieee128"},
201 {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
202 {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
203 {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
204 {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
205 {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
206 {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
207 {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
208 {Builtin::BI__builtin_scanf, "__scanfieee128"},
209 {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
210 {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
211 {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
212 {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
213 {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
214 };
215
216 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
217 // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
218 // if it is 64-bit 'long double' mode.
219 static const SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
220 {Builtin::BI__builtin_frexpl, "frexp"},
221 {Builtin::BI__builtin_ldexpl, "ldexp"},
222 {Builtin::BI__builtin_modfl, "modf"},
223 };
224
225 // If the builtin has been declared explicitly with an assembler label,
226 // use the mangled name. This differs from the plain label on platforms
227 // that prefix labels.
228 if (FD->hasAttr<AsmLabelAttr>())
229 Name = getMangledName(D);
230 else {
231 // TODO: This mutation should also be applied to other targets other than
232 // PPC, after backend supports IEEE 128-bit style libcalls.
233 if (getTriple().isPPC64() &&
234 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
235 F128Builtins.contains(BuiltinID))
236 Name = F128Builtins.lookup(BuiltinID);
237 else if (getTriple().isOSAIX() &&
238 &getTarget().getLongDoubleFormat() ==
239 &llvm::APFloat::IEEEdouble() &&
240 AIXLongDouble64Builtins.contains(BuiltinID))
241 Name = AIXLongDouble64Builtins.lookup(BuiltinID);
242 else
243 Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
244 }
245
246 llvm::FunctionType *Ty =
247 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
248
249 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
250}
251
252/// Emit the conversions required to turn the given value into an
253/// integer of the given size.
254Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
255 QualType T, llvm::IntegerType *IntType) {
256 V = CGF.EmitToMemory(V, T);
257
258 if (V->getType()->isPointerTy())
259 return CGF.Builder.CreatePtrToInt(V, IntType);
260
261 assert(V->getType() == IntType);
262 return V;
263}
264
265Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
266 QualType T, llvm::Type *ResultType) {
267 V = CGF.EmitFromMemory(V, T);
268
269 if (ResultType->isPointerTy())
270 return CGF.Builder.CreateIntToPtr(V, ResultType);
271
272 assert(V->getType() == ResultType);
273 return V;
274}
275
277 ASTContext &Ctx = CGF.getContext();
278 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
279 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
280 unsigned Bytes = Ptr.getElementType()->isPointerTy()
282 : DL.getTypeStoreSize(Ptr.getElementType());
283 unsigned Align = Ptr.getAlignment().getQuantity();
284 if (Align % Bytes != 0) {
285 DiagnosticsEngine &Diags = CGF.CGM.getDiags();
286 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
287 // Force address to be at least naturally-aligned.
288 return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
289 }
290 return Ptr;
291}
292
293/// Utility to insert an atomic instruction based on Intrinsic::ID
294/// and the expression node.
296 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
297 AtomicOrdering Ordering) {
298
299 QualType T = E->getType();
300 assert(E->getArg(0)->getType()->isPointerType());
301 assert(CGF.getContext().hasSameUnqualifiedType(T,
302 E->getArg(0)->getType()->getPointeeType()));
303 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
304
305 Address DestAddr = CheckAtomicAlignment(CGF, E);
306
307 llvm::IntegerType *IntType = llvm::IntegerType::get(
308 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
309
310 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
311 llvm::Type *ValueType = Val->getType();
312 Val = EmitToInt(CGF, Val, T, IntType);
313
314 llvm::Value *Result =
315 CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
316 return EmitFromInt(CGF, Result, T, ValueType);
317}
318
320 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
322
323 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
324 LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
325 LV.setNontemporal(true);
326 CGF.EmitStoreOfScalar(Val, LV, false);
327 return nullptr;
328}
329
332
333 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
334 LV.setNontemporal(true);
335 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
336}
337
339 llvm::AtomicRMWInst::BinOp Kind,
340 const CallExpr *E) {
341 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
342}
343
344/// Utility to insert an atomic instruction based Intrinsic::ID and
345/// the expression node, where the return value is the result of the
346/// operation.
348 llvm::AtomicRMWInst::BinOp Kind,
349 const CallExpr *E,
350 Instruction::BinaryOps Op,
351 bool Invert = false) {
352 QualType T = E->getType();
353 assert(E->getArg(0)->getType()->isPointerType());
354 assert(CGF.getContext().hasSameUnqualifiedType(T,
355 E->getArg(0)->getType()->getPointeeType()));
356 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
357
358 Address DestAddr = CheckAtomicAlignment(CGF, E);
359
360 llvm::IntegerType *IntType = llvm::IntegerType::get(
361 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
362
363 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
364 llvm::Type *ValueType = Val->getType();
365 Val = EmitToInt(CGF, Val, T, IntType);
366
367 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
368 Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
369 Result = CGF.Builder.CreateBinOp(Op, Result, Val);
370 if (Invert)
371 Result =
372 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
373 llvm::ConstantInt::getAllOnesValue(IntType));
374 Result = EmitFromInt(CGF, Result, T, ValueType);
375 return RValue::get(Result);
376}
377
378/// Utility to insert an atomic cmpxchg instruction.
379///
380/// @param CGF The current codegen function.
381/// @param E Builtin call expression to convert to cmpxchg.
382/// arg0 - address to operate on
383/// arg1 - value to compare with
384/// arg2 - new value
385/// @param ReturnBool Specifies whether to return success flag of
386/// cmpxchg result or the old value.
387///
388/// @returns result of cmpxchg, according to ReturnBool
389///
390/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
391/// invoke the function EmitAtomicCmpXchgForMSIntrin.
393 bool ReturnBool,
394 llvm::AtomicOrdering SuccessOrdering,
395 llvm::AtomicOrdering FailureOrdering) {
396 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
397 Address DestAddr = CheckAtomicAlignment(CGF, E);
398
399 llvm::IntegerType *IntType = llvm::IntegerType::get(
400 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
401
402 Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
403 llvm::Type *ValueType = Cmp->getType();
404 Cmp = EmitToInt(CGF, Cmp, T, IntType);
405 Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
406
408 DestAddr, Cmp, New, SuccessOrdering, FailureOrdering);
409 if (ReturnBool)
410 // Extract boolean success flag and zext it to int.
411 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
412 CGF.ConvertType(E->getType()));
413 else
414 // Extract old value and emit it using the same type as compare value.
415 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
416 ValueType);
417}
418
419/// This function should be invoked to emit atomic cmpxchg for Microsoft's
420/// _InterlockedCompareExchange* intrinsics which have the following signature:
421/// T _InterlockedCompareExchange(T volatile *Destination,
422/// T Exchange,
423/// T Comparand);
424///
425/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
426/// cmpxchg *Destination, Comparand, Exchange.
427/// So we need to swap Comparand and Exchange when invoking
428/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
429/// function MakeAtomicCmpXchgValue since it expects the arguments to be
430/// already swapped.
431
432static
434 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
435 assert(E->getArg(0)->getType()->isPointerType());
437 E->getType(), E->getArg(0)->getType()->getPointeeType()));
438 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
439 E->getArg(1)->getType()));
440 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
441 E->getArg(2)->getType()));
442
443 Address DestAddr = CheckAtomicAlignment(CGF, E);
444
445 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
446 auto *RTy = Exchange->getType();
447
448 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
449
450 if (RTy->isPointerTy()) {
451 Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
452 Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
453 }
454
455 // For Release ordering, the failure ordering should be Monotonic.
456 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
457 AtomicOrdering::Monotonic :
458 SuccessOrdering;
459
460 // The atomic instruction is marked volatile for consistency with MSVC. This
461 // blocks the few atomics optimizations that LLVM has. If we want to optimize
462 // _Interlocked* operations in the future, we will have to remove the volatile
463 // marker.
464 auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
465 DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
466 CmpXchg->setVolatile(true);
467
468 auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
469 if (RTy->isPointerTy()) {
470 Result = CGF.Builder.CreateIntToPtr(Result, RTy);
471 }
472
473 return Result;
474}
475
476// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
477// prototyped like this:
478//
479// unsigned char _InterlockedCompareExchange128...(
480// __int64 volatile * _Destination,
481// __int64 _ExchangeHigh,
482// __int64 _ExchangeLow,
483// __int64 * _ComparandResult);
484//
485// Note that Destination is assumed to be at least 16-byte aligned, despite
486// being typed int64.
487
489 const CallExpr *E,
490 AtomicOrdering SuccessOrdering) {
491 assert(E->getNumArgs() == 4);
492 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
493 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
494 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
495 Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));
496
497 assert(DestPtr->getType()->isPointerTy());
498 assert(!ExchangeHigh->getType()->isPointerTy());
499 assert(!ExchangeLow->getType()->isPointerTy());
500
501 // For Release ordering, the failure ordering should be Monotonic.
502 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
503 ? AtomicOrdering::Monotonic
504 : SuccessOrdering;
505
506 // Convert to i128 pointers and values. Alignment is also overridden for
507 // destination pointer.
508 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
509 Address DestAddr(DestPtr, Int128Ty,
511 ComparandAddr = ComparandAddr.withElementType(Int128Ty);
512
513 // (((i128)hi) << 64) | ((i128)lo)
514 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
515 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
516 ExchangeHigh =
517 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
518 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
519
520 // Load the comparand for the instruction.
521 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);
522
523 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
524 SuccessOrdering, FailureOrdering);
525
526 // The atomic instruction is marked volatile for consistency with MSVC. This
527 // blocks the few atomics optimizations that LLVM has. If we want to optimize
528 // _Interlocked* operations in the future, we will have to remove the volatile
529 // marker.
530 CXI->setVolatile(true);
531
532 // Store the result as an outparameter.
533 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
534 ComparandAddr);
535
536 // Get the success boolean and zero extend it to i8.
537 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
538 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
539}
540
542 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
543 assert(E->getArg(0)->getType()->isPointerType());
544
545 auto *IntTy = CGF.ConvertType(E->getType());
546 Address DestAddr = CheckAtomicAlignment(CGF, E);
547 auto *Result = CGF.Builder.CreateAtomicRMW(
548 AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
549 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
550}
551
553 CodeGenFunction &CGF, const CallExpr *E,
554 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
555 assert(E->getArg(0)->getType()->isPointerType());
556
557 auto *IntTy = CGF.ConvertType(E->getType());
558 Address DestAddr = CheckAtomicAlignment(CGF, E);
559 auto *Result = CGF.Builder.CreateAtomicRMW(
560 AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
561 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
562}
563
564// Build a plain volatile load.
566 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
567 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
568 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
569 llvm::Type *ITy =
570 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
571 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
572 Load->setAtomic(llvm::AtomicOrdering::Monotonic);
573 Load->setVolatile(true);
574 return Load;
575}
576
577// Build a plain volatile store.
579 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
580 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
581 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
582 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
583 llvm::StoreInst *Store =
584 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
585 Store->setAtomic(llvm::AtomicOrdering::Monotonic);
586 Store->setVolatile(true);
587 return Store;
588}
589
590// Emit a simple mangled intrinsic that has 1 argument and a return type
591// matching the argument type. Depending on mode, this may be a constrained
592// floating-point intrinsic.
594 const CallExpr *E, unsigned IntrinsicID,
595 unsigned ConstrainedIntrinsicID) {
596 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
597
598 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
599 if (CGF.Builder.getIsFPConstrained()) {
600 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
601 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
602 } else {
603 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
604 return CGF.Builder.CreateCall(F, Src0);
605 }
606}
607
608// Emit an intrinsic that has 2 operands of the same type as its result.
609// Depending on mode, this may be a constrained floating-point intrinsic.
611 const CallExpr *E, unsigned IntrinsicID,
612 unsigned ConstrainedIntrinsicID) {
613 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
614 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
615
616 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
617 if (CGF.Builder.getIsFPConstrained()) {
618 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
619 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
620 } else {
621 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
622 return CGF.Builder.CreateCall(F, { Src0, Src1 });
623 }
624}
625
626// Has second type mangled argument.
627static Value *
629 Intrinsic::ID IntrinsicID,
630 Intrinsic::ID ConstrainedIntrinsicID) {
631 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
632 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
633
634 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
635 if (CGF.Builder.getIsFPConstrained()) {
636 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
637 {Src0->getType(), Src1->getType()});
638 return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
639 }
640
641 Function *F =
642 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
643 return CGF.Builder.CreateCall(F, {Src0, Src1});
644}
645
646// Emit an intrinsic that has 3 operands of the same type as its result.
647// Depending on mode, this may be a constrained floating-point intrinsic.
649 const CallExpr *E, unsigned IntrinsicID,
650 unsigned ConstrainedIntrinsicID) {
651 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
652 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
653 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
654
655 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
656 if (CGF.Builder.getIsFPConstrained()) {
657 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
658 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
659 } else {
660 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
661 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
662 }
663}
664
665// Emit an intrinsic that has overloaded integer result and fp operand.
666static Value *
668 unsigned IntrinsicID,
669 unsigned ConstrainedIntrinsicID) {
670 llvm::Type *ResultType = CGF.ConvertType(E->getType());
671 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
672
673 if (CGF.Builder.getIsFPConstrained()) {
674 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
675 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
676 {ResultType, Src0->getType()});
677 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
678 } else {
679 Function *F =
680 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
681 return CGF.Builder.CreateCall(F, Src0);
682 }
683}
684
686 Intrinsic::ID IntrinsicID) {
687 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
688 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
689
690 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
691 llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
692 llvm::Function *F =
693 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
694 llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
695
696 llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
697 LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
698 CGF.EmitStoreOfScalar(Exp, LV);
699
700 return CGF.Builder.CreateExtractValue(Call, 0);
701}
702
703static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
704 Intrinsic::ID IntrinsicID) {
705 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
706 llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
707 llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));
708
709 llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
710 llvm::Value *Call = CGF.Builder.CreateCall(F, Val);
711
712 llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
713 llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);
714
715 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
716 LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
717 LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);
718
719 llvm::StoreInst *StoreSin =
720 CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
721 llvm::StoreInst *StoreCos =
722 CGF.Builder.CreateStore(CosResult, CosLV.getAddress());
723
724 // Mark the two stores as non-aliasing with each other. The order of stores
725 // emitted by this builtin is arbitrary, enforcing a particular order will
726 // prevent optimizations later on.
727 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
728 MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
729 MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
730 MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
731 StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
732 StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
733}
734
735static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
736 Intrinsic::ID IntrinsicID) {
737 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
738 llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));
739
740 llvm::Value *Call =
741 CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);
742
743 llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
744 llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);
745
746 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
747 LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
748 CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);
749
750 return FractionalResult;
751}
752
753/// EmitFAbs - Emit a call to @llvm.fabs().
755 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
756 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
757 Call->setDoesNotAccessMemory();
758 return Call;
759}
760
761/// Emit the computation of the sign bit for a floating point value. Returns
762/// the i1 sign bit value.
764 LLVMContext &C = CGF.CGM.getLLVMContext();
765
766 llvm::Type *Ty = V->getType();
767 int Width = Ty->getPrimitiveSizeInBits();
768 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
769 V = CGF.Builder.CreateBitCast(V, IntTy);
770 if (Ty->isPPC_FP128Ty()) {
771 // We want the sign bit of the higher-order double. The bitcast we just
772 // did works as if the double-double was stored to memory and then
773 // read as an i128. The "store" will put the higher-order double in the
774 // lower address in both little- and big-Endian modes, but the "load"
775 // will treat those bits as a different part of the i128: the low bits in
776 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
777 // we need to shift the high bits down to the low before truncating.
778 Width >>= 1;
779 if (CGF.getTarget().isBigEndian()) {
780 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
781 V = CGF.Builder.CreateLShr(V, ShiftCst);
782 }
783 // We are truncating value in order to extract the higher-order
784 // double, which we will be using to extract the sign from.
785 IntTy = llvm::IntegerType::get(C, Width);
786 V = CGF.Builder.CreateTrunc(V, IntTy);
787 }
788 Value *Zero = llvm::Constant::getNullValue(IntTy);
789 return CGF.Builder.CreateICmpSLT(V, Zero);
790}
791
792/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
793/// hidden pointer). This is used to check annotating FP libcalls (that could
794/// set `errno`) with "int" TBAA metadata is safe. If any floating-point
795/// arguments are passed indirectly, setup for the call could be incorrectly
796/// optimized out.
798 auto IsIndirect = [&](ABIArgInfo const &info) {
799 return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
800 };
801 return !IsIndirect(FnInfo.getReturnInfo()) &&
802 llvm::none_of(FnInfo.arguments(),
803 [&](CGFunctionInfoArgInfo const &ArgInfo) {
804 return IsIndirect(ArgInfo.info);
805 });
806}
807
809 const CallExpr *E, llvm::Constant *calleeValue) {
810 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
811 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
812 llvm::CallBase *callOrInvoke = nullptr;
813 CGFunctionInfo const *FnInfo = nullptr;
814 RValue Call =
815 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
816 /*Chain=*/nullptr, &callOrInvoke, &FnInfo);
817
818 if (unsigned BuiltinID = FD->getBuiltinID()) {
819 // Check whether a FP math builtin function, such as BI__builtin_expf
820 ASTContext &Context = CGF.getContext();
821 bool ConstWithoutErrnoAndExceptions =
823 // Restrict to target with errno, for example, MacOS doesn't set errno.
824 // TODO: Support builtin function with complex type returned, eg: cacosh
825 if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
826 !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
828 // Emit "int" TBAA metadata on FP math libcalls.
829 clang::QualType IntTy = Context.IntTy;
830 TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
831 CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
832 }
833 }
834 return Call;
835}
836
837/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
838/// depending on IntrinsicID.
839///
840/// \arg CGF The current codegen function.
841/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
842/// \arg X The first argument to the llvm.*.with.overflow.*.
843/// \arg Y The second argument to the llvm.*.with.overflow.*.
844/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
845/// \returns The result (i.e. sum/product) returned by the intrinsic.
/// NOTE(review): the first line of the signature (original line 846) was lost
/// in extraction; it begins `static llvm::Value *EmitOverflowIntrinsic(
/// CodeGenFunction &CGF,` — confirm against the full file.
847 const Intrinsic::ID IntrinsicID,
848 llvm::Value *X, llvm::Value *Y,
849 llvm::Value *&Carry) {
850 // Make sure we have integers of the same width.
851 assert(X->getType() == Y->getType() &&
852 "Arguments must be the same type. (Did you forget to make sure both "
853 "arguments have the same integer width?)");
854
855 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
// The intrinsic returns the aggregate {result, overflow-bit}; split it.
856 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
857 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
858 return CGF.Builder.CreateExtractValue(Tmp, 0);
859}
860
861namespace {
// Compact summary of an integer type — its bit-width and signedness — used
// by the overflow-checking builtin emitters below (aggregate-initialized as
// {Width, Signed}; do not reorder the fields).
 862 struct WidthAndSignedness {
// Bit-width of the integer type.
 863 unsigned Width;
// True when the type is signed.
 864 bool Signed;
 865 };
866}
867
/// Summarize an integer type's width and signedness for the overflow
/// builtins.
/// NOTE(review): part of the signature (original line 869, beginning
/// `getIntegerWidthAndSignedness(const clang::ASTContext &context,`) was lost
/// in extraction — confirm against the full file.
868static WidthAndSignedness
870 const clang::QualType Type) {
871 assert(Type->isIntegerType() && "Given type is not an integer.");
872 unsigned Width = context.getIntWidth(Type);
// NOTE(review): the declaration of `Signed` (original line 873, presumably
// `bool Signed = Type->isSignedIntegerType();`) was lost in extraction.
874 return {Width, Signed};
875}
876
877// Given one or more integer types, this function produces an integer type that
878// encompasses them: any value in one of the given types could be expressed in
879// the encompassing type.
880static struct WidthAndSignedness
881EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
882 assert(Types.size() > 0 && "Empty list of types.");
883
884 // If any of the given types is signed, we must return a signed type.
885 bool Signed = false;
886 for (const auto &Type : Types) {
887 Signed |= Type.Signed;
888 }
889
890 // The encompassing type must have a width greater than or equal to the width
891 // of the specified types. Additionally, if the encompassing type is signed,
892 // its width must be strictly greater than the width of any unsigned types
893 // given.
894 unsigned Width = 0;
895 for (const auto &Type : Types) {
896 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
897 if (Width < MinWidth) {
898 Width = MinWidth;
899 }
900 }
901
902 return {Width, Signed};
903}
904
905Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
906 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
907 return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
908 ArgValue);
909}
910
/// Checks if using the result of __builtin_object_size(p, @p From) in place
/// of __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  if (From == To)
    return true;
  // Note: Our __builtin_object_size implementation currently treats Type=0
  // and Type=2 identically. Encoding that implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted;
  // only the "max result may stand in for its subobject variant" pairs are
  // accepted.
  return (From == 0 && To == 1) || (From == 3 && To == 2);
}
919
920static llvm::Value *
921getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
922 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
923}
924
/// Fold __builtin_object_size to a constant when the frontend can evaluate
/// it; otherwise fall back to emitting the runtime computation.
/// NOTE(review): the condition's call (original line 931, presumably
/// `E->tryEvaluateObjectSize(...)`) was lost in extraction — confirm against
/// the full file.
925llvm::Value *
926CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
927 llvm::IntegerType *ResType,
928 llvm::Value *EmittedE,
929 bool IsDynamic) {
930 if (std::optional<uint64_t> ObjectSize =
932 return ConstantInt::get(ResType, *ObjectSize, /*isSigned=*/true);
933 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
934}
935
936namespace {
937
938/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
939/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
940class StructFieldAccess
941 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
942 bool AddrOfSeen = false;
943
944public:
945 const Expr *ArrayIndex = nullptr;
946 QualType ArrayElementTy;
947
948 const Expr *VisitMemberExpr(const MemberExpr *E) {
949 if (AddrOfSeen && E->getType()->isArrayType())
950 // Avoid forms like '&ptr->array'.
951 return nullptr;
952 return E;
953 }
954
955 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
956 if (ArrayIndex)
957 // We don't support multiple subscripts.
958 return nullptr;
959
960 AddrOfSeen = false; // '&ptr->array[idx]' is okay.
961 ArrayIndex = E->getIdx();
962 ArrayElementTy = E->getBase()->getType();
963 return Visit(E->getBase());
964 }
965 const Expr *VisitCastExpr(const CastExpr *E) {
966 if (E->getCastKind() == CK_LValueToRValue)
967 return E;
968 return Visit(E->getSubExpr());
969 }
970 const Expr *VisitParenExpr(const ParenExpr *E) {
971 return Visit(E->getSubExpr());
972 }
973 const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
974 AddrOfSeen = true;
975 return Visit(E->getSubExpr());
976 }
977 const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
978 AddrOfSeen = false;
979 return Visit(E->getSubExpr());
980 }
981 const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
982 return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
983 }
984};
985
986} // end anonymous namespace
987
988/// Find a struct's flexible array member. It may be embedded inside multiple
989/// sub-structs, but must still be the last field.
/// NOTE(review): the first line of the signature (original line 990,
/// beginning `static const FieldDecl *FindFlexibleArrayMemberField(
/// CodeGenFunction &CGF,`) was lost in extraction — confirm against the full
/// file.
991 ASTContext &Ctx,
992 const RecordDecl *RD) {
993 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
994 CGF.getLangOpts().getStrictFlexArraysLevel();
995
996 if (RD->isImplicit())
997 return nullptr;
998
999 for (const FieldDecl *FD : RD->fields()) {
// NOTE(review): the opening of this condition (original line 1000, likely an
// `if (Decl::isFlexibleArrayMemberLike(` call) was lost in extraction.
1001 Ctx, FD, FD->getType(), StrictFlexArraysLevel,
1002 /*IgnoreTemplateOrMacroSubstitution=*/true))
1003 return FD;
1004
// Recurse into embedded sub-structs.
1005 if (const auto *RD = FD->getType()->getAsRecordDecl())
1006 if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
1007 return FD;
1008 }
1009
1010 return nullptr;
1011}
1012
1013/// Calculate the offset of a struct field. It may be embedded inside multiple
1014/// sub-structs.
1015static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
1016 const FieldDecl *FD, int64_t &Offset) {
1017 if (RD->isImplicit())
1018 return false;
1019
1020 // Keep track of the field number ourselves, because the other methods
1021 // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
1022 // is laid out.
1023 uint32_t FieldNo = 0;
1024 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
1025
1026 for (const FieldDecl *Field : RD->fields()) {
1027 if (Field == FD) {
1028 Offset += Layout.getFieldOffset(FieldNo);
1029 return true;
1030 }
1031
1032 if (const auto *RD = Field->getType()->getAsRecordDecl()) {
1033 if (GetFieldOffset(Ctx, RD, FD, Offset)) {
1034 Offset += Layout.getFieldOffset(FieldNo);
1035 return true;
1036 }
1037 }
1038
1039 if (!RD->isUnion())
1040 ++FieldNo;
1041 }
1042
1043 return false;
1044}
1045
1046static std::optional<int64_t>
1047GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
1048 int64_t Offset = 0;
1049
1050 if (GetFieldOffset(Ctx, RD, FD, Offset))
1051 return std::optional<int64_t>(Offset);
1052
1053 return std::nullopt;
1054}
1055
1056llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
1057 llvm::Value *EmittedE,
1058 unsigned Type,
1059 llvm::IntegerType *ResType) {
1060 // Note: If the whole struct is specificed in the __bdos (i.e. Visitor
1061 // returns a DeclRefExpr). The calculation of the whole size of the structure
1062 // with a flexible array member can be done in two ways:
1063 //
1064 // 1) sizeof(struct S) + count * sizeof(typeof(fam))
1065 // 2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
1066 //
1067 // The first will add additional padding after the end of the array
1068 // allocation while the second method is more precise, but not quite expected
1069 // from programmers. See
1070 // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
1071 // of the topic.
1072 //
1073 // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
1074 // structure. Therefore, because of the above issue, we choose to match what
1075 // GCC does for consistency's sake.
1076
1077 StructFieldAccess Visitor;
1078 E = Visitor.Visit(E);
1079 if (!E)
1080 return nullptr;
1081
1082 const Expr *Idx = Visitor.ArrayIndex;
1083 if (Idx) {
1084 if (Idx->HasSideEffects(getContext()))
1085 // We can't have side-effects.
1086 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1087
1088 if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
1089 int64_t Val = IL->getValue().getSExtValue();
1090 if (Val < 0)
1091 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1092
1093 // The index is 0, so we don't need to take it into account.
1094 if (Val == 0)
1095 Idx = nullptr;
1096 }
1097 }
1098
1099 // __counted_by on either a flexible array member or a pointer into a struct
1100 // with a flexible array member.
1101 if (const auto *ME = dyn_cast<MemberExpr>(E))
1102 return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
1103 Type, ResType);
1104
1105 // __counted_by on a pointer in a struct.
1106 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
1107 ICE && ICE->getCastKind() == CK_LValueToRValue)
1108 return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
1109 Type, ResType);
1110
1111 return nullptr;
1112}
1113
/// Clamp the computed size \p Res to zero unless it — and the optional
/// \p Index — are non-negative, i.e. emit
/// `(Res >= 0 && (!Index || Index >= 0)) ? Res : 0`.
/// NOTE(review): the first line of the signature (original line 1114,
/// beginning `static Value *EmitPositiveResultOrZero(CodeGenFunction &CGF,`)
/// was lost in extraction — confirm against the full file.
1115 llvm::Value *Res,
1116 llvm::Value *Index,
1117 llvm::IntegerType *ResType,
1118 bool IsSigned) {
1119 // cmp = (array_size >= 0)
1120 Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
1121 if (Index)
1122 // cmp = (cmp && index >= 0)
1123 Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);
1124
1125 // return cmp ? result : 0
1126 return CGF.Builder.CreateSelect(Cmp, Res,
1127 ConstantInt::get(ResType, 0, IsSigned));
1128}
1129
/// Load the '__counted_by' count for \p ArrayFD (via \p CountFD) and emit the
/// optional index expression \p Idx, both cast to \p ResType. Returns
/// {nullptr, nullptr} when the count cannot be loaded.
/// NOTE(review): part of the signature (original line 1131, beginning
/// `GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME,`) was
/// lost in extraction — confirm against the full file.
1130static std::pair<llvm::Value *, llvm::Value *>
1132 const FieldDecl *ArrayFD, const FieldDecl *CountFD,
1133 const Expr *Idx, llvm::IntegerType *ResType,
1134 bool IsSigned) {
1135 // count = ptr->count;
1136 Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
1137 if (!Count)
1138 return std::make_pair<Value *>(nullptr, nullptr);
1139 Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");
1140
1141 // index = ptr->index;
1142 Value *Index = nullptr;
1143 if (Idx) {
// The index is cast using its own signedness, not the count's.
1144 bool IdxSigned = Idx->getType()->isSignedIntegerType();
1145 Index = CGF.EmitScalarExpr(Idx);
1146 Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
1147 }
1148
1149 return std::make_pair(Count, Index);
1150}
1151
/// Compute the object size for a '__counted_by' pointer member access
/// (a struct field of pointer type annotated with counted_by(count)).
/// Returns null when the expression doesn't match the supported patterns;
/// the emitted value is clamped to zero when the computation is negative.
1152llvm::Value *CodeGenFunction::emitCountedByPointerSize(
1153 const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
1154 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1155 assert(E->getCastKind() == CK_LValueToRValue &&
1156 "must be an LValue to RValue cast");
1157
1158 const MemberExpr *ME =
1159 dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
1160 if (!ME)
1161 return nullptr;
1162
// Only pointer members carrying a count_attributed type qualify.
1163 const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1164 if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
1165 !ArrayBaseFD->getType()->isCountAttributedType())
1166 return nullptr;
1167
1168 // Get the 'count' FieldDecl.
1169 const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
1170 if (!CountFD)
1171 // Can't find the field referenced by the "counted_by" attribute.
1172 return nullptr;
1173
1174 // Calculate the array's object size using these formulae. (Note: if the
1175 // calculation is negative, we return 0.):
1176 //
1177 // struct p;
1178 // struct s {
1179 // /* ... */
1180 // struct p **array __attribute__((counted_by(count)));
1181 // int count;
1182 // };
1183 //
1184 // 1) 'ptr->array':
1185 //
1186 // count = ptr->count;
1187 //
1188 // array_element_size = sizeof (*ptr->array);
1189 // array_size = count * array_element_size;
1190 //
1191 // result = array_size;
1192 //
1193 // cmp = (result >= 0)
1194 // return cmp ? result : 0;
1195 //
1196 // 2) '&((cast) ptr->array)[idx]':
1197 //
1198 // count = ptr->count;
1199 // index = idx;
1200 //
1201 // array_element_size = sizeof (*ptr->array);
1202 // array_size = count * array_element_size;
1203 //
1204 // casted_array_element_size = sizeof (*((cast) ptr->array));
1205 //
1206 // index_size = index * casted_array_element_size;
1207 // result = array_size - index_size;
1208 //
1209 // cmp = (result >= 0)
1210 // if (index)
1211 // cmp = (cmp && index > 0)
1212 // return cmp ? result : 0;
1213
// Size in CharUnits of one pointee element; void-pointee (size 0) means a
// __sized_by/__counted_by counting bytes, so treat it as 1.
1214 auto GetElementBaseSize = [&](QualType ElementTy) {
1215 CharUnits ElementSize =
1216 getContext().getTypeSizeInChars(ElementTy->getPointeeType());
1217
1218 if (ElementSize.isZero()) {
1219 // This might be a __sized_by (or __counted_by) on a
1220 // 'void *', which counts bytes, not elements.
1221 [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
1222 assert(CAT && "must have an CountAttributedType");
1223
1224 ElementSize = CharUnits::One();
1225 }
1226
1227 return std::optional<CharUnits>(ElementSize);
1228 };
1229
1230 // Get the sizes of the original array element and the casted array element,
1231 // if different.
1232 std::optional<CharUnits> ArrayElementBaseSize =
1233 GetElementBaseSize(ArrayBaseFD->getType());
1234 if (!ArrayElementBaseSize)
1235 return nullptr;
1236
1237 std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
1238 if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
1239 CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
1240 if (!CastedArrayElementBaseSize)
1241 return nullptr;
1242 }
1243
1244 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1245
1246 // count = ptr->count;
1247 // index = ptr->index;
1248 Value *Count, *Index;
1249 std::tie(Count, Index) = GetCountFieldAndIndex(
1250 *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
1251 if (!Count)
1252 return nullptr;
1253
1254 // array_element_size = sizeof (*ptr->array)
1255 auto *ArrayElementSize = llvm::ConstantInt::get(
1256 ResType, ArrayElementBaseSize->getQuantity(), IsSigned);
1257
1258 // casted_array_element_size = sizeof (*((cast) ptr->array));
1259 auto *CastedArrayElementSize = llvm::ConstantInt::get(
1260 ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);
1261
1262 // array_size = count * array_element_size;
1263 Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
1264 !IsSigned, IsSigned);
1265
1266 // Option (1) 'ptr->array'
1267 // result = array_size
1268 Value *Result = ArraySize;
1269
1270 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1271 // index_size = index * casted_array_element_size;
1272 Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
1273 "index_size", !IsSigned, IsSigned);
1274
1275 // result = result - index_size;
1276 Result =
1277 Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
1278 }
1279
1280 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1281}
1282
/// Compute the object size for an access involving a struct's flexible array
/// member annotated with '__counted_by'. Handles access to the FAM itself and
/// to other fields of the enclosing struct (see the formula comments below).
/// Returns null when the pattern doesn't apply; the emitted value is clamped
/// to zero when the computation is negative.
1283llvm::Value *CodeGenFunction::emitCountedByMemberSize(
1284 const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
1285 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1286 const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1287 if (!FD)
1288 return nullptr;
1289
1290 // Find the flexible array member and check that it has the __counted_by
1291 // attribute.
1292 ASTContext &Ctx = getContext();
1293 const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
1294 const FieldDecl *FlexibleArrayMemberFD = nullptr;
1295
// NOTE(review): the opening of this condition (original line 1296, likely an
// `if (Decl::isFlexibleArrayMemberLike(` call) was lost in extraction.
1297 Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
1298 /*IgnoreTemplateOrMacroSubstitution=*/true))
1299 FlexibleArrayMemberFD = FD;
1300 else
1301 FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);
1302
1303 if (!FlexibleArrayMemberFD ||
1304 !FlexibleArrayMemberFD->getType()->isCountAttributedType())
1305 return nullptr;
1306
1307 // Get the 'count' FieldDecl.
1308 const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
1309 if (!CountFD)
1310 // Can't find the field referenced by the "counted_by" attribute.
1311 return nullptr;
1312
1313 // Calculate the flexible array member's object size using these formulae.
1314 // (Note: if the calculation is negative, we return 0.):
1315 //
1316 // struct p;
1317 // struct s {
1318 // /* ... */
1319 // int count;
1320 // struct p *array[] __attribute__((counted_by(count)));
1321 // };
1322 //
1323 // 1) 'ptr->array':
1324 //
1325 // count = ptr->count;
1326 //
1327 // flexible_array_member_element_size = sizeof (*ptr->array);
1328 // flexible_array_member_size =
1329 // count * flexible_array_member_element_size;
1330 //
1331 // result = flexible_array_member_size;
1332 //
1333 // cmp = (result >= 0)
1334 // return cmp ? result : 0;
1335 //
1336 // 2) '&((cast) ptr->array)[idx]':
1337 //
1338 // count = ptr->count;
1339 // index = idx;
1340 //
1341 // flexible_array_member_element_size = sizeof (*ptr->array);
1342 // flexible_array_member_size =
1343 // count * flexible_array_member_element_size;
1344 //
1345 // casted_flexible_array_member_element_size =
1346 // sizeof (*((cast) ptr->array));
1347 // index_size = index * casted_flexible_array_member_element_size;
1348 //
1349 // result = flexible_array_member_size - index_size;
1350 //
1351 // cmp = (result >= 0)
1352 // if (index != 0)
1353 // cmp = (cmp && index >= 0)
1354 // return cmp ? result : 0;
1355 //
1356 // 3) '&ptr->field':
1357 //
1358 // count = ptr->count;
1359 // sizeof_struct = sizeof (struct s);
1360 //
1361 // flexible_array_member_element_size = sizeof (*ptr->array);
1362 // flexible_array_member_size =
1363 // count * flexible_array_member_element_size;
1364 //
1365 // field_offset = offsetof (struct s, field);
1366 // offset_diff = sizeof_struct - field_offset;
1367 //
1368 // result = offset_diff + flexible_array_member_size;
1369 //
1370 // cmp = (result >= 0)
1371 // return cmp ? result : 0;
1372 //
1373 // 4) '&((cast) ptr->field_array)[idx]':
1374 //
1375 // count = ptr->count;
1376 // index = idx;
1377 // sizeof_struct = sizeof (struct s);
1378 //
1379 // flexible_array_member_element_size = sizeof (*ptr->array);
1380 // flexible_array_member_size =
1381 // count * flexible_array_member_element_size;
1382 //
1383 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1384 // field_offset = offsetof (struct s, field)
1385 // field_offset += index * casted_field_element_size;
1386 //
1387 // offset_diff = sizeof_struct - field_offset;
1388 //
1389 // result = offset_diff + flexible_array_member_size;
1390 //
1391 // cmp = (result >= 0)
1392 // if (index != 0)
1393 // cmp = (cmp && index >= 0)
1394 // return cmp ? result : 0;
1395
1396 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1397
1398 QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();
1399
1400 // Explicit cast because otherwise the CharWidth will promote an i32's into
1401 // u64's leading to overflows.
1402 int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());
1403
1404 // field_offset = offsetof (struct s, field);
1405 Value *FieldOffset = nullptr;
1406 if (FlexibleArrayMemberFD != FD) {
1407 std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
1408 if (!Offset)
1409 return nullptr;
// GetFieldOffset returns bits; convert to bytes.
1410 FieldOffset =
1411 llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
1412 }
1413
1414 // count = ptr->count;
1415 // index = ptr->index;
1416 Value *Count, *Index;
1417 std::tie(Count, Index) = GetCountFieldAndIndex(
1418 *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
1419 if (!Count)
1420 return nullptr;
1421
1422 // flexible_array_member_element_size = sizeof (*ptr->array);
1423 const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
1424 CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1425 auto *FlexibleArrayMemberElementSize =
1426 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1427
1428 // flexible_array_member_size = count * flexible_array_member_element_size;
1429 Value *FlexibleArrayMemberSize =
1430 Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
1431 "flexible_array_member_size", !IsSigned, IsSigned);
1432
1433 Value *Result = nullptr;
1434 if (FlexibleArrayMemberFD == FD) {
1435 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1436 // casted_flexible_array_member_element_size =
1437 // sizeof (*((cast) ptr->array));
1438 llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
1439 FlexibleArrayMemberElementSize;
1440 if (!CastedArrayElementTy.isNull() &&
1441 CastedArrayElementTy->isPointerType()) {
1442 CharUnits BaseSize =
1443 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1444 CastedFlexibleArrayMemberElementSize =
1445 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1446 }
1447
1448 // index_size = index * casted_flexible_array_member_element_size;
1449 Value *IndexSize =
1450 Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
1451 "index_size", !IsSigned, IsSigned);
1452
1453 // result = flexible_array_member_size - index_size;
1454 Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
1455 !IsSigned, IsSigned);
1456 } else { // Option (1) 'ptr->array'
1457 // result = flexible_array_member_size;
1458 Result = FlexibleArrayMemberSize;
1459 }
1460 } else {
1461 // sizeof_struct = sizeof (struct s);
1462 llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
1463 const llvm::DataLayout &Layout = CGM.getDataLayout();
1464 TypeSize Size = Layout.getTypeSizeInBits(StructTy);
1465 Value *SizeofStruct =
1466 llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);
1467
1468 if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
1469 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1470 CharUnits BaseSize;
1471 if (!CastedArrayElementTy.isNull() &&
1472 CastedArrayElementTy->isPointerType()) {
1473 BaseSize =
1474 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1475 } else {
1476 const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
1477 BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1478 }
1479
1480 llvm::ConstantInt *CastedFieldElementSize =
1481 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1482
1483 // field_offset += index * casted_field_element_size;
1484 Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
1485 "field_offset", !IsSigned, IsSigned);
1486 FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
1487 }
1488 // Option (3) '&ptr->field', and Option (4) continuation.
1489 // offset_diff = flexible_array_member_offset - field_offset;
1490 Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
1491 "offset_diff", !IsSigned, IsSigned);
1492
1493 // result = offset_diff + flexible_array_member_size;
1494 Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
1495 }
1496
1497 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1498}
1499
1500/// Returns a Value corresponding to the size of the given expression.
1501/// This Value may be either of the following:
1502/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1503/// it)
1504/// - A call to the @llvm.objectsize intrinsic
1505///
1506/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1507/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1508/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1509llvm::Value *
1510CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1511 llvm::IntegerType *ResType,
1512 llvm::Value *EmittedE, bool IsDynamic) {
1513 // We need to reference an argument if the pointer is a parameter with the
1514 // pass_object_size attribute.
1515 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1516 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1517 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1518 if (Param != nullptr && PS != nullptr &&
1519 areBOSTypesCompatible(PS->getType(), Type)) {
1520 auto Iter = SizeArguments.find(Param);
1521 assert(Iter != SizeArguments.end());
1522
1523 const ImplicitParamDecl *D = Iter->second;
1524 auto DIter = LocalDeclMap.find(D);
1525 assert(DIter != LocalDeclMap.end());
1526
1527 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1528 getContext().getSizeType(), E->getBeginLoc());
1529 }
1530 }
1531
1532 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1533 // evaluate E for side-effects. In either case, we shouldn't lower to
1534 // @llvm.objectsize.
1535 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1536 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1537
1538 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1539 assert(Ptr->getType()->isPointerTy() &&
1540 "Non-pointer passed to __builtin_object_size?");
1541
1542 if (IsDynamic)
1543 // Emit special code for a flexible array member with the "counted_by"
1544 // attribute.
1545 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1546 return V;
1547
1548 Function *F =
1549 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1550
1551 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
1552 Value *Min = Builder.getInt1((Type & 2) != 0);
1553 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
1554 Value *NullIsUnknown = Builder.getTrue();
1555 Value *Dynamic = Builder.getInt1(IsDynamic);
1556 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1557}
1558
1559namespace {
1560/// A struct to generically describe a bit test intrinsic.
1561struct BitTest {
// What the builtin does to the addressed bit after reading it.
1562 enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
// Memory-ordering flavor of the builtin (Unlocked = non-atomic).
1563 enum InterlockingKind : uint8_t {
1564 Unlocked,
1565 Sequential,
1566 Acquire,
1567 Release,
1568 NoFence
1569 };
1570
1571 ActionKind Action;
1572 InterlockingKind Interlocking;
// True for the 64-bit variants; selects the 'q' vs 'l' suffix on x86.
1573 bool Is64Bit;
1574
// Map a builtin ID onto the fields above; see the definition below.
1575 static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
1576};
1577
1578} // namespace
1579
/// Decode a _bittest* builtin ID into its action, interlocking flavor, and
/// operand width. Must only be called with one of the bittest builtin IDs.
1580BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1581 switch (BuiltinID) {
1582 // Main portable variants.
1583 case Builtin::BI_bittest:
1584 return {TestOnly, Unlocked, false};
1585 case Builtin::BI_bittestandcomplement:
1586 return {Complement, Unlocked, false};
1587 case Builtin::BI_bittestandreset:
1588 return {Reset, Unlocked, false};
1589 case Builtin::BI_bittestandset:
1590 return {Set, Unlocked, false};
1591 case Builtin::BI_interlockedbittestandreset:
1592 return {Reset, Sequential, false};
1593 case Builtin::BI_interlockedbittestandset:
1594 return {Set, Sequential, false};
1595
1596 // 64-bit variants.
1597 case Builtin::BI_bittest64:
1598 return {TestOnly, Unlocked, true};
1599 case Builtin::BI_bittestandcomplement64:
1600 return {Complement, Unlocked, true};
1601 case Builtin::BI_bittestandreset64:
1602 return {Reset, Unlocked, true};
1603 case Builtin::BI_bittestandset64:
1604 return {Set, Unlocked, true};
1605 case Builtin::BI_interlockedbittestandreset64:
1606 return {Reset, Sequential, true};
1607 case Builtin::BI_interlockedbittestandset64:
1608 return {Set, Sequential, true};
1609
1610 // ARM/AArch64-specific ordering variants.
// NOTE(review): the *64_acq/_rel/_nf cases below return Is64Bit == false
// even though they name 64-bit builtins. Is64Bit is only consulted on the
// x86 inline-asm path and these builtins are ARM/AArch64-only, so this looks
// harmless — but confirm whether 'true' was intended for consistency with
// the plain 64-bit cases above.
1611 case Builtin::BI_interlockedbittestandset_acq:
1612 return {Set, Acquire, false};
1613 case Builtin::BI_interlockedbittestandset_rel:
1614 return {Set, Release, false};
1615 case Builtin::BI_interlockedbittestandset_nf:
1616 return {Set, NoFence, false};
1617 case Builtin::BI_interlockedbittestandreset_acq:
1618 return {Reset, Acquire, false};
1619 case Builtin::BI_interlockedbittestandreset_rel:
1620 return {Reset, Release, false};
1621 case Builtin::BI_interlockedbittestandreset_nf:
1622 return {Reset, NoFence, false};
1623 case Builtin::BI_interlockedbittestandreset64_acq:
1624 return {Reset, Acquire, false};
1625 case Builtin::BI_interlockedbittestandreset64_rel:
1626 return {Reset, Release, false};
1627 case Builtin::BI_interlockedbittestandreset64_nf:
1628 return {Reset, NoFence, false};
1629 case Builtin::BI_interlockedbittestandset64_acq:
1630 return {Set, Acquire, false};
1631 case Builtin::BI_interlockedbittestandset64_rel:
1632 return {Set, Release, false};
1633 case Builtin::BI_interlockedbittestandset64_nf:
1634 return {Set, NoFence, false};
1635 }
1636 llvm_unreachable("expected only bittest intrinsics");
1637}
1638
1639static char bitActionToX86BTCode(BitTest::ActionKind A) {
1640 switch (A) {
1641 case BitTest::TestOnly: return '\0';
1642 case BitTest::Complement: return 'c';
1643 case BitTest::Reset: return 'r';
1644 case BitTest::Set: return 's';
1645 }
1646 llvm_unreachable("invalid action");
1647}
1648
/// Emit a _bittest* builtin on x86 as inline asm using the BT/BTC/BTR/BTS
/// instructions (with a 'lock' prefix for the interlocked variants); the CF
/// result is returned via the '@ccc' flag-output constraint.
/// NOTE(review): the first line of the signature (original line 1649,
/// beginning `static llvm::Value *EmitX86BitTestIntrinsic(CodeGenFunction
/// &CGF,`) was lost in extraction — confirm against the full file.
1650 BitTest BT,
1651 const CallExpr *E, Value *BitBase,
1652 Value *BitPos) {
1653 char Action = bitActionToX86BTCode(BT.Action);
1654 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1655
1656 // Build the assembly.
// NOTE(review): the declaration of `Asm` (original line 1657, presumably a
// `SmallString<64> Asm;`) was lost in extraction.
1658 raw_svector_ostream AsmOS(Asm);
1659 if (BT.Interlocking != BitTest::Unlocked)
1660 AsmOS << "lock ";
1661 AsmOS << "bt";
1662 if (Action)
1663 AsmOS << Action;
1664 AsmOS << SizeSuffix << " $2, ($1)";
1665
1666 // Build the constraints. FIXME: We should support immediates when possible.
1667 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1668 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1669 if (!MachineClobbers.empty()) {
1670 Constraints += ',';
1671 Constraints += MachineClobbers;
1672 }
// The bit-position operand keeps the width of the builtin's second argument.
1673 llvm::IntegerType *IntType = llvm::IntegerType::get(
1674 CGF.getLLVMContext(),
1675 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1676 llvm::FunctionType *FTy =
1677 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1678
1679 llvm::InlineAsm *IA =
1680 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1681 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1682}
1683
1684static llvm::AtomicOrdering
1685getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1686 switch (I) {
1687 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1688 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1689 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1690 case BitTest::Release: return llvm::AtomicOrdering::Release;
1691 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1692 }
1693 llvm_unreachable("invalid interlocking");
1694}
1695
1696static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1697 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1698 llvm::Type *ArgType = ArgValue->getType();
1699
1700 // Boolean vectors can be casted directly to its bitfield representation. We
1701 // intentionally do not round up to the next power of two size and let LLVM
1702 // handle the trailing bits.
1703 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1704 VT && VT->getElementType()->isIntegerTy(1)) {
1705 llvm::Type *StorageType =
1706 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1707 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1708 }
1709
1710 return ArgValue;
1711}
1712
1713/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1714/// bits and a bit position and read and optionally modify the bit at that
1715/// position. The position index can be arbitrarily large, i.e. it can be larger
1716/// than 31 or 63, so we need an indexed load in the general case.
1717static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1718 unsigned BuiltinID,
1719 const CallExpr *E) {
1720 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1721 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1722
1723 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1724
1725 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1726 // indexing operation internally. Use them if possible.
1727 if (CGF.getTarget().getTriple().isX86())
1728 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1729
1730 // Otherwise, use generic code to load one byte and test the bit. Use all but
1731 // the bottom three bits as the array index, and the bottom three bits to form
1732 // a mask.
1733 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
1734 Value *ByteIndex = CGF.Builder.CreateAShr(
1735 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1736 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1737 "bittest.byteaddr"),
1738 CGF.Int8Ty, CharUnits::One());
1739 Value *PosLow =
1740 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1741 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1742
1743 // The updating instructions will need a mask.
1744 Value *Mask = nullptr;
1745 if (BT.Action != BitTest::TestOnly) {
1746 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1747 "bittest.mask");
1748 }
1749
1750 // Check the action and ordering of the interlocked intrinsics.
1751 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1752
1753 Value *OldByte = nullptr;
1754 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1755 // Emit a combined atomicrmw load/store operation for the interlocked
1756 // intrinsics.
1757 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1758 if (BT.Action == BitTest::Reset) {
1759 Mask = CGF.Builder.CreateNot(Mask);
1760 RMWOp = llvm::AtomicRMWInst::And;
1761 }
1762 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1763 } else {
1764 // Emit a plain load for the non-interlocked intrinsics.
1765 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1766 Value *NewByte = nullptr;
1767 switch (BT.Action) {
1768 case BitTest::TestOnly:
1769 // Don't store anything.
1770 break;
1771 case BitTest::Complement:
1772 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1773 break;
1774 case BitTest::Reset:
1775 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1776 break;
1777 case BitTest::Set:
1778 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1779 break;
1780 }
1781 if (NewByte)
1782 CGF.Builder.CreateStore(NewByte, ByteAddr);
1783 }
1784
1785 // However we loaded the old byte, either by plain load or atomicrmw, shift
1786 // the bit into the low position and mask it to 0 or 1.
1787 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1788 return CGF.Builder.CreateAnd(
1789 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1790}
1791
namespace {
/// The setjmp flavors exposed by the MSVC runtime; selects the runtime
/// function name and argument convention in EmitMSVCRTSetJmp.
enum class MSVCSetJmpKind {
  _setjmpex,
  _setjmp3,
  _setjmp
};
} // namespace
1799
1800/// MSVC handles setjmp a bit differently on different platforms. On every
1801/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1802/// parameters can be passed as variadic arguments, but we always pass none.
1803static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1804 const CallExpr *E) {
1805 llvm::Value *Arg1 = nullptr;
1806 llvm::Type *Arg1Ty = nullptr;
1807 StringRef Name;
1808 bool IsVarArg = false;
1809 if (SJKind == MSVCSetJmpKind::_setjmp3) {
1810 Name = "_setjmp3";
1811 Arg1Ty = CGF.Int32Ty;
1812 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1813 IsVarArg = true;
1814 } else {
1815 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1816 Arg1Ty = CGF.Int8PtrTy;
1817 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1818 Arg1 = CGF.Builder.CreateCall(
1819 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1820 } else
1821 Arg1 = CGF.Builder.CreateCall(
1822 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1823 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1824 }
1825
1826 // Mark the call site and declaration with ReturnsTwice.
1827 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1828 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1829 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1830 llvm::Attribute::ReturnsTwice);
1831 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1832 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1833 ReturnsTwiceAttr, /*Local=*/true);
1834
1835 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1836 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1837 llvm::Value *Args[] = {Buf, Arg1};
1838 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1839 CB->setAttributes(ReturnsTwiceAttr);
1840 return RValue::get(CB);
1841}
1842
1843// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
// NOTE(review): this listing came through a source-browser extraction that
// dropped the function's signature line and every `case MSVCIntrin::...:`
// label, so each `return` below belongs to an elided case. Restore the block
// from the upstream file before editing the code itself.
1845 const CallExpr *E) {
1846 switch (BuiltinID) {
// _BitScanForward/_BitScanReverse: if the argument is zero, return 0 and
// leave the index output untouched; otherwise store the bit index through
// argument 0 and return 1. The PHI below merges the two paths.
1849 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1850 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1851
1852 llvm::Type *ArgType = ArgValue->getType();
1853 llvm::Type *IndexType = IndexAddress.getElementType();
1854 llvm::Type *ResultType = ConvertType(E->getType());
1855
1856 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1857 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1858 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1859
1860 BasicBlock *Begin = Builder.GetInsertBlock();
1861 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1862 Builder.SetInsertPoint(End);
1863 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1864
1865 Builder.SetInsertPoint(Begin);
1866 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1867 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1868 Builder.CreateCondBr(IsZero, End, NotZero);
1869 Result->addIncoming(ResZero, Begin);
1870
1871 Builder.SetInsertPoint(NotZero);
1872
// Passing true as the second cttz/ctlz operand is safe here: the zero case
// already branched directly to End above.
1873 if (BuiltinID == MSVCIntrin::_BitScanForward) {
1874 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1875 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1876 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1877 Builder.CreateStore(ZeroCount, IndexAddress, false);
1878 } else {
// BitScanReverse wants the index of the highest set bit, i.e.
// (width - 1) - ctlz(value).
1879 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1880 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1881
1882 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1883 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1884 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1885 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1886 Builder.CreateStore(Index, IndexAddress, false);
1887 }
1888 Builder.CreateBr(End);
1889 Result->addIncoming(ResOne, NotZero);
1890
1891 Builder.SetInsertPoint(End);
1892 return Result;
1893 }
// The _Interlocked* family: each (elided) case maps onto an atomicrmw or
// cmpxchg; the _acq/_rel/_nf suffixed variants pass an explicit ordering.
1895 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1897 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1899 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1901 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1903 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1905 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1907 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1908 AtomicOrdering::Acquire);
1910 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1911 AtomicOrdering::Release);
1913 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1914 AtomicOrdering::Monotonic);
1916 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1917 AtomicOrdering::Acquire);
1919 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1920 AtomicOrdering::Release);
1922 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1923 AtomicOrdering::Monotonic);
1925 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1927 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1929 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1931 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1934 *this, E, AtomicOrdering::SequentiallyConsistent);
1936 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1938 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1940 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1942 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1943 AtomicOrdering::Acquire);
1945 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1946 AtomicOrdering::Release);
1948 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1949 AtomicOrdering::Monotonic);
1951 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1952 AtomicOrdering::Acquire);
1954 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1955 AtomicOrdering::Release);
1957 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1958 AtomicOrdering::Monotonic);
1960 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1961 AtomicOrdering::Acquire);
1963 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1964 AtomicOrdering::Release);
1966 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1967 AtomicOrdering::Monotonic);
1969 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1971 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1973 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1975 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1977 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1979 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1980
1982 return EmitAtomicDecrementValue(*this, E);
1984 return EmitAtomicIncrementValue(*this, E);
1985
1987 // Request immediate process termination from the kernel. The instruction
1988 // sequences to do this are documented on MSDN:
1989 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1990 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1991 StringRef Asm, Constraints;
1992 switch (ISA) {
1993 default:
1994 ErrorUnsupported(E, "__fastfail call for this architecture");
1995 break;
1996 case llvm::Triple::x86:
1997 case llvm::Triple::x86_64:
1998 Asm = "int $$0x29";
1999 Constraints = "{cx}";
2000 break;
2001 case llvm::Triple::thumb:
2002 Asm = "udf #251";
2003 Constraints = "{r0}";
2004 break;
2005 case llvm::Triple::aarch64:
2006 Asm = "brk #0xF003";
2007 Constraints = "{w0}";
2008 }
// The fail-fast code argument is routed into the register named in the
// constraint string; the inline asm never returns.
2009 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2010 llvm::InlineAsm *IA =
2011 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2012 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2013 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2014 llvm::Attribute::NoReturn);
2015 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2016 CI->setAttributes(NoReturnAttr);
2017 return CI;
2018 }
2019 }
2020 llvm_unreachable("Incorrect MSVC intrinsic!");
2021}
2022
2023namespace {
2024// ARC cleanup for __builtin_os_log_format
2025struct CallObjCArcUse final : EHScopeStack::Cleanup {
2026 CallObjCArcUse(llvm::Value *object) : object(object) {}
2027 llvm::Value *object;
2028
2029 void Emit(CodeGenFunction &CGF, Flags flags) override {
2030 CGF.EmitARCIntrinsicUse(object);
2031 }
2032};
2033}
2034
// Sanitizer check for clz/ctz-style builtin arguments: under
// -fsanitize=builtin the (bit-packed) argument must be non-zero.
// NOTE(review): the signature line was dropped by the extraction; the visible
// parameters are the argument expression E and the BuiltinCheckKind Kind.
2036 BuiltinCheckKind Kind) {
2037 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2038 "Unsupported builtin check kind");
2039
// EmitBitCountExpr also packs boolean vectors into an integer bitfield.
2040 Value *ArgValue = EmitBitCountExpr(*this, E);
2041 if (!SanOpts.has(SanitizerKind::Builtin))
2042 return ArgValue;
2043
2044 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2045 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2046 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
// Diagnose through the InvalidBuiltin handler when the argument is zero.
// NOTE(review): one line of the static-data argument list (presumably the
// source-location entry) was elided by the extraction.
2047 Value *Cond = Builder.CreateICmpNE(
2048 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2049 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2051 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2052 {});
2053 return ArgValue;
2054}
2055
// Sanitizer check for an assume-style builtin argument: evaluates E as a
// boolean and, under -fsanitize=builtin, reports via the InvalidBuiltin
// handler (with kind BCK_AssumePassedFalse) when it is false.
// NOTE(review): the signature line was dropped by the extraction, as was one
// line of the static-data argument list below.
2057 Value *ArgValue = EvaluateExprAsBool(E);
2058 if (!SanOpts.has(SanitizerKind::Builtin))
2059 return ArgValue;
2060
2061 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2062 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2063 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
// The boolean itself is the check condition: false triggers the handler.
2064 EmitCheck(
2065 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2067 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2068 {});
2069 return ArgValue;
2070}
2071
2072static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2073 return CGF.Builder.CreateBinaryIntrinsic(
2074 Intrinsic::abs, ArgValue,
2075 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2076}
2077
// Emit |x| with an overflow check for x == INT_MIN, either as a UBSan
// diagnostic (SanitizeOverflow) or as a trap.
// NOTE(review): the extraction dropped the signature line and the declaration
// of the Ordinals container used below; restore from upstream before editing.
2079 bool SanitizeOverflow) {
2080 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2081
2082 // Try to eliminate overflow check.
2083 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2084 if (!VCI->isMinSignedValue())
2085 return EmitAbs(CGF, ArgValue, true);
2086 }
2087
2089 SanitizerHandler CheckHandler;
2090 if (SanitizeOverflow) {
2091 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2092 CheckHandler = SanitizerHandler::NegateOverflow;
2093 } else
2094 CheckHandler = SanitizerHandler::SubOverflow;
2095
2096 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2097
// Compute 0 - x with ssub.with.overflow; the overflow bit fires exactly for
// x == INT_MIN.
2098 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2099 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2100 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2101 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2102 Value *NotOverflow = CGF.Builder.CreateNot(
2103 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2104
2105 // TODO: support -ftrapv-handler.
2106 if (SanitizeOverflow) {
2107 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2108 CheckHandler,
2111 {ArgValue});
2112 } else
2113 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2114
// Select the negated value only for negative inputs.
2115 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2116 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2117}
2118
2119/// Get the argument type for arguments to os_log_helper.
// NOTE(review): the signature line was elided by the extraction; the visible
// body takes an ASTContext C and a byte count Size, and returns the canonical
// unsigned integer type of Size * 8 bits.
2121 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2122 return C.getCanonicalType(UnsignedTy);
2123}
2124
// Generate (or find an already-emitted copy of) the __os_log_helper_* function
// that serializes os_log arguments into the caller-provided buffer. The name
// encodes the buffer alignment, summary/numArgs bytes, and each item's
// size/descriptor bytes, so identical layouts share one helper.
// NOTE(review): the extraction dropped several lines here (the signature, the
// Name and ArgTys declarations, the BufAddr construction and the
// FinishFunction call); restore from upstream before editing the code.
2127 CharUnits BufferAlignment) {
2128 ASTContext &Ctx = getContext();
2129
2131 {
2132 raw_svector_ostream OS(Name);
2133 OS << "__os_log_helper";
2134 OS << "_" << BufferAlignment.getQuantity();
2135 OS << "_" << int(Layout.getSummaryByte());
2136 OS << "_" << int(Layout.getNumArgsByte());
2137 for (const auto &Item : Layout.Items)
2138 OS << "_" << int(Item.getSizeByte()) << "_"
2139 << int(Item.getDescriptorByte());
2140 }
2141
// Reuse an existing helper with the same mangling if one was already emitted.
2142 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2143 return F;
2144
// Build the implicit parameter list: the buffer pointer plus one integer
// parameter per non-empty layout item.
2146 FunctionArgList Args;
2147 Args.push_back(ImplicitParamDecl::Create(
2148 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2150 ArgTys.emplace_back(Ctx.VoidPtrTy);
2151
2152 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2153 char Size = Layout.Items[I].getSizeByte();
2154 if (!Size)
2155 continue;
2156
2157 QualType ArgTy = getOSLogArgType(Ctx, Size);
2158 Args.push_back(ImplicitParamDecl::Create(
2159 Ctx, nullptr, SourceLocation(),
2160 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2162 ArgTys.emplace_back(ArgTy);
2163 }
2164
2165 QualType ReturnTy = Ctx.VoidTy;
2166
2167 // The helper function has linkonce_odr linkage to enable the linker to merge
2168 // identical functions. To ensure the merging always happens, 'noinline' is
2169 // attached to the function when compiling with -Oz.
2170 const CGFunctionInfo &FI =
2171 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2172 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2173 llvm::Function *Fn = llvm::Function::Create(
2174 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2175 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2176 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2177 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2178 Fn->setDoesNotThrow();
2179
2180 // Attach 'noinline' at -Oz.
2181 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2182 Fn->addFnAttr(llvm::Attribute::NoInline);
2183
2184 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2185 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2186
2187 // Create a scope with an artificial location for the body of this function.
2188 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2189
// Serialize the buffer: summary byte, numArgs byte, then for each item its
// descriptor byte, size byte, and the argument data itself.
2190 CharUnits Offset;
2192 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2193 BufferAlignment);
2194 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2195 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2196 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2197 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2198
2199 unsigned I = 1;
2200 for (const auto &Item : Layout.Items) {
2201 Builder.CreateStore(
2202 Builder.getInt8(Item.getDescriptorByte()),
2203 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2204 Builder.CreateStore(
2205 Builder.getInt8(Item.getSizeByte()),
2206 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2207
2208 CharUnits Size = Item.size();
2209 if (!Size.getQuantity())
2210 continue;
2211
2212 Address Arg = GetAddrOfLocalVar(Args[I]);
2213 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2214 Addr = Addr.withElementType(Arg.getElementType());
2215 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2216 Offset += Size;
2217 ++I;
2218 }
2219
2221
2222 return Fn;
2223}
2224
// Emit a call to __builtin_os_log_format: collect one integer-typed call
// argument per layout item and invoke the shared __os_log_helper function.
// Returns the buffer address.
// NOTE(review): the extraction dropped several lines here (the signature, the
// buffer-layout computation, an isa<CallExpr>/message-send check inside
// LifetimeExtendObject, two declarations and the EmitCall line); restore from
// upstream before editing the code.
2226 assert(E.getNumArgs() >= 2 &&
2227 "__builtin_os_log_format takes at least 2 arguments");
2228 ASTContext &Ctx = getContext();
2231 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2232
2233 // Ignore argument 1, the format string. It is not currently used.
2234 CallArgList Args;
2235 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2236
2237 for (const auto &Item : Layout.Items) {
2238 int Size = Item.getSizeByte();
2239 if (!Size)
2240 continue;
2241
2242 llvm::Value *ArgVal;
2243
// Mask items encode their mask string as a little-endian 64-bit constant.
2244 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
2245 uint64_t Val = 0;
2246 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2247 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2248 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2249 } else if (const Expr *TheExpr = Item.getExpr()) {
2250 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2251
2252 // If a temporary object that requires destruction after the full
2253 // expression is passed, push a lifetime-extended cleanup to extend its
2254 // lifetime to the end of the enclosing block scope.
2255 auto LifetimeExtendObject = [&](const Expr *E) {
2256 E = E->IgnoreParenCasts();
2257 // Extend lifetimes of objects returned by function calls and message
2258 // sends.
2259
2260 // FIXME: We should do this in other cases in which temporaries are
2261 // created including arguments of non-ARC types (e.g., C++
2262 // temporaries).
2264 return true;
2265 return false;
2266 };
2267
2268 if (TheExpr->getType()->isObjCRetainableType() &&
2269 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2270 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2271 "Only scalar can be a ObjC retainable type");
2272 if (!isa<Constant>(ArgVal)) {
// Retain the value into a temporary and register both a destroy cleanup and
// (at -O1+) a clang.arc.use marker so the ARC optimizer keeps it alive.
2273 CleanupKind Cleanup = getARCCleanupKind();
2274 QualType Ty = TheExpr->getType();
2276 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2277 ArgVal = EmitARCRetain(Ty, ArgVal);
2278 Builder.CreateStore(ArgVal, Addr);
2279 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2281 Cleanup & EHCleanup);
2282
2283 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2284 // argument has to be alive.
2285 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2287 }
2288 }
2289 } else {
2290 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2291 }
2292
// Reinterpret each argument as an integer of its in-memory width, then
// zero-extend to the helper's parameter type.
2293 unsigned ArgValSize =
2294 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2295 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2296 ArgValSize);
2297 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2298 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2299 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2300 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2301 Args.add(RValue::get(ArgVal), ArgTy);
2302 }
2303
2304 const CGFunctionInfo &FI =
2305 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2306 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2307 Layout, BufAddr.getAlignment());
2309 return RValue::get(BufAddr, *this);
2310}
2311
// Predicate: true for a __builtin_mul_overflow whose operands are both
// unsigned, all three widths match, and the result is signed — the case
// EmitCheckedUnsignedMultiplySignedResult specializes.
// NOTE(review): the line carrying the function's name was elided by the
// extraction.
2313 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2314 WidthAndSignedness ResultInfo) {
2315 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2316 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2317 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2318}
2319
// Specialized lowering for unsigned*unsigned -> signed __builtin_mul_overflow:
// do the unsigned checked multiply, then additionally flag results above
// INT_MAX. Stores the (possibly volatile) result and returns the overflow bit.
// NOTE(review): the extraction elided the line with the function's name and
// the first line of the assert below.
2321 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2322 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2323 const clang::Expr *ResultArg, QualType ResultQTy,
2324 WidthAndSignedness ResultInfo) {
2326 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2327 "Cannot specialize this multiply");
2328
2329 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2330 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2331
2332 llvm::Value *HasOverflow;
2333 llvm::Value *Result = EmitOverflowIntrinsic(
2334 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2335
2336 // The intrinsic call will detect overflow when the value is > UINT_MAX,
2337 // however, since the original builtin had a signed result, we need to report
2338 // an overflow when the result is greater than INT_MAX.
2339 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2340 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2341
2342 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2343 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2344
// Honor volatility of the pointee when storing through the result pointer.
2345 bool isVolatile =
2346 ResultArg->getType()->getPointeeType().isVolatileQualified();
2347 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2348 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2349 isVolatile);
2350 return RValue::get(HasOverflow);
2351}
2352
2353/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2354static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2355 WidthAndSignedness Op1Info,
2356 WidthAndSignedness Op2Info,
2357 WidthAndSignedness ResultInfo) {
2358 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2359 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2360 Op1Info.Signed != Op2Info.Signed;
2361}
2362
2363/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2364/// the generic checked-binop irgen.
// NOTE(review): the extraction elided the line carrying the function's name
// and first parameters; restore from upstream before editing the code.
2365static RValue
2367 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2368 WidthAndSignedness Op2Info,
2369 const clang::Expr *ResultArg, QualType ResultQTy,
2370 WidthAndSignedness ResultInfo) {
2371 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2372 Op2Info, ResultInfo) &&
2373 "Not a mixed-sign multipliction we can specialize");
2374
2375 // Emit the signed and unsigned operands.
2376 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2377 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2378 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2379 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2380 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2381 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2382
2383 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2384 if (SignedOpWidth < UnsignedOpWidth)
2385 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2386 if (UnsignedOpWidth < SignedOpWidth)
2387 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2388
2389 llvm::Type *OpTy = Signed->getType();
2390 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2391 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2392 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2393 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2394
2395 // Take the absolute value of the signed operand.
2396 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2397 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2398 llvm::Value *AbsSigned =
2399 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2400
2401 // Perform a checked unsigned multiplication.
2402 llvm::Value *UnsignedOverflow;
2403 llvm::Value *UnsignedResult =
2404 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2405 Unsigned, UnsignedOverflow);
2406
2407 llvm::Value *Overflow, *Result;
2408 if (ResultInfo.Signed) {
2409 // Signed overflow occurs if the result is greater than INT_MAX or lesser
2410 // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
2411 auto IntMax =
2412 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2413 llvm::Value *MaxResult =
2414 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2415 CGF.Builder.CreateZExt(IsNegative, OpTy));
2416 llvm::Value *SignedOverflow =
2417 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2418 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2419
2420 // Prepare the signed result (possibly by negating it).
2421 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2422 llvm::Value *SignedResult =
2423 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2424 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2425 } else {
2426 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2427 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2428 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2429 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2430 if (ResultInfo.Width < OpWidth) {
2431 auto IntMax =
2432 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2433 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2434 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2435 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2436 }
2437
2438 // Negate the product if it would be negative in infinite precision.
2439 Result = CGF.Builder.CreateSelect(
2440 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2441
2442 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2443 }
2444 assert(Overflow && Result && "Missing overflow or result");
2445
// Honor volatility of the pointee when storing through the result pointer.
2446 bool isVolatile =
2447 ResultArg->getType()->getPointeeType().isVolatileQualified();
2448 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2449 isVolatile);
2450 return RValue::get(Overflow);
2451}
2452
// Recursive worker for TypeRequiresBuiltinLaunder: true if Ty (stripped of
// array-ness) is a dynamic class or transitively contains a field of dynamic
// class type. Seen breaks cycles and caps repeated work.
// NOTE(review): the extraction elided the line with the function's name and
// its first parameters (the ASTContext and the QualType).
2453static bool
2455 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2456 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2457 Ty = Ctx.getBaseElementType(Arr);
2458
2459 const auto *Record = Ty->getAsCXXRecordDecl();
2460 if (!Record)
2461 return false;
2462
2463 // We've already checked this type, or are in the process of checking it.
2464 if (!Seen.insert(Record).second)
2465 return false;
2466
2467 assert(Record->hasDefinition() &&
2468 "Incomplete types should already be diagnosed");
2469
2470 if (Record->isDynamicClass())
2471 return true;
2472
2473 for (FieldDecl *F : Record->fields()) {
2474 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2475 return true;
2476 }
2477 return false;
2478}
2479
2480/// Determine if the specified type requires laundering by checking if it is a
2481/// dynamic class type or contains a subobject which is a dynamic class type.
// NOTE(review): the extraction elided the signature line and the declaration
// of the Seen set passed to the worker below. The check is gated on
// -fstrict-vtable-pointers.
2483 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2484 return false;
2486 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2487}
2488
2489RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2490 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2491 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2492
2493 // The builtin's shift arg may have a different type than the source arg and
2494 // result, but the LLVM intrinsic uses the same type for all values.
2495 llvm::Type *Ty = Src->getType();
2496 llvm::Type *ShiftTy = ShiftAmt->getType();
2497
2498 unsigned BitWidth = Ty->getIntegerBitWidth();
2499
2500 // Normalize shift amount to [0, BitWidth) range to match runtime behavior.
2501 // This matches the algorithm in ExprConstant.cpp for constant evaluation.
2502 if (BitWidth == 1) {
2503 // Rotating a 1-bit value is always a no-op
2504 ShiftAmt = ConstantInt::get(ShiftTy, 0);
2505 } else if (BitWidth == 2) {
2506 // For 2-bit values: rotation amount is 0 or 1 based on
2507 // whether the amount is even or odd. We can't use srem here because
2508 // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
2509 llvm::Value *One = ConstantInt::get(ShiftTy, 1);
2510 ShiftAmt = Builder.CreateAnd(ShiftAmt, One);
2511 } else {
2512 unsigned ShiftAmtBitWidth = ShiftTy->getIntegerBitWidth();
2513 bool ShiftAmtIsSigned = E->getArg(1)->getType()->isSignedIntegerType();
2514
2515 // Choose the wider type for the divisor to avoid truncation
2516 llvm::Type *DivisorTy = ShiftAmtBitWidth > BitWidth ? ShiftTy : Ty;
2517 llvm::Value *Divisor = ConstantInt::get(DivisorTy, BitWidth);
2518
2519 // Extend ShiftAmt to match Divisor width if needed
2520 if (ShiftAmtBitWidth < DivisorTy->getIntegerBitWidth()) {
2521 ShiftAmt = Builder.CreateIntCast(ShiftAmt, DivisorTy, ShiftAmtIsSigned);
2522 }
2523
2524 // Normalize to [0, BitWidth)
2525 llvm::Value *RemResult;
2526 if (ShiftAmtIsSigned) {
2527 RemResult = Builder.CreateSRem(ShiftAmt, Divisor);
2528 // Signed remainder can be negative, convert to positive equivalent
2529 llvm::Value *Zero = ConstantInt::get(DivisorTy, 0);
2530 llvm::Value *IsNegative = Builder.CreateICmpSLT(RemResult, Zero);
2531 llvm::Value *PositiveShift = Builder.CreateAdd(RemResult, Divisor);
2532 ShiftAmt = Builder.CreateSelect(IsNegative, PositiveShift, RemResult);
2533 } else {
2534 ShiftAmt = Builder.CreateURem(ShiftAmt, Divisor);
2535 }
2536 }
2537
2538 // Convert to the source type if needed
2539 if (ShiftAmt->getType() != Ty) {
2540 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2541 }
2542
2543 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2544 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2545 Function *F = CGM.getIntrinsic(IID, Ty);
2546 return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
2547}
2548
2549// Map math builtins for long-double to f128 version.
// Each MUTATE_LDBL(name) expands to a case that rewrites
// __builtin_<name>l to __builtin_<name>f128; unknown IDs pass through
// unchanged.
// NOTE(review): the extraction dropped most of the MUTATE_LDBL entries
// between the macro definition and #undef; only a few remain visible here.
2550static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2551 switch (BuiltinID) {
2552#define MUTATE_LDBL(func) \
2553 case Builtin::BI__builtin_##func##l: \
2554 return Builtin::BI__builtin_##func##f128;
2585 MUTATE_LDBL(nans)
2586 MUTATE_LDBL(inf)
2605 MUTATE_LDBL(huge_val)
2615#undef MUTATE_LDBL
2616 default:
2617 return BuiltinID;
2618 }
2619}
2620
2621static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2622 Value *V) {
2623 if (CGF.Builder.getIsFPConstrained() &&
2624 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2625 if (Value *Result =
2626 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2627 return Result;
2628 }
2629 return nullptr;
2630}
2631
// HIP stdpar fallback: declare (or reuse) a "<name>__hipstdpar_unsupported"
// function with the builtin's own type and call it with poison arguments,
// deferring the "unsupported builtin" failure to a later stage.
// NOTE(review): the extraction elided the signature line (taking the
// CodeGenFunction pointer) and the declaration of the Args container below.
2633 const FunctionDecl *FD) {
2634 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2635 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2636 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2637
// One poison value per formal parameter: the call only needs to type-check.
2639 for (auto &&FormalTy : FnTy->params())
2640 Args.push_back(llvm::PoisonValue::get(FormalTy));
2641
2642 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2643}
2644
2646 const CallExpr *E,
2648 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2649 "Should not codegen for consteval builtins");
2650
2651 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2652 // See if we can constant fold this builtin. If so, don't emit it at all.
2653 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2655 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2656 !Result.hasSideEffects()) {
2657 if (Result.Val.isInt())
2658 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2659 Result.Val.getInt()));
2660 if (Result.Val.isFloat())
2661 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2662 Result.Val.getFloat()));
2663 }
2664
2665 // If current long-double semantics is IEEE 128-bit, replace math builtins
2666 // of long-double with f128 equivalent.
2667 // TODO: This mutation should also be applied to other targets other than PPC,
2668 // after backend supports IEEE 128-bit style libcalls.
2669 if (getTarget().getTriple().isPPC64() &&
2670 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2671 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2672
2673 // If the builtin has been declared explicitly with an assembler label,
2674 // disable the specialized emitting below. Ideally we should communicate the
2675 // rename in IR, or at least avoid generating the intrinsic calls that are
2676 // likely to get lowered to the renamed library functions.
2677 const unsigned BuiltinIDIfNoAsmLabel =
2678 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2679
2680 std::optional<bool> ErrnoOverriden;
2681 // ErrnoOverriden is true if math-errno is overriden via the
2682 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2683 // which implies math-errno.
2684 if (E->hasStoredFPFeatures()) {
2686 if (OP.hasMathErrnoOverride())
2687 ErrnoOverriden = OP.getMathErrnoOverride();
2688 }
2689 // True if 'attribute__((optnone))' is used. This attribute overrides
2690 // fast-math which implies math-errno.
2691 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2692
2693 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2694
2695 bool GenerateFPMathIntrinsics =
2697 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2698 OptNone, IsOptimizationEnabled);
2699
2700 if (GenerateFPMathIntrinsics) {
2701 switch (BuiltinIDIfNoAsmLabel) {
2702 case Builtin::BIacos:
2703 case Builtin::BIacosf:
2704 case Builtin::BIacosl:
2705 case Builtin::BI__builtin_acos:
2706 case Builtin::BI__builtin_acosf:
2707 case Builtin::BI__builtin_acosf16:
2708 case Builtin::BI__builtin_acosl:
2709 case Builtin::BI__builtin_acosf128:
2710 case Builtin::BI__builtin_elementwise_acos:
2712 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2713
2714 case Builtin::BIasin:
2715 case Builtin::BIasinf:
2716 case Builtin::BIasinl:
2717 case Builtin::BI__builtin_asin:
2718 case Builtin::BI__builtin_asinf:
2719 case Builtin::BI__builtin_asinf16:
2720 case Builtin::BI__builtin_asinl:
2721 case Builtin::BI__builtin_asinf128:
2722 case Builtin::BI__builtin_elementwise_asin:
2724 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2725
2726 case Builtin::BIatan:
2727 case Builtin::BIatanf:
2728 case Builtin::BIatanl:
2729 case Builtin::BI__builtin_atan:
2730 case Builtin::BI__builtin_atanf:
2731 case Builtin::BI__builtin_atanf16:
2732 case Builtin::BI__builtin_atanl:
2733 case Builtin::BI__builtin_atanf128:
2734 case Builtin::BI__builtin_elementwise_atan:
2736 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2737
2738 case Builtin::BIatan2:
2739 case Builtin::BIatan2f:
2740 case Builtin::BIatan2l:
2741 case Builtin::BI__builtin_atan2:
2742 case Builtin::BI__builtin_atan2f:
2743 case Builtin::BI__builtin_atan2f16:
2744 case Builtin::BI__builtin_atan2l:
2745 case Builtin::BI__builtin_atan2f128:
2746 case Builtin::BI__builtin_elementwise_atan2:
2748 *this, E, Intrinsic::atan2,
2749 Intrinsic::experimental_constrained_atan2));
2750
2751 case Builtin::BIceil:
2752 case Builtin::BIceilf:
2753 case Builtin::BIceill:
2754 case Builtin::BI__builtin_ceil:
2755 case Builtin::BI__builtin_ceilf:
2756 case Builtin::BI__builtin_ceilf16:
2757 case Builtin::BI__builtin_ceill:
2758 case Builtin::BI__builtin_ceilf128:
2759 case Builtin::BI__builtin_elementwise_ceil:
2761 Intrinsic::ceil,
2762 Intrinsic::experimental_constrained_ceil));
2763
2764 case Builtin::BIcopysign:
2765 case Builtin::BIcopysignf:
2766 case Builtin::BIcopysignl:
2767 case Builtin::BI__builtin_copysign:
2768 case Builtin::BI__builtin_copysignf:
2769 case Builtin::BI__builtin_copysignf16:
2770 case Builtin::BI__builtin_copysignl:
2771 case Builtin::BI__builtin_copysignf128:
2772 return RValue::get(
2773 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2774
2775 case Builtin::BIcos:
2776 case Builtin::BIcosf:
2777 case Builtin::BIcosl:
2778 case Builtin::BI__builtin_cos:
2779 case Builtin::BI__builtin_cosf:
2780 case Builtin::BI__builtin_cosf16:
2781 case Builtin::BI__builtin_cosl:
2782 case Builtin::BI__builtin_cosf128:
2783 case Builtin::BI__builtin_elementwise_cos:
2785 Intrinsic::cos,
2786 Intrinsic::experimental_constrained_cos));
2787
2788 case Builtin::BIcosh:
2789 case Builtin::BIcoshf:
2790 case Builtin::BIcoshl:
2791 case Builtin::BI__builtin_cosh:
2792 case Builtin::BI__builtin_coshf:
2793 case Builtin::BI__builtin_coshf16:
2794 case Builtin::BI__builtin_coshl:
2795 case Builtin::BI__builtin_coshf128:
2796 case Builtin::BI__builtin_elementwise_cosh:
2798 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2799
2800 case Builtin::BIexp:
2801 case Builtin::BIexpf:
2802 case Builtin::BIexpl:
2803 case Builtin::BI__builtin_exp:
2804 case Builtin::BI__builtin_expf:
2805 case Builtin::BI__builtin_expf16:
2806 case Builtin::BI__builtin_expl:
2807 case Builtin::BI__builtin_expf128:
2808 case Builtin::BI__builtin_elementwise_exp:
2810 Intrinsic::exp,
2811 Intrinsic::experimental_constrained_exp));
2812
2813 case Builtin::BIexp2:
2814 case Builtin::BIexp2f:
2815 case Builtin::BIexp2l:
2816 case Builtin::BI__builtin_exp2:
2817 case Builtin::BI__builtin_exp2f:
2818 case Builtin::BI__builtin_exp2f16:
2819 case Builtin::BI__builtin_exp2l:
2820 case Builtin::BI__builtin_exp2f128:
2821 case Builtin::BI__builtin_elementwise_exp2:
2823 Intrinsic::exp2,
2824 Intrinsic::experimental_constrained_exp2));
2825 case Builtin::BI__builtin_exp10:
2826 case Builtin::BI__builtin_exp10f:
2827 case Builtin::BI__builtin_exp10f16:
2828 case Builtin::BI__builtin_exp10l:
2829 case Builtin::BI__builtin_exp10f128:
2830 case Builtin::BI__builtin_elementwise_exp10: {
2831 // TODO: strictfp support
2832 if (Builder.getIsFPConstrained())
2833 break;
2834 return RValue::get(
2835 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2836 }
2837 case Builtin::BIfabs:
2838 case Builtin::BIfabsf:
2839 case Builtin::BIfabsl:
2840 case Builtin::BI__builtin_fabs:
2841 case Builtin::BI__builtin_fabsf:
2842 case Builtin::BI__builtin_fabsf16:
2843 case Builtin::BI__builtin_fabsl:
2844 case Builtin::BI__builtin_fabsf128:
2845 return RValue::get(
2846 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2847
2848 case Builtin::BIfloor:
2849 case Builtin::BIfloorf:
2850 case Builtin::BIfloorl:
2851 case Builtin::BI__builtin_floor:
2852 case Builtin::BI__builtin_floorf:
2853 case Builtin::BI__builtin_floorf16:
2854 case Builtin::BI__builtin_floorl:
2855 case Builtin::BI__builtin_floorf128:
2856 case Builtin::BI__builtin_elementwise_floor:
2858 Intrinsic::floor,
2859 Intrinsic::experimental_constrained_floor));
2860
2861 case Builtin::BIfma:
2862 case Builtin::BIfmaf:
2863 case Builtin::BIfmal:
2864 case Builtin::BI__builtin_fma:
2865 case Builtin::BI__builtin_fmaf:
2866 case Builtin::BI__builtin_fmaf16:
2867 case Builtin::BI__builtin_fmal:
2868 case Builtin::BI__builtin_fmaf128:
2869 case Builtin::BI__builtin_elementwise_fma:
2871 Intrinsic::fma,
2872 Intrinsic::experimental_constrained_fma));
2873
2874 case Builtin::BIfmax:
2875 case Builtin::BIfmaxf:
2876 case Builtin::BIfmaxl:
2877 case Builtin::BI__builtin_fmax:
2878 case Builtin::BI__builtin_fmaxf:
2879 case Builtin::BI__builtin_fmaxf16:
2880 case Builtin::BI__builtin_fmaxl:
2881 case Builtin::BI__builtin_fmaxf128: {
2882 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2883 Builder.getFastMathFlags().setNoSignedZeros();
2885 *this, E, Intrinsic::maxnum,
2886 Intrinsic::experimental_constrained_maxnum));
2887 }
2888
2889 case Builtin::BIfmin:
2890 case Builtin::BIfminf:
2891 case Builtin::BIfminl:
2892 case Builtin::BI__builtin_fmin:
2893 case Builtin::BI__builtin_fminf:
2894 case Builtin::BI__builtin_fminf16:
2895 case Builtin::BI__builtin_fminl:
2896 case Builtin::BI__builtin_fminf128: {
2897 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2898 Builder.getFastMathFlags().setNoSignedZeros();
2900 *this, E, Intrinsic::minnum,
2901 Intrinsic::experimental_constrained_minnum));
2902 }
2903
2904 case Builtin::BIfmaximum_num:
2905 case Builtin::BIfmaximum_numf:
2906 case Builtin::BIfmaximum_numl:
2907 case Builtin::BI__builtin_fmaximum_num:
2908 case Builtin::BI__builtin_fmaximum_numf:
2909 case Builtin::BI__builtin_fmaximum_numf16:
2910 case Builtin::BI__builtin_fmaximum_numl:
2911 case Builtin::BI__builtin_fmaximum_numf128:
2912 return RValue::get(
2913 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2914
2915 case Builtin::BIfminimum_num:
2916 case Builtin::BIfminimum_numf:
2917 case Builtin::BIfminimum_numl:
2918 case Builtin::BI__builtin_fminimum_num:
2919 case Builtin::BI__builtin_fminimum_numf:
2920 case Builtin::BI__builtin_fminimum_numf16:
2921 case Builtin::BI__builtin_fminimum_numl:
2922 case Builtin::BI__builtin_fminimum_numf128:
2923 return RValue::get(
2924 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2925
2926 // fmod() is a special-case. It maps to the frem instruction rather than an
2927 // LLVM intrinsic.
2928 case Builtin::BIfmod:
2929 case Builtin::BIfmodf:
2930 case Builtin::BIfmodl:
2931 case Builtin::BI__builtin_fmod:
2932 case Builtin::BI__builtin_fmodf:
2933 case Builtin::BI__builtin_fmodf16:
2934 case Builtin::BI__builtin_fmodl:
2935 case Builtin::BI__builtin_fmodf128:
2936 case Builtin::BI__builtin_elementwise_fmod: {
2937 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2938 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2939 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2940 if (Builder.getIsFPConstrained()) {
2941 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2942 Arg1->getType());
2943 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2944 } else {
2945 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2946 }
2947 }
2948
2949 case Builtin::BIlog:
2950 case Builtin::BIlogf:
2951 case Builtin::BIlogl:
2952 case Builtin::BI__builtin_log:
2953 case Builtin::BI__builtin_logf:
2954 case Builtin::BI__builtin_logf16:
2955 case Builtin::BI__builtin_logl:
2956 case Builtin::BI__builtin_logf128:
2957 case Builtin::BI__builtin_elementwise_log:
2959 Intrinsic::log,
2960 Intrinsic::experimental_constrained_log));
2961
2962 case Builtin::BIlog10:
2963 case Builtin::BIlog10f:
2964 case Builtin::BIlog10l:
2965 case Builtin::BI__builtin_log10:
2966 case Builtin::BI__builtin_log10f:
2967 case Builtin::BI__builtin_log10f16:
2968 case Builtin::BI__builtin_log10l:
2969 case Builtin::BI__builtin_log10f128:
2970 case Builtin::BI__builtin_elementwise_log10:
2972 Intrinsic::log10,
2973 Intrinsic::experimental_constrained_log10));
2974
2975 case Builtin::BIlog2:
2976 case Builtin::BIlog2f:
2977 case Builtin::BIlog2l:
2978 case Builtin::BI__builtin_log2:
2979 case Builtin::BI__builtin_log2f:
2980 case Builtin::BI__builtin_log2f16:
2981 case Builtin::BI__builtin_log2l:
2982 case Builtin::BI__builtin_log2f128:
2983 case Builtin::BI__builtin_elementwise_log2:
2985 Intrinsic::log2,
2986 Intrinsic::experimental_constrained_log2));
2987
2988 case Builtin::BInearbyint:
2989 case Builtin::BInearbyintf:
2990 case Builtin::BInearbyintl:
2991 case Builtin::BI__builtin_nearbyint:
2992 case Builtin::BI__builtin_nearbyintf:
2993 case Builtin::BI__builtin_nearbyintl:
2994 case Builtin::BI__builtin_nearbyintf128:
2995 case Builtin::BI__builtin_elementwise_nearbyint:
2997 Intrinsic::nearbyint,
2998 Intrinsic::experimental_constrained_nearbyint));
2999
3000 case Builtin::BIpow:
3001 case Builtin::BIpowf:
3002 case Builtin::BIpowl:
3003 case Builtin::BI__builtin_pow:
3004 case Builtin::BI__builtin_powf:
3005 case Builtin::BI__builtin_powf16:
3006 case Builtin::BI__builtin_powl:
3007 case Builtin::BI__builtin_powf128:
3008 case Builtin::BI__builtin_elementwise_pow:
3010 Intrinsic::pow,
3011 Intrinsic::experimental_constrained_pow));
3012
3013 case Builtin::BIrint:
3014 case Builtin::BIrintf:
3015 case Builtin::BIrintl:
3016 case Builtin::BI__builtin_rint:
3017 case Builtin::BI__builtin_rintf:
3018 case Builtin::BI__builtin_rintf16:
3019 case Builtin::BI__builtin_rintl:
3020 case Builtin::BI__builtin_rintf128:
3021 case Builtin::BI__builtin_elementwise_rint:
3023 Intrinsic::rint,
3024 Intrinsic::experimental_constrained_rint));
3025
3026 case Builtin::BIround:
3027 case Builtin::BIroundf:
3028 case Builtin::BIroundl:
3029 case Builtin::BI__builtin_round:
3030 case Builtin::BI__builtin_roundf:
3031 case Builtin::BI__builtin_roundf16:
3032 case Builtin::BI__builtin_roundl:
3033 case Builtin::BI__builtin_roundf128:
3034 case Builtin::BI__builtin_elementwise_round:
3036 Intrinsic::round,
3037 Intrinsic::experimental_constrained_round));
3038
3039 case Builtin::BIroundeven:
3040 case Builtin::BIroundevenf:
3041 case Builtin::BIroundevenl:
3042 case Builtin::BI__builtin_roundeven:
3043 case Builtin::BI__builtin_roundevenf:
3044 case Builtin::BI__builtin_roundevenf16:
3045 case Builtin::BI__builtin_roundevenl:
3046 case Builtin::BI__builtin_roundevenf128:
3047 case Builtin::BI__builtin_elementwise_roundeven:
3049 Intrinsic::roundeven,
3050 Intrinsic::experimental_constrained_roundeven));
3051
3052 case Builtin::BIsin:
3053 case Builtin::BIsinf:
3054 case Builtin::BIsinl:
3055 case Builtin::BI__builtin_sin:
3056 case Builtin::BI__builtin_sinf:
3057 case Builtin::BI__builtin_sinf16:
3058 case Builtin::BI__builtin_sinl:
3059 case Builtin::BI__builtin_sinf128:
3060 case Builtin::BI__builtin_elementwise_sin:
3062 Intrinsic::sin,
3063 Intrinsic::experimental_constrained_sin));
3064
3065 case Builtin::BIsinh:
3066 case Builtin::BIsinhf:
3067 case Builtin::BIsinhl:
3068 case Builtin::BI__builtin_sinh:
3069 case Builtin::BI__builtin_sinhf:
3070 case Builtin::BI__builtin_sinhf16:
3071 case Builtin::BI__builtin_sinhl:
3072 case Builtin::BI__builtin_sinhf128:
3073 case Builtin::BI__builtin_elementwise_sinh:
3075 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3076
3077 case Builtin::BI__builtin_sincospi:
3078 case Builtin::BI__builtin_sincospif:
3079 case Builtin::BI__builtin_sincospil:
3080 if (Builder.getIsFPConstrained())
3081 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3082 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3083 return RValue::get(nullptr);
3084
3085 case Builtin::BIsincos:
3086 case Builtin::BIsincosf:
3087 case Builtin::BIsincosl:
3088 case Builtin::BI__builtin_sincos:
3089 case Builtin::BI__builtin_sincosf:
3090 case Builtin::BI__builtin_sincosf16:
3091 case Builtin::BI__builtin_sincosl:
3092 case Builtin::BI__builtin_sincosf128:
3093 if (Builder.getIsFPConstrained())
3094 break; // TODO: Emit constrained sincos intrinsic once one exists.
3095 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3096 return RValue::get(nullptr);
3097
3098 case Builtin::BIsqrt:
3099 case Builtin::BIsqrtf:
3100 case Builtin::BIsqrtl:
3101 case Builtin::BI__builtin_sqrt:
3102 case Builtin::BI__builtin_sqrtf:
3103 case Builtin::BI__builtin_sqrtf16:
3104 case Builtin::BI__builtin_sqrtl:
3105 case Builtin::BI__builtin_sqrtf128:
3106 case Builtin::BI__builtin_elementwise_sqrt: {
3108 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3110 return RValue::get(Call);
3111 }
3112
3113 case Builtin::BItan:
3114 case Builtin::BItanf:
3115 case Builtin::BItanl:
3116 case Builtin::BI__builtin_tan:
3117 case Builtin::BI__builtin_tanf:
3118 case Builtin::BI__builtin_tanf16:
3119 case Builtin::BI__builtin_tanl:
3120 case Builtin::BI__builtin_tanf128:
3121 case Builtin::BI__builtin_elementwise_tan:
3123 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3124
3125 case Builtin::BItanh:
3126 case Builtin::BItanhf:
3127 case Builtin::BItanhl:
3128 case Builtin::BI__builtin_tanh:
3129 case Builtin::BI__builtin_tanhf:
3130 case Builtin::BI__builtin_tanhf16:
3131 case Builtin::BI__builtin_tanhl:
3132 case Builtin::BI__builtin_tanhf128:
3133 case Builtin::BI__builtin_elementwise_tanh:
3135 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3136
3137 case Builtin::BItrunc:
3138 case Builtin::BItruncf:
3139 case Builtin::BItruncl:
3140 case Builtin::BI__builtin_trunc:
3141 case Builtin::BI__builtin_truncf:
3142 case Builtin::BI__builtin_truncf16:
3143 case Builtin::BI__builtin_truncl:
3144 case Builtin::BI__builtin_truncf128:
3145 case Builtin::BI__builtin_elementwise_trunc:
3147 Intrinsic::trunc,
3148 Intrinsic::experimental_constrained_trunc));
3149
3150 case Builtin::BIlround:
3151 case Builtin::BIlroundf:
3152 case Builtin::BIlroundl:
3153 case Builtin::BI__builtin_lround:
3154 case Builtin::BI__builtin_lroundf:
3155 case Builtin::BI__builtin_lroundl:
3156 case Builtin::BI__builtin_lroundf128:
3158 *this, E, Intrinsic::lround,
3159 Intrinsic::experimental_constrained_lround));
3160
3161 case Builtin::BIllround:
3162 case Builtin::BIllroundf:
3163 case Builtin::BIllroundl:
3164 case Builtin::BI__builtin_llround:
3165 case Builtin::BI__builtin_llroundf:
3166 case Builtin::BI__builtin_llroundl:
3167 case Builtin::BI__builtin_llroundf128:
3169 *this, E, Intrinsic::llround,
3170 Intrinsic::experimental_constrained_llround));
3171
3172 case Builtin::BIlrint:
3173 case Builtin::BIlrintf:
3174 case Builtin::BIlrintl:
3175 case Builtin::BI__builtin_lrint:
3176 case Builtin::BI__builtin_lrintf:
3177 case Builtin::BI__builtin_lrintl:
3178 case Builtin::BI__builtin_lrintf128:
3180 *this, E, Intrinsic::lrint,
3181 Intrinsic::experimental_constrained_lrint));
3182
3183 case Builtin::BIllrint:
3184 case Builtin::BIllrintf:
3185 case Builtin::BIllrintl:
3186 case Builtin::BI__builtin_llrint:
3187 case Builtin::BI__builtin_llrintf:
3188 case Builtin::BI__builtin_llrintl:
3189 case Builtin::BI__builtin_llrintf128:
3191 *this, E, Intrinsic::llrint,
3192 Intrinsic::experimental_constrained_llrint));
3193 case Builtin::BI__builtin_ldexp:
3194 case Builtin::BI__builtin_ldexpf:
3195 case Builtin::BI__builtin_ldexpl:
3196 case Builtin::BI__builtin_ldexpf16:
3197 case Builtin::BI__builtin_ldexpf128:
3198 case Builtin::BI__builtin_elementwise_ldexp:
3200 *this, E, Intrinsic::ldexp,
3201 Intrinsic::experimental_constrained_ldexp));
3202 default:
3203 break;
3204 }
3205 }
3206
3207 // Check NonnullAttribute/NullabilityArg and Alignment.
3208 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3209 unsigned ParmNum) {
3210 Value *Val = A.emitRawPointer(*this);
3211 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3212 ParmNum);
3213
3214 if (SanOpts.has(SanitizerKind::Alignment)) {
3215 SanitizerSet SkippedChecks;
3216 SkippedChecks.set(SanitizerKind::All);
3217 SkippedChecks.clear(SanitizerKind::Alignment);
3218 SourceLocation Loc = Arg->getExprLoc();
3219 // Strip an implicit cast.
3220 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3221 if (CE->getCastKind() == CK_BitCast)
3222 Arg = CE->getSubExpr();
3223 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3224 SkippedChecks);
3225 }
3226 };
3227
3228 switch (BuiltinIDIfNoAsmLabel) {
3229 default: break;
3230 case Builtin::BI__builtin___CFStringMakeConstantString:
3231 case Builtin::BI__builtin___NSStringMakeConstantString:
3232 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3233 case Builtin::BI__builtin_stdarg_start:
3234 case Builtin::BI__builtin_va_start:
3235 case Builtin::BI__va_start:
3236 case Builtin::BI__builtin_c23_va_start:
3237 case Builtin::BI__builtin_va_end:
3238 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3239 ? EmitScalarExpr(E->getArg(0))
3240 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3241 BuiltinID != Builtin::BI__builtin_va_end);
3242 return RValue::get(nullptr);
3243 case Builtin::BI__builtin_va_copy: {
3244 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3245 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3246 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3247 {DstPtr, SrcPtr});
3248 return RValue::get(nullptr);
3249 }
3250 case Builtin::BIabs:
3251 case Builtin::BIlabs:
3252 case Builtin::BIllabs:
3253 case Builtin::BI__builtin_abs:
3254 case Builtin::BI__builtin_labs:
3255 case Builtin::BI__builtin_llabs: {
3256 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3257
3258 Value *Result;
3259 switch (getLangOpts().getSignedOverflowBehavior()) {
3261 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3262 break;
3264 if (!SanitizeOverflow) {
3265 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3266 break;
3267 }
3268 [[fallthrough]];
3270 // TODO: Somehow handle the corner case when the address of abs is taken.
3271 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3272 break;
3273 }
3274 return RValue::get(Result);
3275 }
3276 case Builtin::BI__builtin_complex: {
3277 Value *Real = EmitScalarExpr(E->getArg(0));
3278 Value *Imag = EmitScalarExpr(E->getArg(1));
3279 return RValue::getComplex({Real, Imag});
3280 }
3281 case Builtin::BI__builtin_conj:
3282 case Builtin::BI__builtin_conjf:
3283 case Builtin::BI__builtin_conjl:
3284 case Builtin::BIconj:
3285 case Builtin::BIconjf:
3286 case Builtin::BIconjl: {
3287 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3288 Value *Real = ComplexVal.first;
3289 Value *Imag = ComplexVal.second;
3290 Imag = Builder.CreateFNeg(Imag, "neg");
3291 return RValue::getComplex(std::make_pair(Real, Imag));
3292 }
3293 case Builtin::BI__builtin_creal:
3294 case Builtin::BI__builtin_crealf:
3295 case Builtin::BI__builtin_creall:
3296 case Builtin::BIcreal:
3297 case Builtin::BIcrealf:
3298 case Builtin::BIcreall: {
3299 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3300 return RValue::get(ComplexVal.first);
3301 }
3302
3303 case Builtin::BI__builtin_preserve_access_index: {
3304 // Only enabled preserved access index region when debuginfo
3305 // is available as debuginfo is needed to preserve user-level
3306 // access pattern.
3307 if (!getDebugInfo()) {
3308 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3309 return RValue::get(EmitScalarExpr(E->getArg(0)));
3310 }
3311
3312 // Nested builtin_preserve_access_index() not supported
3314 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3315 return RValue::get(EmitScalarExpr(E->getArg(0)));
3316 }
3317
3318 IsInPreservedAIRegion = true;
3319 Value *Res = EmitScalarExpr(E->getArg(0));
3320 IsInPreservedAIRegion = false;
3321 return RValue::get(Res);
3322 }
3323
3324 case Builtin::BI__builtin_cimag:
3325 case Builtin::BI__builtin_cimagf:
3326 case Builtin::BI__builtin_cimagl:
3327 case Builtin::BIcimag:
3328 case Builtin::BIcimagf:
3329 case Builtin::BIcimagl: {
3330 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3331 return RValue::get(ComplexVal.second);
3332 }
3333
3334 case Builtin::BI__builtin_clrsb:
3335 case Builtin::BI__builtin_clrsbl:
3336 case Builtin::BI__builtin_clrsbll: {
3337 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or
3338 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3339
3340 llvm::Type *ArgType = ArgValue->getType();
3341 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3342
3343 llvm::Type *ResultType = ConvertType(E->getType());
3344 Value *Zero = llvm::Constant::getNullValue(ArgType);
3345 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3346 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3347 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3348 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3349 Value *Result =
3350 Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3351 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3352 "cast");
3353 return RValue::get(Result);
3354 }
3355 case Builtin::BI__builtin_ctzs:
3356 case Builtin::BI__builtin_ctz:
3357 case Builtin::BI__builtin_ctzl:
3358 case Builtin::BI__builtin_ctzll:
3359 case Builtin::BI__builtin_ctzg:
3360 case Builtin::BI__builtin_elementwise_ctzg: {
3361 bool HasFallback =
3362 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3363 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3364 E->getNumArgs() > 1;
3365
3366 Value *ArgValue =
3367 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3369
3370 llvm::Type *ArgType = ArgValue->getType();
3371 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3372
3373 llvm::Type *ResultType = ConvertType(E->getType());
3374 // The elementwise builtins always exhibit zero-is-undef behaviour
3375 Value *ZeroUndef = Builder.getInt1(
3376 HasFallback || getTarget().isCLZForZeroUndef() ||
3377 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3378 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3379 if (Result->getType() != ResultType)
3380 Result =
3381 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3382 if (!HasFallback)
3383 return RValue::get(Result);
3384
3385 Value *Zero = Constant::getNullValue(ArgType);
3386 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3387 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3388 Value *ResultOrFallback =
3389 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3390 return RValue::get(ResultOrFallback);
3391 }
3392 case Builtin::BI__builtin_clzs:
3393 case Builtin::BI__builtin_clz:
3394 case Builtin::BI__builtin_clzl:
3395 case Builtin::BI__builtin_clzll:
3396 case Builtin::BI__builtin_clzg:
3397 case Builtin::BI__builtin_elementwise_clzg: {
3398 bool HasFallback =
3399 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3400 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3401 E->getNumArgs() > 1;
3402
3403 Value *ArgValue =
3404 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3406
3407 llvm::Type *ArgType = ArgValue->getType();
3408 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3409
3410 llvm::Type *ResultType = ConvertType(E->getType());
3411 // The elementwise builtins always exhibit zero-is-undef behaviour
3412 Value *ZeroUndef = Builder.getInt1(
3413 HasFallback || getTarget().isCLZForZeroUndef() ||
3414 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3415 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3416 if (Result->getType() != ResultType)
3417 Result =
3418 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3419 if (!HasFallback)
3420 return RValue::get(Result);
3421
3422 Value *Zero = Constant::getNullValue(ArgType);
3423 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3424 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3425 Value *ResultOrFallback =
3426 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3427 return RValue::get(ResultOrFallback);
3428 }
3429 case Builtin::BI__builtin_ffs:
3430 case Builtin::BI__builtin_ffsl:
3431 case Builtin::BI__builtin_ffsll: {
3432 // ffs(x) -> x ? cttz(x) + 1 : 0
3433 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3434
3435 llvm::Type *ArgType = ArgValue->getType();
3436 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3437
3438 llvm::Type *ResultType = ConvertType(E->getType());
3439 Value *Tmp =
3440 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3441 llvm::ConstantInt::get(ArgType, 1));
3442 Value *Zero = llvm::Constant::getNullValue(ArgType);
3443 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3444 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3445 if (Result->getType() != ResultType)
3446 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3447 "cast");
3448 return RValue::get(Result);
3449 }
3450 case Builtin::BI__builtin_parity:
3451 case Builtin::BI__builtin_parityl:
3452 case Builtin::BI__builtin_parityll: {
3453 // parity(x) -> ctpop(x) & 1
3454 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3455
3456 llvm::Type *ArgType = ArgValue->getType();
3457 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3458
3459 llvm::Type *ResultType = ConvertType(E->getType());
3460 Value *Tmp = Builder.CreateCall(F, ArgValue);
3461 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3462 if (Result->getType() != ResultType)
3463 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3464 "cast");
3465 return RValue::get(Result);
3466 }
3467 case Builtin::BI__lzcnt16:
3468 case Builtin::BI__lzcnt:
3469 case Builtin::BI__lzcnt64: {
3470 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3471
3472 llvm::Type *ArgType = ArgValue->getType();
3473 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3474
3475 llvm::Type *ResultType = ConvertType(E->getType());
3476 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3477 if (Result->getType() != ResultType)
3478 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3479 "cast");
3480 return RValue::get(Result);
3481 }
3482 case Builtin::BI__popcnt16:
3483 case Builtin::BI__popcnt:
3484 case Builtin::BI__popcnt64:
3485 case Builtin::BI__builtin_popcount:
3486 case Builtin::BI__builtin_popcountl:
3487 case Builtin::BI__builtin_popcountll:
3488 case Builtin::BI__builtin_popcountg: {
3489 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3490
3491 llvm::Type *ArgType = ArgValue->getType();
3492 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3493
3494 llvm::Type *ResultType = ConvertType(E->getType());
3495 Value *Result = Builder.CreateCall(F, ArgValue);
3496 if (Result->getType() != ResultType)
3497 Result =
3498 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3499 return RValue::get(Result);
3500 }
3501 case Builtin::BI__builtin_unpredictable: {
3502 // Always return the argument of __builtin_unpredictable. LLVM does not
3503 // handle this builtin. Metadata for this builtin should be added directly
3504 // to instructions such as branches or switches that use it.
3505 return RValue::get(EmitScalarExpr(E->getArg(0)));
3506 }
3507 case Builtin::BI__builtin_expect: {
3508 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3509 llvm::Type *ArgType = ArgValue->getType();
3510
3511 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3512 // Don't generate llvm.expect on -O0 as the backend won't use it for
3513 // anything.
3514 // Note, we still IRGen ExpectedValue because it could have side-effects.
3515 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3516 return RValue::get(ArgValue);
3517
3518 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3519 Value *Result =
3520 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3521 return RValue::get(Result);
3522 }
3523 case Builtin::BI__builtin_expect_with_probability: {
3524 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3525 llvm::Type *ArgType = ArgValue->getType();
3526
3527 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3528 llvm::APFloat Probability(0.0);
3529 const Expr *ProbArg = E->getArg(2);
3530 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3531 assert(EvalSucceed && "probability should be able to evaluate as float");
3532 (void)EvalSucceed;
3533 bool LoseInfo = false;
3534 Probability.convert(llvm::APFloat::IEEEdouble(),
3535 llvm::RoundingMode::Dynamic, &LoseInfo);
3536 llvm::Type *Ty = ConvertType(ProbArg->getType());
3537 Constant *Confidence = ConstantFP::get(Ty, Probability);
3538 // Don't generate llvm.expect.with.probability on -O0 as the backend
3539 // won't use it for anything.
3540 // Note, we still IRGen ExpectedValue because it could have side-effects.
3541 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3542 return RValue::get(ArgValue);
3543
3544 Function *FnExpect =
3545 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3546 Value *Result = Builder.CreateCall(
3547 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3548 return RValue::get(Result);
3549 }
3550 case Builtin::BI__builtin_assume_aligned: {
3551 const Expr *Ptr = E->getArg(0);
3552 Value *PtrValue = EmitScalarExpr(Ptr);
3553 Value *OffsetValue =
3554 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3555
3556 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3557 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3558 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3559 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3560 llvm::Value::MaximumAlignment);
3561
3562 emitAlignmentAssumption(PtrValue, Ptr,
3563 /*The expr loc is sufficient.*/ SourceLocation(),
3564 AlignmentCI, OffsetValue);
3565 return RValue::get(PtrValue);
3566 }
3567 case Builtin::BI__builtin_assume_dereferenceable: {
3568 const Expr *Ptr = E->getArg(0);
3569 const Expr *Size = E->getArg(1);
3570 Value *PtrValue = EmitScalarExpr(Ptr);
3571 Value *SizeValue = EmitScalarExpr(Size);
3572 if (SizeValue->getType() != IntPtrTy)
3573 SizeValue =
3574 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3575 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3576 return RValue::get(nullptr);
3577 }
3578 case Builtin::BI__assume:
3579 case Builtin::BI__builtin_assume: {
3580 if (E->getArg(0)->HasSideEffects(getContext()))
3581 return RValue::get(nullptr);
3582
3583 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3584 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3585 Builder.CreateCall(FnAssume, ArgValue);
3586 return RValue::get(nullptr);
3587 }
3588 case Builtin::BI__builtin_assume_separate_storage: {
3589 const Expr *Arg0 = E->getArg(0);
3590 const Expr *Arg1 = E->getArg(1);
3591
3592 Value *Value0 = EmitScalarExpr(Arg0);
3593 Value *Value1 = EmitScalarExpr(Arg1);
3594
3595 Value *Values[] = {Value0, Value1};
3596 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3597 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3598 return RValue::get(nullptr);
3599 }
3600 case Builtin::BI__builtin_allow_runtime_check: {
3601 StringRef Kind =
3602 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3603 LLVMContext &Ctx = CGM.getLLVMContext();
3604 llvm::Value *Allow = Builder.CreateCall(
3605 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3606 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3607 return RValue::get(Allow);
3608 }
3609 case Builtin::BI__builtin_allow_sanitize_check: {
3610 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
3611 StringRef Name =
3612 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3613
3614 // We deliberately allow the use of kernel- and non-kernel names
3615 // interchangably, even when one or the other is enabled. This is consistent
3616 // with the no_sanitize-attribute, which allows either kernel- or non-kernel
3617 // name to disable instrumentation (see CodeGenFunction::StartFunction).
3618 if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
3619 SanitizerKind::KernelAddress) &&
3620 (Name == "address" || Name == "kernel-address")) {
3621 IntrID = Intrinsic::allow_sanitize_address;
3622 } else if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
3623 Name == "thread") {
3624 IntrID = Intrinsic::allow_sanitize_thread;
3625 } else if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Memory |
3626 SanitizerKind::KernelMemory) &&
3627 (Name == "memory" || Name == "kernel-memory")) {
3628 IntrID = Intrinsic::allow_sanitize_memory;
3629 } else if (getLangOpts().Sanitize.hasOneOf(
3630 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress) &&
3631 (Name == "hwaddress" || Name == "kernel-hwaddress")) {
3632 IntrID = Intrinsic::allow_sanitize_hwaddress;
3633 }
3634
3635 if (IntrID != Intrinsic::not_intrinsic) {
3636 llvm::Value *Allow = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3637 return RValue::get(Allow);
3638 }
3639 // If the checked sanitizer is not enabled, we can safely lower to false
3640 // right away. This is also more efficient, since the LowerAllowCheckPass
3641 // must not always be enabled if none of the above sanitizers are enabled.
3642 return RValue::get(Builder.getFalse());
3643 }
3644 case Builtin::BI__arithmetic_fence: {
3645 // Create the builtin call if FastMath is selected, and the target
3646 // supports the builtin, otherwise just return the argument.
3647 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3648 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3649 bool isArithmeticFenceEnabled =
3650 FMF.allowReassoc() &&
3652 QualType ArgType = E->getArg(0)->getType();
3653 if (ArgType->isComplexType()) {
3654 if (isArithmeticFenceEnabled) {
3655 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3656 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3657 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3658 ConvertType(ElementType));
3659 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3660 ConvertType(ElementType));
3661 return RValue::getComplex(std::make_pair(Real, Imag));
3662 }
3663 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3664 Value *Real = ComplexVal.first;
3665 Value *Imag = ComplexVal.second;
3666 return RValue::getComplex(std::make_pair(Real, Imag));
3667 }
3668 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3669 if (isArithmeticFenceEnabled)
3670 return RValue::get(
3671 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3672 return RValue::get(ArgValue);
3673 }
3674 case Builtin::BI__builtin_bswapg: {
3675 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3676 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3677 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3678 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3679 return RValue::get(ArgValue);
3680 assert(((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0)) &&
3681 "LLVM's __builtin_bswapg only supports integer variants that has a "
3682 "multiple of 16 bits as well as a single byte");
3683 return RValue::get(
3684 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3685 }
3686 case Builtin::BI__builtin_bswap16:
3687 case Builtin::BI__builtin_bswap32:
3688 case Builtin::BI__builtin_bswap64:
3689 case Builtin::BI_byteswap_ushort:
3690 case Builtin::BI_byteswap_ulong:
3691 case Builtin::BI_byteswap_uint64: {
3692 return RValue::get(
3693 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3694 }
3695 case Builtin::BI__builtin_bitreverseg: {
3696 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3697 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3698 assert(IntTy &&
3699 "LLVM's __builtin_bitreverseg only support integer variants");
3700 if (IntTy->getBitWidth() == 1)
3701 return RValue::get(ArgValue);
3702 return RValue::get(
3703 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3704 }
3705 case Builtin::BI__builtin_bitreverse8:
3706 case Builtin::BI__builtin_bitreverse16:
3707 case Builtin::BI__builtin_bitreverse32:
3708 case Builtin::BI__builtin_bitreverse64: {
3709 return RValue::get(
3710 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3711 }
3712 case Builtin::BI__builtin_rotateleft8:
3713 case Builtin::BI__builtin_rotateleft16:
3714 case Builtin::BI__builtin_rotateleft32:
3715 case Builtin::BI__builtin_rotateleft64:
3716 case Builtin::BI__builtin_stdc_rotate_left:
3717 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3718 case Builtin::BI_rotl16:
3719 case Builtin::BI_rotl:
3720 case Builtin::BI_lrotl:
3721 case Builtin::BI_rotl64:
3722 return emitRotate(E, false);
3723
3724 case Builtin::BI__builtin_rotateright8:
3725 case Builtin::BI__builtin_rotateright16:
3726 case Builtin::BI__builtin_rotateright32:
3727 case Builtin::BI__builtin_rotateright64:
3728 case Builtin::BI__builtin_stdc_rotate_right:
3729 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3730 case Builtin::BI_rotr16:
3731 case Builtin::BI_rotr:
3732 case Builtin::BI_lrotr:
3733 case Builtin::BI_rotr64:
3734 return emitRotate(E, true);
3735
3736 case Builtin::BI__builtin_constant_p: {
3737 llvm::Type *ResultType = ConvertType(E->getType());
3738
3739 const Expr *Arg = E->getArg(0);
3740 QualType ArgType = Arg->getType();
3741 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3742 // and likely a mistake.
3743 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3744 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3745 // Per the GCC documentation, only numeric constants are recognized after
3746 // inlining.
3747 return RValue::get(ConstantInt::get(ResultType, 0));
3748
3749 if (Arg->HasSideEffects(getContext()))
3750 // The argument is unevaluated, so be conservative if it might have
3751 // side-effects.
3752 return RValue::get(ConstantInt::get(ResultType, 0));
3753
3754 Value *ArgValue = EmitScalarExpr(Arg);
3755 if (ArgType->isObjCObjectPointerType()) {
3756 // Convert Objective-C objects to id because we cannot distinguish between
3757 // LLVM types for Obj-C classes as they are opaque.
3758 ArgType = CGM.getContext().getObjCIdType();
3759 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3760 }
3761 Function *F =
3762 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3763 Value *Result = Builder.CreateCall(F, ArgValue);
3764 if (Result->getType() != ResultType)
3765 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3766 return RValue::get(Result);
3767 }
3768 case Builtin::BI__builtin_dynamic_object_size:
3769 case Builtin::BI__builtin_object_size: {
3770 unsigned Type =
3771 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3772 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3773
3774 // We pass this builtin onto the optimizer so that it can figure out the
3775 // object size in more complex cases.
3776 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3777 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3778 /*EmittedE=*/nullptr, IsDynamic));
3779 }
3780 case Builtin::BI__builtin_counted_by_ref: {
3781 // Default to returning '(void *) 0'.
3782 llvm::Value *Result = llvm::ConstantPointerNull::get(
3783 llvm::PointerType::getUnqual(getLLVMContext()));
3784
3785 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3786
3787 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3788 UO && UO->getOpcode() == UO_AddrOf) {
3789 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3790
3791 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3792 Arg = ASE->getBase()->IgnoreParenImpCasts();
3793 }
3794
3795 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3796 if (auto *CATy =
3798 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3799 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
3800 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
3801 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
3802 else
3803 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3804 }
3805 }
3806
3807 return RValue::get(Result);
3808 }
3809 case Builtin::BI__builtin_prefetch: {
3810 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3811 // FIXME: Technically these constants should of type 'int', yes?
3812 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3813 llvm::ConstantInt::get(Int32Ty, 0);
3814 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3815 llvm::ConstantInt::get(Int32Ty, 3);
3816 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3817 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3818 Builder.CreateCall(F, {Address, RW, Locality, Data});
3819 return RValue::get(nullptr);
3820 }
3821 case Builtin::BI__builtin_readcyclecounter: {
3822 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3823 return RValue::get(Builder.CreateCall(F));
3824 }
3825 case Builtin::BI__builtin_readsteadycounter: {
3826 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3827 return RValue::get(Builder.CreateCall(F));
3828 }
3829 case Builtin::BI__builtin___clear_cache: {
3830 Value *Begin = EmitScalarExpr(E->getArg(0));
3831 Value *End = EmitScalarExpr(E->getArg(1));
3832 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache);
3833 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3834 }
3835 case Builtin::BI__builtin_trap:
3836 EmitTrapCall(Intrinsic::trap);
3837 return RValue::get(nullptr);
3838 case Builtin::BI__builtin_verbose_trap: {
3839 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3840 if (getDebugInfo()) {
3841 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3842 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3844 }
3845 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3846 // Currently no attempt is made to prevent traps from being merged.
3847 EmitTrapCall(Intrinsic::trap);
3848 return RValue::get(nullptr);
3849 }
3850 case Builtin::BI__debugbreak:
3851 EmitTrapCall(Intrinsic::debugtrap);
3852 return RValue::get(nullptr);
3853 case Builtin::BI__builtin_unreachable: {
3855
3856 // We do need to preserve an insertion point.
3857 EmitBlock(createBasicBlock("unreachable.cont"));
3858
3859 return RValue::get(nullptr);
3860 }
3861
3862 case Builtin::BI__builtin_powi:
3863 case Builtin::BI__builtin_powif:
3864 case Builtin::BI__builtin_powil: {
3865 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3866 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3867
3868 if (Builder.getIsFPConstrained()) {
3869 // FIXME: llvm.powi has 2 mangling types,
3870 // llvm.experimental.constrained.powi has one.
3871 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3872 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3873 Src0->getType());
3874 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3875 }
3876
3877 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3878 { Src0->getType(), Src1->getType() });
3879 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3880 }
3881 case Builtin::BI__builtin_frexpl: {
3882 // Linux PPC will not be adding additional PPCDoubleDouble support.
3883 // WIP to switch default to IEEE long double. Will emit libcall for
3884 // frexpl instead of legalizing this type in the BE.
3885 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3886 break;
3887 [[fallthrough]];
3888 }
3889 case Builtin::BI__builtin_frexp:
3890 case Builtin::BI__builtin_frexpf:
3891 case Builtin::BI__builtin_frexpf128:
3892 case Builtin::BI__builtin_frexpf16:
3893 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3894 case Builtin::BImodf:
3895 case Builtin::BImodff:
3896 case Builtin::BImodfl:
3897 case Builtin::BI__builtin_modf:
3898 case Builtin::BI__builtin_modff:
3899 case Builtin::BI__builtin_modfl:
3900 if (Builder.getIsFPConstrained())
3901 break; // TODO: Emit constrained modf intrinsic once one exists.
3902 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3903 case Builtin::BI__builtin_isgreater:
3904 case Builtin::BI__builtin_isgreaterequal:
3905 case Builtin::BI__builtin_isless:
3906 case Builtin::BI__builtin_islessequal:
3907 case Builtin::BI__builtin_islessgreater:
3908 case Builtin::BI__builtin_isunordered: {
3909 // Ordered comparisons: we know the arguments to these are matching scalar
3910 // floating point values.
3911 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3912 Value *LHS = EmitScalarExpr(E->getArg(0));
3913 Value *RHS = EmitScalarExpr(E->getArg(1));
3914
3915 switch (BuiltinID) {
3916 default: llvm_unreachable("Unknown ordered comparison");
3917 case Builtin::BI__builtin_isgreater:
3918 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3919 break;
3920 case Builtin::BI__builtin_isgreaterequal:
3921 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3922 break;
3923 case Builtin::BI__builtin_isless:
3924 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3925 break;
3926 case Builtin::BI__builtin_islessequal:
3927 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3928 break;
3929 case Builtin::BI__builtin_islessgreater:
3930 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3931 break;
3932 case Builtin::BI__builtin_isunordered:
3933 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3934 break;
3935 }
3936 // ZExt bool to int type.
3937 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3938 }
3939
3940 case Builtin::BI__builtin_isnan: {
3941 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3942 Value *V = EmitScalarExpr(E->getArg(0));
3943 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3944 return RValue::get(Result);
3945 return RValue::get(
3946 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3947 ConvertType(E->getType())));
3948 }
3949
3950 case Builtin::BI__builtin_issignaling: {
3951 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3952 Value *V = EmitScalarExpr(E->getArg(0));
3953 return RValue::get(
3954 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3955 ConvertType(E->getType())));
3956 }
3957
3958 case Builtin::BI__builtin_isinf: {
3959 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3960 Value *V = EmitScalarExpr(E->getArg(0));
3961 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3962 return RValue::get(Result);
3963 return RValue::get(
3964 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3965 ConvertType(E->getType())));
3966 }
3967
3968 case Builtin::BIfinite:
3969 case Builtin::BI__finite:
3970 case Builtin::BIfinitef:
3971 case Builtin::BI__finitef:
3972 case Builtin::BIfinitel:
3973 case Builtin::BI__finitel:
3974 case Builtin::BI__builtin_isfinite: {
3975 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3976 Value *V = EmitScalarExpr(E->getArg(0));
3977 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3978 return RValue::get(Result);
3979 return RValue::get(
3980 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3981 ConvertType(E->getType())));
3982 }
3983
3984 case Builtin::BI__builtin_isnormal: {
3985 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3986 Value *V = EmitScalarExpr(E->getArg(0));
3987 return RValue::get(
3988 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3989 ConvertType(E->getType())));
3990 }
3991
3992 case Builtin::BI__builtin_issubnormal: {
3993 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3994 Value *V = EmitScalarExpr(E->getArg(0));
3995 return RValue::get(
3996 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3997 ConvertType(E->getType())));
3998 }
3999
4000 case Builtin::BI__builtin_iszero: {
4001 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4002 Value *V = EmitScalarExpr(E->getArg(0));
4003 return RValue::get(
4004 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
4005 ConvertType(E->getType())));
4006 }
4007
4008 case Builtin::BI__builtin_isfpclass: {
4010 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
4011 break;
4012 uint64_t Test = Result.Val.getInt().getLimitedValue();
4013 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4014 Value *V = EmitScalarExpr(E->getArg(0));
4015 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
4016 ConvertType(E->getType())));
4017 }
4018
4019 case Builtin::BI__builtin_nondeterministic_value: {
4020 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
4021
4022 Value *Result = PoisonValue::get(Ty);
4023 Result = Builder.CreateFreeze(Result);
4024
4025 return RValue::get(Result);
4026 }
4027
4028 case Builtin::BI__builtin_elementwise_abs: {
4029 Value *Result;
4030 QualType QT = E->getArg(0)->getType();
4031
4032 if (auto *VecTy = QT->getAs<VectorType>())
4033 QT = VecTy->getElementType();
4034 if (QT->isIntegerType())
4035 Result = Builder.CreateBinaryIntrinsic(
4036 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
4037 nullptr, "elt.abs");
4038 else
4039 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
4040 "elt.abs");
4041
4042 return RValue::get(Result);
4043 }
4044 case Builtin::BI__builtin_elementwise_bitreverse:
4046 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
4047 case Builtin::BI__builtin_elementwise_popcount:
4049 *this, E, Intrinsic::ctpop, "elt.ctpop"));
4050 case Builtin::BI__builtin_elementwise_canonicalize:
4052 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
4053 case Builtin::BI__builtin_elementwise_copysign:
4054 return RValue::get(
4055 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
4056 case Builtin::BI__builtin_elementwise_fshl:
4057 return RValue::get(
4058 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
4059 case Builtin::BI__builtin_elementwise_fshr:
4060 return RValue::get(
4061 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
4062
4063 case Builtin::BI__builtin_elementwise_add_sat:
4064 case Builtin::BI__builtin_elementwise_sub_sat: {
4065 Value *Op0 = EmitScalarExpr(E->getArg(0));
4066 Value *Op1 = EmitScalarExpr(E->getArg(1));
4067 Value *Result;
4068 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4069 QualType Ty = E->getArg(0)->getType();
4070 if (auto *VecTy = Ty->getAs<VectorType>())
4071 Ty = VecTy->getElementType();
4072 bool IsSigned = Ty->isSignedIntegerType();
4073 unsigned Opc;
4074 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4075 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4076 else
4077 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4078 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4079 return RValue::get(Result);
4080 }
4081
4082 case Builtin::BI__builtin_elementwise_max: {
4083 Value *Op0 = EmitScalarExpr(E->getArg(0));
4084 Value *Op1 = EmitScalarExpr(E->getArg(1));
4085 Value *Result;
4086 if (Op0->getType()->isIntOrIntVectorTy()) {
4087 QualType Ty = E->getArg(0)->getType();
4088 if (auto *VecTy = Ty->getAs<VectorType>())
4089 Ty = VecTy->getElementType();
4090 Result = Builder.CreateBinaryIntrinsic(
4091 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4092 Op1, nullptr, "elt.max");
4093 } else
4094 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4095 return RValue::get(Result);
4096 }
4097 case Builtin::BI__builtin_elementwise_min: {
4098 Value *Op0 = EmitScalarExpr(E->getArg(0));
4099 Value *Op1 = EmitScalarExpr(E->getArg(1));
4100 Value *Result;
4101 if (Op0->getType()->isIntOrIntVectorTy()) {
4102 QualType Ty = E->getArg(0)->getType();
4103 if (auto *VecTy = Ty->getAs<VectorType>())
4104 Ty = VecTy->getElementType();
4105 Result = Builder.CreateBinaryIntrinsic(
4106 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4107 Op1, nullptr, "elt.min");
4108 } else
4109 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4110 return RValue::get(Result);
4111 }
4112
4113 case Builtin::BI__builtin_elementwise_maxnum: {
4114 Value *Op0 = EmitScalarExpr(E->getArg(0));
4115 Value *Op1 = EmitScalarExpr(E->getArg(1));
4116 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4117 Op1, nullptr, "elt.maxnum");
4118 return RValue::get(Result);
4119 }
4120
4121 case Builtin::BI__builtin_elementwise_minnum: {
4122 Value *Op0 = EmitScalarExpr(E->getArg(0));
4123 Value *Op1 = EmitScalarExpr(E->getArg(1));
4124 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4125 Op1, nullptr, "elt.minnum");
4126 return RValue::get(Result);
4127 }
4128
4129 case Builtin::BI__builtin_elementwise_maximum: {
4130 Value *Op0 = EmitScalarExpr(E->getArg(0));
4131 Value *Op1 = EmitScalarExpr(E->getArg(1));
4132 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4133 nullptr, "elt.maximum");
4134 return RValue::get(Result);
4135 }
4136
4137 case Builtin::BI__builtin_elementwise_minimum: {
4138 Value *Op0 = EmitScalarExpr(E->getArg(0));
4139 Value *Op1 = EmitScalarExpr(E->getArg(1));
4140 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4141 nullptr, "elt.minimum");
4142 return RValue::get(Result);
4143 }
4144
4145 case Builtin::BI__builtin_elementwise_maximumnum: {
4146 Value *Op0 = EmitScalarExpr(E->getArg(0));
4147 Value *Op1 = EmitScalarExpr(E->getArg(1));
4148 Value *Result = Builder.CreateBinaryIntrinsic(
4149 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4150 return RValue::get(Result);
4151 }
4152
4153 case Builtin::BI__builtin_elementwise_minimumnum: {
4154 Value *Op0 = EmitScalarExpr(E->getArg(0));
4155 Value *Op1 = EmitScalarExpr(E->getArg(1));
4156 Value *Result = Builder.CreateBinaryIntrinsic(
4157 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4158 return RValue::get(Result);
4159 }
4160
4161 case Builtin::BI__builtin_reduce_max: {
4162 auto GetIntrinsicID = [this](QualType QT) {
4163 if (auto *VecTy = QT->getAs<VectorType>())
4164 QT = VecTy->getElementType();
4165 else if (QT->isSizelessVectorType())
4166 QT = QT->getSizelessVectorEltType(CGM.getContext());
4167
4168 if (QT->isSignedIntegerType())
4169 return Intrinsic::vector_reduce_smax;
4170 if (QT->isUnsignedIntegerType())
4171 return Intrinsic::vector_reduce_umax;
4172 assert(QT->isFloatingType() && "must have a float here");
4173 return Intrinsic::vector_reduce_fmax;
4174 };
4176 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4177 }
4178
4179 case Builtin::BI__builtin_reduce_min: {
4180 auto GetIntrinsicID = [this](QualType QT) {
4181 if (auto *VecTy = QT->getAs<VectorType>())
4182 QT = VecTy->getElementType();
4183 else if (QT->isSizelessVectorType())
4184 QT = QT->getSizelessVectorEltType(CGM.getContext());
4185
4186 if (QT->isSignedIntegerType())
4187 return Intrinsic::vector_reduce_smin;
4188 if (QT->isUnsignedIntegerType())
4189 return Intrinsic::vector_reduce_umin;
4190 assert(QT->isFloatingType() && "must have a float here");
4191 return Intrinsic::vector_reduce_fmin;
4192 };
4193
4195 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4196 }
4197
4198 case Builtin::BI__builtin_reduce_add:
4200 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4201 case Builtin::BI__builtin_reduce_mul:
4203 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4204 case Builtin::BI__builtin_reduce_xor:
4206 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4207 case Builtin::BI__builtin_reduce_or:
4209 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4210 case Builtin::BI__builtin_reduce_and:
4212 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4213 case Builtin::BI__builtin_reduce_maximum:
4215 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4216 case Builtin::BI__builtin_reduce_minimum:
4218 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4219 case Builtin::BI__builtin_reduce_assoc_fadd:
4220 case Builtin::BI__builtin_reduce_in_order_fadd: {
4221 llvm::Value *Vector = EmitScalarExpr(E->getArg(0));
4222 llvm::Type *ScalarTy = Vector->getType()->getScalarType();
4223 llvm::Value *StartValue = nullptr;
4224 if (E->getNumArgs() == 2)
4225 StartValue = Builder.CreateFPCast(EmitScalarExpr(E->getArg(1)), ScalarTy);
4226 llvm::Value *Args[] = {/*start_value=*/StartValue
4227 ? StartValue
4228 : llvm::ConstantFP::get(ScalarTy, -0.0F),
4229 /*vector=*/Vector};
4230 llvm::Function *F =
4231 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Vector->getType());
4232 llvm::CallBase *Reduce = Builder.CreateCall(F, Args, "rdx.addf");
4233 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_reduce_assoc_fadd) {
4234 // `__builtin_reduce_assoc_fadd` is an associative reduction which
4235 // requires the reassoc FMF flag.
4236 llvm::FastMathFlags FMF;
4237 FMF.setAllowReassoc();
4238 cast<llvm::CallBase>(Reduce)->setFastMathFlags(FMF);
4239 }
4240 return RValue::get(Reduce);
4241 }
4242
4243 case Builtin::BI__builtin_matrix_transpose: {
4244 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4245 Value *MatValue = EmitScalarExpr(E->getArg(0));
4246 MatrixBuilder MB(Builder);
4247 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4248 MatrixTy->getNumColumns());
4249 return RValue::get(Result);
4250 }
4251
4252 case Builtin::BI__builtin_matrix_column_major_load: {
4253 MatrixBuilder MB(Builder);
4254 // Emit everything that isn't dependent on the first parameter type
4255 Value *Stride = EmitScalarExpr(E->getArg(3));
4256 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4257 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4258 assert(PtrTy && "arg0 must be of pointer type");
4259 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4260
4263 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4264 0);
4265 Value *Result = MB.CreateColumnMajorLoad(
4266 Src.getElementType(), Src.emitRawPointer(*this),
4267 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4268 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4269 return RValue::get(Result);
4270 }
4271
4272 case Builtin::BI__builtin_matrix_column_major_store: {
4273 MatrixBuilder MB(Builder);
4274 Value *Matrix = EmitScalarExpr(E->getArg(0));
4276 Value *Stride = EmitScalarExpr(E->getArg(2));
4277
4278 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4279 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4280 assert(PtrTy && "arg1 must be of pointer type");
4281 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4282
4284 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4285 0);
4286 Value *Result = MB.CreateColumnMajorStore(
4287 Matrix, Dst.emitRawPointer(*this),
4288 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4289 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4291 return RValue::get(Result);
4292 }
4293
4294 case Builtin::BI__builtin_masked_load:
4295 case Builtin::BI__builtin_masked_expand_load: {
4296 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4297 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4298
4299 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4300 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4301 if (E->getNumArgs() > 2)
4302 PassThru = EmitScalarExpr(E->getArg(2));
4303
4304 CharUnits Align = CGM.getNaturalTypeAlignment(
4305 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4306
4307 llvm::Value *Result;
4308 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4309 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4310 PassThru, "masked_load");
4311 } else {
4312 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4313 Result =
4314 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4315 }
4316 return RValue::get(Result);
4317 };
  case Builtin::BI__builtin_masked_gather: {
    // Gather: for each set mask lane i, load from Ptr + Idx[i]; inactive
    // lanes take PassThru (poison unless the optional 4th arg is given).
    llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
    llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
    llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));

    llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
    CharUnits Align = CGM.getNaturalTypeAlignment(
        E->getType()->getAs<VectorType>()->getElementType(), nullptr);

    llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
    if (E->getNumArgs() > 3)
      PassThru = EmitScalarExpr(E->getArg(3));

    llvm::Type *ElemTy = CGM.getTypes().ConvertType(
    llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);

    llvm::Value *Result = Builder.CreateMaskedGather(
        RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_masked_store:
  case Builtin::BI__builtin_masked_compress_store: {
    // Store the active lanes of Val (arg 1) through Ptr (arg 2) under Mask
    // (arg 0); the compressing form packs active lanes contiguously.
    llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
    llvm::Value *Val = EmitScalarExpr(E->getArg(1));
    llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));

    QualType ValTy = E->getArg(1)->getType();
    llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);

    CharUnits Align = CGM.getNaturalTypeAlignment(
        nullptr);

    if (BuiltinID == Builtin::BI__builtin_masked_store) {
      Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
    } else {
      llvm::Function *F =
          CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
      Builder.CreateCall(F, {Val, Ptr, Mask});
    }
    // These builtins produce no value.
    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_masked_scatter: {
    // Scatter: for each set mask lane i, store lane i of Val to Ptr + Idx[i].
    llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
    llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
    llvm::Value *Val = EmitScalarExpr(E->getArg(2));
    llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));

    CharUnits Align = CGM.getNaturalTypeAlignment(
        nullptr);

    llvm::Type *ElemTy = CGM.getTypes().ConvertType(
        E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
    llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);

    Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
    // NOTE(review): sibling void-returning cases use RValue::get(nullptr);
    // confirm the default-constructed RValue here is intentional.
    return RValue();
  }
  case Builtin::BI__builtin_isinf_sign: {
    // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
    Value *Arg = EmitScalarExpr(E->getArg(0));
    Value *AbsArg = EmitFAbs(*this, Arg);
    Value *IsInf = Builder.CreateFCmpOEQ(
        AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
    Value *IsNeg = EmitSignBit(*this, Arg);

    llvm::Type *IntTy = ConvertType(E->getType());
    Value *Zero = Constant::getNullValue(IntTy);
    Value *One = ConstantInt::get(IntTy, 1);
    Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
    // First pick +/-1 from the sign bit, then gate the whole thing on IsInf.
    Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
    Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
    return RValue::get(Result);
  }
4396
4397 case Builtin::BI__builtin_flt_rounds: {
4398 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4399
4400 llvm::Type *ResultType = ConvertType(E->getType());
4401 Value *Result = Builder.CreateCall(F);
4402 if (Result->getType() != ResultType)
4403 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4404 "cast");
4405 return RValue::get(Result);
4406 }
4407
4408 case Builtin::BI__builtin_set_flt_rounds: {
4409 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4410
4411 Value *V = EmitScalarExpr(E->getArg(0));
4412 Builder.CreateCall(F, V);
4413 return RValue::get(nullptr);
4414 }
4415
  case Builtin::BI__builtin_fpclassify: {
    CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
    // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
    // Classify the FP value (arg 5) with a chain of compares; args 0-4 are
    // the caller-provided result values (arg 0 NaN, arg 1 infinite,
    // arg 2 normal, arg 3 subnormal, arg 4 zero), merged through a PHI.
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
        Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                          "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
        Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                              "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
        Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                              "isnormal");
    Value *NormalResult =
        Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                             EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }
4475
  // An alloca will always return a pointer to the alloca (stack) address
  // space. This address space need not be the same as the AST / Language
  // default (e.g. in C / C++ auto vars are in the generic address space). At
  // the AST level this is handled within CreateTempAlloca et al., but for the
  // builtin / dynamic alloca we have to handle it here. We use an explicit cast
  // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca_uninitialized:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    const TargetInfo &TI = getContext().getTargetInfo();
    // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
    const Align SuitableAlignmentInBytes =
        CGM.getContext()
            .toCharUnitsFromBits(TI.getSuitableAlign())
            .getAsAlign();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(SuitableAlignmentInBytes);
    // Only the *_uninitialized flavor skips auto-var-init of the buffer.
    if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
      initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
    if (AI->getAddressSpace() !=
        CGM.getContext().getTargetAddressSpace(
      llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
      return RValue::get(performAddrSpaceCast(AI, Ty));
    }
    return RValue::get(AI);
  }

  case Builtin::BI__builtin_alloca_with_align_uninitialized:
  case Builtin::BI__builtin_alloca_with_align: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    // The alignment (arg 1) is required to be an integer constant.
    Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
    auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
    unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
    const Align AlignmentInBytes =
        CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
    AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
    AI->setAlignment(AlignmentInBytes);
    if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
      initializeAlloca(*this, AI, Size, AlignmentInBytes);
    if (AI->getAddressSpace() !=
        CGM.getContext().getTargetAddressSpace(
      llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
      return RValue::get(performAddrSpaceCast(AI, Ty));
    }
    return RValue::get(AI);
  }
4526
  case Builtin::BI__builtin_infer_alloc_token: {
    // Build allocation-token metadata for this call site and feed it to the
    // llvm.alloc.token.id intrinsic, which yields an intptr-sized token.
    llvm::MDNode *MDN = buildAllocToken(E);
    llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
    llvm::Function *F =
        CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
    llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
    return RValue::get(TokenID);
  }
4535
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    // bzero(p, n): memset the range to zero; returns nothing.
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(nullptr);
  }

  case Builtin::BIbcopy:
  case Builtin::BI__builtin_bcopy: {
    // bcopy(src, dst, n): overlap-safe copy, lowered to memmove.
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
                        E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
                        0);
                        E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
                        0);
    auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(nullptr);
  }

  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy:
  case Builtin::BImempcpy:
  case Builtin::BI__builtin_mempcpy: {
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
    EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
    auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    // mempcpy returns a pointer one past the last byte written; memcpy
    // returns the destination.
    if (BuiltinID == Builtin::BImempcpy ||
        BuiltinID == Builtin::BI__builtin_mempcpy)
      return RValue::get(Builder.CreateInBoundsGEP(
          Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
    else
      return RValue::get(Dest, *this);
  }

  case Builtin::BI__builtin_memcpy_inline: {
    // Size (arg 2) must be a constant; emit the never-libcall inline memcpy.
    uint64_t Size =
        E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
    EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
    EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
    auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_char_memchr:
    // Same lowering as memchr: retarget the ID and break out of the switch
    // for the common handling.
    BuiltinID = Builtin::BI__builtin_memchr;
    break;

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(Dest, *this);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    // GC-aware memmove: delegate to the Objective-C runtime so write
    // barriers are emitted for collectable memory.
    Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
    Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  DestAddr, SrcAddr, SizeVal);
    return RValue::get(DestAddr, *this);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(Dest, *this);
  }

  case Builtin::BI__builtin_trivially_relocate:
  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    // trivially_relocate counts elements, not bytes: scale the count by the
    // pointee size before emitting the memmove.
    if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
      SizeVal = Builder.CreateMul(
          SizeVal,
          ConstantInt::get(
              SizeVal->getType(),
              getContext()
                  .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
                  .getQuantity()));
    EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
    EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
    auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(Dest, *this);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    // The fill value (arg 1) is an int per the C signature; truncate to i8.
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
                        E->getArg(0)->getExprLoc(), FD, 0);
    auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    addInstToNewSourceAtom(I, ByteVal);
    return RValue::get(Dest, *this);
  }
  case Builtin::BI__builtin_memset_inline: {
    Value *ByteVal =
        Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
    // Size (arg 2) must be a constant; emit the never-libcall inline memset.
    uint64_t Size =
        E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
                        E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
                        0);
    auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    Expr::EvalResult SizeResult, DstSizeResult;
    if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
      break;
    llvm::APSInt Size = SizeResult.Val.getInt();
    llvm::APSInt DstSize = DstSizeResult.Val.getInt();
    if (Size.ugt(DstSize))
      break;
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
    addInstToNewSourceAtom(I, nullptr);
    return RValue::get(Dest, *this);
  }
  case Builtin::BI__builtin_wmemchr: {
    // The MSVC runtime library does not provide a definition of wmemchr, so we
    // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

    llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
    Value *Str = EmitScalarExpr(E->getArg(0));
    Value *Chr = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));

    // Loop structure: Entry -> (CmpEq <-> Next) -> Exit, where CmpEq tests
    // the current element and Next advances the pointer / decrements the
    // remaining count.
    BasicBlock *Entry = Builder.GetInsertBlock();
    BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
    BasicBlock *Next = createBasicBlock("wmemchr.next");
    BasicBlock *Exit = createBasicBlock("wmemchr.exit");
    // Empty range: return null immediately.
    Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpEq);

    EmitBlock(CmpEq);
    PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
    StrPhi->addIncoming(Str, Entry);
    PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
    SizePhi->addIncoming(Size, Entry);
    CharUnits WCharAlign =
    Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
    Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
    Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
    Builder.CreateCondBr(StrEqChr, Exit, Next);

    EmitBlock(Next);
    Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
    Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
    StrPhi->addIncoming(NextStr, Next);
    SizePhi->addIncoming(NextSize, Next);

    // Result: null when exhausted (from Entry or Next), else the match.
    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
    Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
    Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
    Ret->addIncoming(FoundChr, CmpEq);
    return RValue::get(Ret);
  }
  case Builtin::BI__builtin_wmemcmp: {
    // The MSVC runtime library does not provide a definition of wmemcmp, so we
    // need an inline implementation.
    if (!getTarget().getTriple().isOSMSVCRT())
      break;

    llvm::Type *WCharTy = ConvertType(getContext().WCharTy);

    Value *Dst = EmitScalarExpr(E->getArg(0));
    Value *Src = EmitScalarExpr(E->getArg(1));
    Value *Size = EmitScalarExpr(E->getArg(2));

    // Loop structure: Entry -> (CmpGT -> CmpLT -> Next)* -> Exit; the two
    // compare blocks decide >, < respectively; Next advances both pointers.
    BasicBlock *Entry = Builder.GetInsertBlock();
    BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
    BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
    BasicBlock *Next = createBasicBlock("wmemcmp.next");
    BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
    Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(SizeEq0, Exit, CmpGT);

    EmitBlock(CmpGT);
    PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
    DstPhi->addIncoming(Dst, Entry);
    PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
    SrcPhi->addIncoming(Src, Entry);
    PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
    SizePhi->addIncoming(Size, Entry);
    CharUnits WCharAlign =
    Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
    Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
    // Unsigned element compare (wchar_t is unsigned on MSVC targets).
    Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
    Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);

    EmitBlock(CmpLT);
    Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
    Builder.CreateCondBr(DstLtSrc, Exit, Next);

    EmitBlock(Next);
    Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
    Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
    Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
    Value *NextSizeEq0 =
        Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
    Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
    DstPhi->addIncoming(NextDst, Next);
    SrcPhi->addIncoming(NextSrc, Next);
    SizePhi->addIncoming(NextSize, Next);

    // Result: 0 for equal ranges, 1 if Dst > Src, -1 if Dst < Src.
    EmitBlock(Exit);
    PHINode *Ret = Builder.CreatePHI(IntTy, 4);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
    Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
    Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
    Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
    return RValue::get(Ret);
  }
4809 case Builtin::BI__builtin_dwarf_cfa: {
4810 // The offset in bytes from the first argument to the CFA.
4811 //
4812 // Why on earth is this in the frontend? Is there any reason at
4813 // all that the backend can't reasonably determine this while
4814 // lowering llvm.eh.dwarf.cfa()?
4815 //
4816 // TODO: If there's a satisfactory reason, add a target hook for
4817 // this instead of hard-coding 0, which is correct for most targets.
4818 int32_t Offset = 0;
4819
4820 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4821 return RValue::get(Builder.CreateCall(F,
4822 llvm::ConstantInt::get(Int32Ty, Offset)));
4823 }
4824 case Builtin::BI__builtin_return_address: {
4825 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4826 getContext().UnsignedIntTy);
4827 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4828 return RValue::get(Builder.CreateCall(F, Depth));
4829 }
4830 case Builtin::BI_ReturnAddress: {
4831 Function *F = CGM.getIntrinsic(Intrinsic::returnaddress);
4832 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4833 }
4834 case Builtin::BI__builtin_frame_address: {
4835 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4836 getContext().UnsignedIntTy);
4837 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4838 return RValue::get(Builder.CreateCall(F, Depth));
4839 }
4840 case Builtin::BI__builtin_stack_address: {
4841 return RValue::get(Builder.CreateCall(
4842 CGM.getIntrinsic(Intrinsic::stackaddress, AllocaInt8PtrTy)));
4843 }
  case Builtin::BI__builtin_extract_return_addr: {
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    // DWARF column number of the stack pointer register, or an error (and
    // undef) when the target hook reports it as unknown (-1).
    llvm::IntegerType *Ty
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    // Delegate to the target hook; a true return means the target cannot
    // fill in the EH register-size table.
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    // Select the intrinsic matching the width of the offset argument.
    Function *F =
        CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
                                                    : Intrinsic::eh_return_i64);
    Builder.CreateCall(F, {Int, Ptr});
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
4888 case Builtin::BI__builtin_unwind_init: {
4889 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4890 Builder.CreateCall(F);
4891 return RValue::get(nullptr);
4892 }
4893 case Builtin::BI__builtin_extend_pointer: {
4894 // Extends a pointer to the size of an _Unwind_Word, which is
4895 // uint64_t on all platforms. Generally this gets poked into a
4896 // register and eventually used as an address, so if the
4897 // addressing registers are wider than pointers and the platform
4898 // doesn't implicitly ignore high-order bits when doing
4899 // addressing, we need to make sure we zext / sext based on
4900 // the platform's expectations.
4901 //
4902 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4903
4904 // Cast the pointer to intptr_t.
4905 Value *Ptr = EmitScalarExpr(E->getArg(0));
4906 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4907
4908 // If that's 64 bits, we're done.
4909 if (IntPtrTy->getBitWidth() == 64)
4910 return RValue::get(Result);
4911
4912 // Otherwise, ask the codegen data what to do.
4913 if (getTargetHooks().extendPointerWithSExt())
4914 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4915 else
4916 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4917 }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.

    if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
      // On this target, the back end fills in the context buffer completely.
      // It doesn't really matter if the frontend stores to the buffer before
      // calling setjmp, the back-end is going to overwrite them anyway.
      Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
      return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
    }

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr = Builder.CreateCall(
        CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
        ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr = Builder.CreateStackSave();
    assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());

    // Slot 2 of the buffer holds the stack-save token.
    Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
  }
4947 case Builtin::BI__builtin_longjmp: {
4948 Value *Buf = EmitScalarExpr(E->getArg(0));
4949
4950 // Call LLVM's EH longjmp, which is lightweight.
4951 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4952
4953 // longjmp doesn't return; mark this as unreachable.
4954 Builder.CreateUnreachable();
4955
4956 // We do need to preserve an insertion point.
4957 EmitBlock(createBasicBlock("longjmp.cont"));
4958
4959 return RValue::get(nullptr);
4960 }
4961 case Builtin::BI__builtin_launder: {
4962 const Expr *Arg = E->getArg(0);
4963 QualType ArgTy = Arg->getType()->getPointeeType();
4964 Value *Ptr = EmitScalarExpr(Arg);
4965 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4966 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4967
4968 return RValue::get(Ptr);
4969 }
  // The generic (un-suffixed) __sync_* forms are rewritten by Sema to the
  // size-suffixed forms below, so they must never reach IRGen.
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  // __sync_fetch_and_<op>_N: atomicrmw returning the *old* value.
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  // __sync_<op>_and_fetch_N: atomicrmw plus a re-applied non-atomic op to
  // produce the *new* value.
  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  // nand_and_fetch reconstructs the new value as ~(old & val), hence the
  // extra "invert" flag on the post-op.
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                                llvm::Instruction::And, true);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16:
        *this, E, false, AtomicOrdering::SequentiallyConsistent,
        AtomicOrdering::SequentiallyConsistent));

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16:
        *this, E, true, AtomicOrdering::SequentiallyConsistent,
        AtomicOrdering::SequentiallyConsistent));

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    // lock_release: store 0 with release ordering at the operand's width.
    Address Ptr = CheckAtomicAlignment(*this, E);
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();

    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             getContext().getTypeSize(ElTy));
    llvm::StoreInst *Store =
        Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAtomic(llvm::AtomicOrdering::Release);
    return RValue::get(nullptr);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
    return RValue::get(nullptr);
  }
5137
5138 case Builtin::BI__builtin_nontemporal_load:
5139 return RValue::get(EmitNontemporalLoad(*this, E));
5140 case Builtin::BI__builtin_nontemporal_store:
5141 return RValue::get(EmitNontemporalStore(*this, E));
5142 case Builtin::BI__c11_atomic_is_lock_free:
5143 case Builtin::BI__atomic_is_lock_free: {
5144 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5145 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5146 // _Atomic(T) is always properly-aligned.
5147 const char *LibCallName = "__atomic_is_lock_free";
5148 CallArgList Args;
5149 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5150 getContext().getSizeType());
5151 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5152 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5154 else
5155 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5157 const CGFunctionInfo &FuncInfo =
5158 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5159 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5160 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5161 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5162 ReturnValueSlot(), Args);
5163 }
5164
5165 case Builtin::BI__atomic_thread_fence:
5166 case Builtin::BI__atomic_signal_fence:
5167 case Builtin::BI__c11_atomic_thread_fence:
5168 case Builtin::BI__c11_atomic_signal_fence: {
5169 llvm::SyncScope::ID SSID;
5170 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5171 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5172 SSID = llvm::SyncScope::SingleThread;
5173 else
5174 SSID = llvm::SyncScope::System;
5175 Value *Order = EmitScalarExpr(E->getArg(0));
5176 if (isa<llvm::ConstantInt>(Order)) {
5177 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5178 switch (ord) {
5179 case 0: // memory_order_relaxed
5180 default: // invalid order
5181 break;
5182 case 1: // memory_order_consume
5183 case 2: // memory_order_acquire
5184 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5185 break;
5186 case 3: // memory_order_release
5187 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5188 break;
5189 case 4: // memory_order_acq_rel
5190 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5191 break;
5192 case 5: // memory_order_seq_cst
5193 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5194 break;
5195 }
5196 return RValue::get(nullptr);
5197 }
5198
5199 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5200 AcquireBB = createBasicBlock("acquire", CurFn);
5201 ReleaseBB = createBasicBlock("release", CurFn);
5202 AcqRelBB = createBasicBlock("acqrel", CurFn);
5203 SeqCstBB = createBasicBlock("seqcst", CurFn);
5204 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5205
5206 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5207 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5208
5209 Builder.SetInsertPoint(AcquireBB);
5210 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5211 Builder.CreateBr(ContBB);
5212 SI->addCase(Builder.getInt32(1), AcquireBB);
5213 SI->addCase(Builder.getInt32(2), AcquireBB);
5214
5215 Builder.SetInsertPoint(ReleaseBB);
5216 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5217 Builder.CreateBr(ContBB);
5218 SI->addCase(Builder.getInt32(3), ReleaseBB);
5219
5220 Builder.SetInsertPoint(AcqRelBB);
5221 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5222 Builder.CreateBr(ContBB);
5223 SI->addCase(Builder.getInt32(4), AcqRelBB);
5224
5225 Builder.SetInsertPoint(SeqCstBB);
5226 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5227 Builder.CreateBr(ContBB);
5228 SI->addCase(Builder.getInt32(5), SeqCstBB);
5229
5230 Builder.SetInsertPoint(ContBB);
5231 return RValue::get(nullptr);
5232 }
5233 case Builtin::BI__scoped_atomic_thread_fence: {
5235
5236 Value *Order = EmitScalarExpr(E->getArg(0));
5237 Value *Scope = EmitScalarExpr(E->getArg(1));
5238 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5239 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5240 if (Ord && Scp) {
5241 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5242 ? ScopeModel->map(Scp->getZExtValue())
5243 : ScopeModel->map(ScopeModel->getFallBackValue());
5244 switch (Ord->getZExtValue()) {
5245 case 0: // memory_order_relaxed
5246 default: // invalid order
5247 break;
5248 case 1: // memory_order_consume
5249 case 2: // memory_order_acquire
5250 Builder.CreateFence(
5251 llvm::AtomicOrdering::Acquire,
5252 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5253 llvm::AtomicOrdering::Acquire,
5254 getLLVMContext()));
5255 break;
5256 case 3: // memory_order_release
5257 Builder.CreateFence(
5258 llvm::AtomicOrdering::Release,
5259 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5260 llvm::AtomicOrdering::Release,
5261 getLLVMContext()));
5262 break;
5263 case 4: // memory_order_acq_rel
5264 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5265 getTargetHooks().getLLVMSyncScopeID(
5266 getLangOpts(), SS,
5267 llvm::AtomicOrdering::AcquireRelease,
5268 getLLVMContext()));
5269 break;
5270 case 5: // memory_order_seq_cst
5271 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5272 getTargetHooks().getLLVMSyncScopeID(
5273 getLangOpts(), SS,
5274 llvm::AtomicOrdering::SequentiallyConsistent,
5275 getLLVMContext()));
5276 break;
5277 }
5278 return RValue::get(nullptr);
5279 }
5280
5281 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5282
5284 OrderBBs;
5285 if (Ord) {
5286 switch (Ord->getZExtValue()) {
5287 case 0: // memory_order_relaxed
5288 default: // invalid order
5289 ContBB->eraseFromParent();
5290 return RValue::get(nullptr);
5291 case 1: // memory_order_consume
5292 case 2: // memory_order_acquire
5293 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5294 llvm::AtomicOrdering::Acquire);
5295 break;
5296 case 3: // memory_order_release
5297 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5298 llvm::AtomicOrdering::Release);
5299 break;
5300 case 4: // memory_order_acq_rel
5301 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5302 llvm::AtomicOrdering::AcquireRelease);
5303 break;
5304 case 5: // memory_order_seq_cst
5305 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5306 llvm::AtomicOrdering::SequentiallyConsistent);
5307 break;
5308 }
5309 } else {
5310 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5311 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5312 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5313 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5314
5315 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5316 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5317 SI->addCase(Builder.getInt32(1), AcquireBB);
5318 SI->addCase(Builder.getInt32(2), AcquireBB);
5319 SI->addCase(Builder.getInt32(3), ReleaseBB);
5320 SI->addCase(Builder.getInt32(4), AcqRelBB);
5321 SI->addCase(Builder.getInt32(5), SeqCstBB);
5322
5323 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5324 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5325 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5326 OrderBBs.emplace_back(SeqCstBB,
5327 llvm::AtomicOrdering::SequentiallyConsistent);
5328 }
5329
5330 for (auto &[OrderBB, Ordering] : OrderBBs) {
5331 Builder.SetInsertPoint(OrderBB);
5332 if (Scp) {
5333 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5334 ? ScopeModel->map(Scp->getZExtValue())
5335 : ScopeModel->map(ScopeModel->getFallBackValue());
5336 Builder.CreateFence(Ordering,
5337 getTargetHooks().getLLVMSyncScopeID(
5338 getLangOpts(), SS, Ordering, getLLVMContext()));
5339 Builder.CreateBr(ContBB);
5340 } else {
5341 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5342 for (unsigned Scp : ScopeModel->getRuntimeValues())
5343 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5344
5345 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5346 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5347 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5348 auto *B = BBs[Scp];
5349 SI->addCase(Builder.getInt32(Scp), B);
5350
5351 Builder.SetInsertPoint(B);
5352 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5353 getLangOpts(), ScopeModel->map(Scp),
5354 Ordering, getLLVMContext()));
5355 Builder.CreateBr(ContBB);
5356 }
5357 }
5358 }
5359
5360 Builder.SetInsertPoint(ContBB);
5361 return RValue::get(nullptr);
5362 }
5363
5364 case Builtin::BI__builtin_signbit:
5365 case Builtin::BI__builtin_signbitf:
5366 case Builtin::BI__builtin_signbitl: {
5367 return RValue::get(
5368 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5369 ConvertType(E->getType())));
5370 }
5371 case Builtin::BI__warn_memset_zero_len:
5372 return RValue::getIgnored();
5373 case Builtin::BI__annotation: {
5374 // Re-encode each wide string to UTF8 and make an MDString.
5376 for (const Expr *Arg : E->arguments()) {
5377 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5378 assert(Str->getCharByteWidth() == 2);
5379 StringRef WideBytes = Str->getBytes();
5380 std::string StrUtf8;
5381 if (!convertUTF16ToUTF8String(
5382 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5383 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5384 continue;
5385 }
5386 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5387 }
5388
5389 // Build and MDTuple of MDStrings and emit the intrinsic call.
5390 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5391 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5392 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5393 return RValue::getIgnored();
5394 }
5395 case Builtin::BI__builtin_annotation: {
5396 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5397 llvm::Function *F = CGM.getIntrinsic(
5398 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5399
5400 // Get the annotation string, go through casts. Sema requires this to be a
5401 // non-wide string literal, potentially casted, so the cast<> is safe.
5402 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5403 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5404 return RValue::get(
5405 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5406 }
5407 case Builtin::BI__builtin_addcb:
5408 case Builtin::BI__builtin_addcs:
5409 case Builtin::BI__builtin_addc:
5410 case Builtin::BI__builtin_addcl:
5411 case Builtin::BI__builtin_addcll:
5412 case Builtin::BI__builtin_subcb:
5413 case Builtin::BI__builtin_subcs:
5414 case Builtin::BI__builtin_subc:
5415 case Builtin::BI__builtin_subcl:
5416 case Builtin::BI__builtin_subcll: {
5417
5418 // We translate all of these builtins from expressions of the form:
5419 // int x = ..., y = ..., carryin = ..., carryout, result;
5420 // result = __builtin_addc(x, y, carryin, &carryout);
5421 //
5422 // to LLVM IR of the form:
5423 //
5424 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5425 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5426 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5427 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5428 // i32 %carryin)
5429 // %result = extractvalue {i32, i1} %tmp2, 0
5430 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5431 // %tmp3 = or i1 %carry1, %carry2
5432 // %tmp4 = zext i1 %tmp3 to i32
5433 // store i32 %tmp4, i32* %carryout
5434
5435 // Scalarize our inputs.
5436 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5437 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5438 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5439 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5440
5441 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5442 Intrinsic::ID IntrinsicId;
5443 switch (BuiltinID) {
5444 default: llvm_unreachable("Unknown multiprecision builtin id.");
5445 case Builtin::BI__builtin_addcb:
5446 case Builtin::BI__builtin_addcs:
5447 case Builtin::BI__builtin_addc:
5448 case Builtin::BI__builtin_addcl:
5449 case Builtin::BI__builtin_addcll:
5450 IntrinsicId = Intrinsic::uadd_with_overflow;
5451 break;
5452 case Builtin::BI__builtin_subcb:
5453 case Builtin::BI__builtin_subcs:
5454 case Builtin::BI__builtin_subc:
5455 case Builtin::BI__builtin_subcl:
5456 case Builtin::BI__builtin_subcll:
5457 IntrinsicId = Intrinsic::usub_with_overflow;
5458 break;
5459 }
5460
5461 // Construct our resulting LLVM IR expression.
5462 llvm::Value *Carry1;
5463 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5464 X, Y, Carry1);
5465 llvm::Value *Carry2;
5466 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5467 Sum1, Carryin, Carry2);
5468 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5469 X->getType());
5470 Builder.CreateStore(CarryOut, CarryOutPtr);
5471 return RValue::get(Sum2);
5472 }
5473
5474 case Builtin::BI__builtin_add_overflow:
5475 case Builtin::BI__builtin_sub_overflow:
5476 case Builtin::BI__builtin_mul_overflow: {
5477 const clang::Expr *LeftArg = E->getArg(0);
5478 const clang::Expr *RightArg = E->getArg(1);
5479 const clang::Expr *ResultArg = E->getArg(2);
5480
5481 clang::QualType ResultQTy =
5482 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5483
5484 WidthAndSignedness LeftInfo =
5485 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5486 WidthAndSignedness RightInfo =
5487 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5488 WidthAndSignedness ResultInfo =
5489 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5490
5491 // Handle mixed-sign multiplication as a special case, because adding
5492 // runtime or backend support for our generic irgen would be too expensive.
5493 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5494 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5495 RightInfo, ResultArg, ResultQTy,
5496 ResultInfo);
5497
5498 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5499 ResultInfo))
5501 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5502 ResultInfo);
5503
5504 WidthAndSignedness EncompassingInfo =
5505 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5506
5507 llvm::Type *EncompassingLLVMTy =
5508 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5509
5510 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5511
5512 Intrinsic::ID IntrinsicId;
5513 switch (BuiltinID) {
5514 default:
5515 llvm_unreachable("Unknown overflow builtin id.");
5516 case Builtin::BI__builtin_add_overflow:
5517 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5518 : Intrinsic::uadd_with_overflow;
5519 break;
5520 case Builtin::BI__builtin_sub_overflow:
5521 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5522 : Intrinsic::usub_with_overflow;
5523 break;
5524 case Builtin::BI__builtin_mul_overflow:
5525 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5526 : Intrinsic::umul_with_overflow;
5527 break;
5528 }
5529
5530 llvm::Value *Left = EmitScalarExpr(LeftArg);
5531 llvm::Value *Right = EmitScalarExpr(RightArg);
5532 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5533
5534 // Extend each operand to the encompassing type.
5535 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5536 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5537
5538 // Perform the operation on the extended values.
5539 llvm::Value *Overflow, *Result;
5540 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5541
5542 if (EncompassingInfo.Width > ResultInfo.Width) {
5543 // The encompassing type is wider than the result type, so we need to
5544 // truncate it.
5545 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5546
5547 // To see if the truncation caused an overflow, we will extend
5548 // the result and then compare it to the original result.
5549 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5550 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5551 llvm::Value *TruncationOverflow =
5552 Builder.CreateICmpNE(Result, ResultTruncExt);
5553
5554 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5555 Result = ResultTrunc;
5556 }
5557
5558 // Finally, store the result using the pointer.
5559 bool isVolatile =
5560 ResultArg->getType()->getPointeeType().isVolatileQualified();
5561 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5562
5563 return RValue::get(Overflow);
5564 }
5565
5566 case Builtin::BI__builtin_uadd_overflow:
5567 case Builtin::BI__builtin_uaddl_overflow:
5568 case Builtin::BI__builtin_uaddll_overflow:
5569 case Builtin::BI__builtin_usub_overflow:
5570 case Builtin::BI__builtin_usubl_overflow:
5571 case Builtin::BI__builtin_usubll_overflow:
5572 case Builtin::BI__builtin_umul_overflow:
5573 case Builtin::BI__builtin_umull_overflow:
5574 case Builtin::BI__builtin_umulll_overflow:
5575 case Builtin::BI__builtin_sadd_overflow:
5576 case Builtin::BI__builtin_saddl_overflow:
5577 case Builtin::BI__builtin_saddll_overflow:
5578 case Builtin::BI__builtin_ssub_overflow:
5579 case Builtin::BI__builtin_ssubl_overflow:
5580 case Builtin::BI__builtin_ssubll_overflow:
5581 case Builtin::BI__builtin_smul_overflow:
5582 case Builtin::BI__builtin_smull_overflow:
5583 case Builtin::BI__builtin_smulll_overflow: {
5584
5585 // We translate all of these builtins directly to the relevant llvm IR node.
5586
5587 // Scalarize our inputs.
5588 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5589 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5590 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5591
5592 // Decide which of the overflow intrinsics we are lowering to:
5593 Intrinsic::ID IntrinsicId;
5594 switch (BuiltinID) {
5595 default: llvm_unreachable("Unknown overflow builtin id.");
5596 case Builtin::BI__builtin_uadd_overflow:
5597 case Builtin::BI__builtin_uaddl_overflow:
5598 case Builtin::BI__builtin_uaddll_overflow:
5599 IntrinsicId = Intrinsic::uadd_with_overflow;
5600 break;
5601 case Builtin::BI__builtin_usub_overflow:
5602 case Builtin::BI__builtin_usubl_overflow:
5603 case Builtin::BI__builtin_usubll_overflow:
5604 IntrinsicId = Intrinsic::usub_with_overflow;
5605 break;
5606 case Builtin::BI__builtin_umul_overflow:
5607 case Builtin::BI__builtin_umull_overflow:
5608 case Builtin::BI__builtin_umulll_overflow:
5609 IntrinsicId = Intrinsic::umul_with_overflow;
5610 break;
5611 case Builtin::BI__builtin_sadd_overflow:
5612 case Builtin::BI__builtin_saddl_overflow:
5613 case Builtin::BI__builtin_saddll_overflow:
5614 IntrinsicId = Intrinsic::sadd_with_overflow;
5615 break;
5616 case Builtin::BI__builtin_ssub_overflow:
5617 case Builtin::BI__builtin_ssubl_overflow:
5618 case Builtin::BI__builtin_ssubll_overflow:
5619 IntrinsicId = Intrinsic::ssub_with_overflow;
5620 break;
5621 case Builtin::BI__builtin_smul_overflow:
5622 case Builtin::BI__builtin_smull_overflow:
5623 case Builtin::BI__builtin_smulll_overflow:
5624 IntrinsicId = Intrinsic::smul_with_overflow;
5625 break;
5626 }
5627
5628
5629 llvm::Value *Carry;
5630 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5631 Builder.CreateStore(Sum, SumOutPtr);
5632
5633 return RValue::get(Carry);
5634 }
5635 case Builtin::BIaddressof:
5636 case Builtin::BI__addressof:
5637 case Builtin::BI__builtin_addressof:
5638 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5639 case Builtin::BI__builtin_function_start:
5640 return RValue::get(CGM.GetFunctionStart(
5641 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5642 case Builtin::BI__builtin_operator_new:
5644 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5645 case Builtin::BI__builtin_operator_delete:
5647 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5648 return RValue::get(nullptr);
5649
5650 case Builtin::BI__builtin_is_aligned:
5651 return EmitBuiltinIsAligned(E);
5652 case Builtin::BI__builtin_align_up:
5653 return EmitBuiltinAlignTo(E, true);
5654 case Builtin::BI__builtin_align_down:
5655 return EmitBuiltinAlignTo(E, false);
5656
5657 case Builtin::BI__noop:
5658 // __noop always evaluates to an integer literal zero.
5659 return RValue::get(ConstantInt::get(IntTy, 0));
5660 case Builtin::BI__builtin_call_with_static_chain: {
5661 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5662 const Expr *Chain = E->getArg(1);
5663 return EmitCall(Call->getCallee()->getType(),
5664 EmitCallee(Call->getCallee()), Call, ReturnValue,
5665 EmitScalarExpr(Chain));
5666 }
5667 case Builtin::BI_InterlockedExchange8:
5668 case Builtin::BI_InterlockedExchange16:
5669 case Builtin::BI_InterlockedExchange:
5670 case Builtin::BI_InterlockedExchangePointer:
5671 return RValue::get(
5673 case Builtin::BI_InterlockedCompareExchangePointer:
5674 return RValue::get(
5676 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5677 return RValue::get(
5679 case Builtin::BI_InterlockedCompareExchange8:
5680 case Builtin::BI_InterlockedCompareExchange16:
5681 case Builtin::BI_InterlockedCompareExchange:
5682 case Builtin::BI_InterlockedCompareExchange64:
5683 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5684 case Builtin::BI_InterlockedIncrement16:
5685 case Builtin::BI_InterlockedIncrement:
5686 return RValue::get(
5688 case Builtin::BI_InterlockedDecrement16:
5689 case Builtin::BI_InterlockedDecrement:
5690 return RValue::get(
5692 case Builtin::BI_InterlockedAnd8:
5693 case Builtin::BI_InterlockedAnd16:
5694 case Builtin::BI_InterlockedAnd:
5696 case Builtin::BI_InterlockedExchangeAdd8:
5697 case Builtin::BI_InterlockedExchangeAdd16:
5698 case Builtin::BI_InterlockedExchangeAdd:
5699 return RValue::get(
5701 case Builtin::BI_InterlockedExchangeSub8:
5702 case Builtin::BI_InterlockedExchangeSub16:
5703 case Builtin::BI_InterlockedExchangeSub:
5704 return RValue::get(
5706 case Builtin::BI_InterlockedOr8:
5707 case Builtin::BI_InterlockedOr16:
5708 case Builtin::BI_InterlockedOr:
5710 case Builtin::BI_InterlockedXor8:
5711 case Builtin::BI_InterlockedXor16:
5712 case Builtin::BI_InterlockedXor:
5714
5715 case Builtin::BI_bittest64:
5716 case Builtin::BI_bittest:
5717 case Builtin::BI_bittestandcomplement64:
5718 case Builtin::BI_bittestandcomplement:
5719 case Builtin::BI_bittestandreset64:
5720 case Builtin::BI_bittestandreset:
5721 case Builtin::BI_bittestandset64:
5722 case Builtin::BI_bittestandset:
5723 case Builtin::BI_interlockedbittestandreset:
5724 case Builtin::BI_interlockedbittestandreset64:
5725 case Builtin::BI_interlockedbittestandreset64_acq:
5726 case Builtin::BI_interlockedbittestandreset64_rel:
5727 case Builtin::BI_interlockedbittestandreset64_nf:
5728 case Builtin::BI_interlockedbittestandset64:
5729 case Builtin::BI_interlockedbittestandset64_acq:
5730 case Builtin::BI_interlockedbittestandset64_rel:
5731 case Builtin::BI_interlockedbittestandset64_nf:
5732 case Builtin::BI_interlockedbittestandset:
5733 case Builtin::BI_interlockedbittestandset_acq:
5734 case Builtin::BI_interlockedbittestandset_rel:
5735 case Builtin::BI_interlockedbittestandset_nf:
5736 case Builtin::BI_interlockedbittestandreset_acq:
5737 case Builtin::BI_interlockedbittestandreset_rel:
5738 case Builtin::BI_interlockedbittestandreset_nf:
5739 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5740
5741 // These builtins exist to emit regular volatile loads and stores not
5742 // affected by the -fms-volatile setting.
5743 case Builtin::BI__iso_volatile_load8:
5744 case Builtin::BI__iso_volatile_load16:
5745 case Builtin::BI__iso_volatile_load32:
5746 case Builtin::BI__iso_volatile_load64:
5747 return RValue::get(EmitISOVolatileLoad(*this, E));
5748 case Builtin::BI__iso_volatile_store8:
5749 case Builtin::BI__iso_volatile_store16:
5750 case Builtin::BI__iso_volatile_store32:
5751 case Builtin::BI__iso_volatile_store64:
5752 return RValue::get(EmitISOVolatileStore(*this, E));
5753
5754 case Builtin::BI__builtin_ptrauth_sign_constant:
5755 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5756
5757 case Builtin::BI__builtin_ptrauth_auth:
5758 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5759 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5760 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5761 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5762 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5763 case Builtin::BI__builtin_ptrauth_strip: {
5764 // Emit the arguments.
5766 for (auto argExpr : E->arguments())
5767 Args.push_back(EmitScalarExpr(argExpr));
5768
5769 // Cast the value to intptr_t, saving its original type.
5770 llvm::Type *OrigValueType = Args[0]->getType();
5771 if (OrigValueType->isPointerTy())
5772 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5773
5774 switch (BuiltinID) {
5775 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5776 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5777 if (Args[4]->getType()->isPointerTy())
5778 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5779 [[fallthrough]];
5780
5781 case Builtin::BI__builtin_ptrauth_auth:
5782 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5783 if (Args[2]->getType()->isPointerTy())
5784 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5785 break;
5786
5787 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5788 if (Args[1]->getType()->isPointerTy())
5789 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5790 break;
5791
5792 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5793 case Builtin::BI__builtin_ptrauth_strip:
5794 break;
5795 }
5796
5797 // Call the intrinsic.
5798 auto IntrinsicID = [&]() -> unsigned {
5799 switch (BuiltinID) {
5800 case Builtin::BI__builtin_ptrauth_auth:
5801 return Intrinsic::ptrauth_auth;
5802 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5803 return Intrinsic::ptrauth_resign;
5804 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5805 return Intrinsic::ptrauth_resign_load_relative;
5806 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5807 return Intrinsic::ptrauth_blend;
5808 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5809 return Intrinsic::ptrauth_sign_generic;
5810 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5811 return Intrinsic::ptrauth_sign;
5812 case Builtin::BI__builtin_ptrauth_strip:
5813 return Intrinsic::ptrauth_strip;
5814 }
5815 llvm_unreachable("bad ptrauth intrinsic");
5816 }();
5817 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5818 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5819
5820 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5821 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5822 OrigValueType->isPointerTy()) {
5823 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5824 }
5825 return RValue::get(Result);
5826 }
5827
5828 case Builtin::BI__builtin_get_vtable_pointer: {
5829 const Expr *Target = E->getArg(0);
5830 QualType TargetType = Target->getType();
5831 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5832 assert(Decl);
5833 auto ThisAddress = EmitPointerWithAlignment(Target);
5834 assert(ThisAddress.isValid());
5835 llvm::Value *VTablePointer =
5837 return RValue::get(VTablePointer);
5838 }
5839
5840 case Builtin::BI__exception_code:
5841 case Builtin::BI_exception_code:
5843 case Builtin::BI__exception_info:
5844 case Builtin::BI_exception_info:
5846 case Builtin::BI__abnormal_termination:
5847 case Builtin::BI_abnormal_termination:
5849 case Builtin::BI_setjmpex:
5850 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5851 E->getArg(0)->getType()->isPointerType())
5852 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5853 break;
5854 case Builtin::BI_setjmp:
5855 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5856 E->getArg(0)->getType()->isPointerType()) {
5857 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5858 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5859 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5860 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5861 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5862 }
5863 break;
5864
5865 // C++ std:: builtins.
5866 case Builtin::BImove:
5867 case Builtin::BImove_if_noexcept:
5868 case Builtin::BIforward:
5869 case Builtin::BIforward_like:
5870 case Builtin::BIas_const:
5871 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5872 case Builtin::BI__GetExceptionInfo: {
5873 if (llvm::GlobalVariable *GV =
5874 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5875 return RValue::get(GV);
5876 break;
5877 }
5878
5879 case Builtin::BI__fastfail:
5881
5882 case Builtin::BI__builtin_coro_id:
5883 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5884 case Builtin::BI__builtin_coro_promise:
5885 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5886 case Builtin::BI__builtin_coro_resume:
5887 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5888 return RValue::get(nullptr);
5889 case Builtin::BI__builtin_coro_frame:
5890 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5891 case Builtin::BI__builtin_coro_noop:
5892 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5893 case Builtin::BI__builtin_coro_free:
5894 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5895 case Builtin::BI__builtin_coro_destroy:
5896 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5897 return RValue::get(nullptr);
5898 case Builtin::BI__builtin_coro_done:
5899 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5900 case Builtin::BI__builtin_coro_alloc:
5901 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5902 case Builtin::BI__builtin_coro_begin:
5903 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5904 case Builtin::BI__builtin_coro_end:
5905 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5906 case Builtin::BI__builtin_coro_suspend:
5907 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5908 case Builtin::BI__builtin_coro_size:
5909 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5910 case Builtin::BI__builtin_coro_align:
5911 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5912
5913 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5914 case Builtin::BIread_pipe:
5915 case Builtin::BIwrite_pipe: {
5916 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5917 *Arg1 = EmitScalarExpr(E->getArg(1));
5918 CGOpenCLRuntime OpenCLRT(CGM);
5919 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5920 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5921
5922 // Type of the generic packet parameter.
5923 unsigned GenericAS =
5925 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5926
5927 // Testing which overloaded version we should generate the call for.
5928 if (2U == E->getNumArgs()) {
5929 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5930 : "__write_pipe_2";
5931 // Creating a generic function type to be able to call with any builtin or
5932 // user defined type.
5933 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5934 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5935 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5936 return RValue::get(
5937 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5938 {Arg0, ACast, PacketSize, PacketAlign}));
5939 } else {
5940 assert(4 == E->getNumArgs() &&
5941 "Illegal number of parameters to pipe function");
5942 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5943 : "__write_pipe_4";
5944
5945 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5946 Int32Ty, Int32Ty};
5947 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5948 *Arg3 = EmitScalarExpr(E->getArg(3));
5949 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5950 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5951 // We know the third argument is an integer type, but we may need to cast
5952 // it to i32.
5953 if (Arg2->getType() != Int32Ty)
5954 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5955 return RValue::get(
5956 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5957 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5958 }
5959 }
5960 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
5961 // functions
5962 case Builtin::BIreserve_read_pipe:
5963 case Builtin::BIreserve_write_pipe:
5964 case Builtin::BIwork_group_reserve_read_pipe:
5965 case Builtin::BIwork_group_reserve_write_pipe:
5966 case Builtin::BIsub_group_reserve_read_pipe:
5967 case Builtin::BIsub_group_reserve_write_pipe: {
5968 // Composing the mangled name for the function.
5969 const char *Name;
5970 if (BuiltinID == Builtin::BIreserve_read_pipe)
5971 Name = "__reserve_read_pipe";
5972 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5973 Name = "__reserve_write_pipe";
5974 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5975 Name = "__work_group_reserve_read_pipe";
5976 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5977 Name = "__work_group_reserve_write_pipe";
5978 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5979 Name = "__sub_group_reserve_read_pipe";
5980 else
5981 Name = "__sub_group_reserve_write_pipe";
5982
5983 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5984 *Arg1 = EmitScalarExpr(E->getArg(1));
5985 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5986 CGOpenCLRuntime OpenCLRT(CGM);
5987 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5988 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5989
5990 // Building the generic function prototype.
5991 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5992 llvm::FunctionType *FTy =
5993 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5994 // We know the second argument is an integer type, but we may need to cast
5995 // it to i32.
5996 if (Arg1->getType() != Int32Ty)
5997 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
5998 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5999 {Arg0, Arg1, PacketSize, PacketAlign}));
6000 }
6001 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
6002 // functions
6003 case Builtin::BIcommit_read_pipe:
6004 case Builtin::BIcommit_write_pipe:
6005 case Builtin::BIwork_group_commit_read_pipe:
6006 case Builtin::BIwork_group_commit_write_pipe:
6007 case Builtin::BIsub_group_commit_read_pipe:
6008 case Builtin::BIsub_group_commit_write_pipe: {
6009 const char *Name;
6010 if (BuiltinID == Builtin::BIcommit_read_pipe)
6011 Name = "__commit_read_pipe";
6012 else if (BuiltinID == Builtin::BIcommit_write_pipe)
6013 Name = "__commit_write_pipe";
6014 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
6015 Name = "__work_group_commit_read_pipe";
6016 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
6017 Name = "__work_group_commit_write_pipe";
6018 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
6019 Name = "__sub_group_commit_read_pipe";
6020 else
6021 Name = "__sub_group_commit_write_pipe";
6022
6023 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6024 *Arg1 = EmitScalarExpr(E->getArg(1));
6025 CGOpenCLRuntime OpenCLRT(CGM);
6026 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6027 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6028
6029 // Building the generic function prototype.
6030 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
6031 llvm::FunctionType *FTy = llvm::FunctionType::get(
6032 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
6033
6034 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6035 {Arg0, Arg1, PacketSize, PacketAlign}));
6036 }
6037 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
6038 case Builtin::BIget_pipe_num_packets:
6039 case Builtin::BIget_pipe_max_packets: {
6040 const char *BaseName;
6041 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
6042 if (BuiltinID == Builtin::BIget_pipe_num_packets)
6043 BaseName = "__get_pipe_num_packets";
6044 else
6045 BaseName = "__get_pipe_max_packets";
6046 std::string Name = std::string(BaseName) +
6047 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
6048
6049 // Building the generic function prototype.
6050 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6051 CGOpenCLRuntime OpenCLRT(CGM);
6052 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6053 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6054 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
6055 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6056
6057 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6058 {Arg0, PacketSize, PacketAlign}));
6059 }
6060
6061 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
6062 case Builtin::BIto_global:
6063 case Builtin::BIto_local:
6064 case Builtin::BIto_private: {
6065 auto Arg0 = EmitScalarExpr(E->getArg(0));
6066 auto NewArgT = llvm::PointerType::get(
6068 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6069 auto NewRetT = llvm::PointerType::get(
6071 CGM.getContext().getTargetAddressSpace(
6073 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
6074 llvm::Value *NewArg;
6075 if (Arg0->getType()->getPointerAddressSpace() !=
6076 NewArgT->getPointerAddressSpace())
6077 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
6078 else
6079 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
6080 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
6081 auto NewCall =
6082 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
6083 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
6084 ConvertType(E->getType())));
6085 }
6086
6087 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
6088 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
6089 // The code below expands the builtin call to a call to one of the following
6090 // functions that an OpenCL runtime library will have to provide:
6091 // __enqueue_kernel_basic
6092 // __enqueue_kernel_varargs
6093 // __enqueue_kernel_basic_events
6094 // __enqueue_kernel_events_varargs
6095 case Builtin::BIenqueue_kernel: {
6096 StringRef Name; // Generated function call name
6097 unsigned NumArgs = E->getNumArgs();
6098
6099 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6100 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6101 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6102
6103 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6104 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6105 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6106 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6107
6108 // FIXME: Look through the addrspacecast which may exist to the stack
6109 // temporary as a hack.
6110 //
6111 // This is hardcoding the assumed ABI of the target function. This assumes
6112 // direct passing for every argument except NDRange, which is assumed to be
6113 // byval or byref indirect passed.
6114 //
6115 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6116 // through EmitCallArgs to get the correct target ABI.
6117 Range = Range->stripPointerCasts();
6118
6119 llvm::Type *RangePtrTy = Range->getType();
6120
6121 if (NumArgs == 4) {
6122 // The most basic form of the call with parameters:
6123 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6124 Name = "__enqueue_kernel_basic";
6125 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6126 GenericVoidPtrTy};
6127 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6128
6129 auto Info =
6130 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6131 llvm::Value *Kernel =
6132 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6133 llvm::Value *Block =
6134 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6135
6136 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6137 {Queue, Flags, Range, Kernel, Block});
6138 return RValue::get(RTCall);
6139 }
6140 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6141
6142 // Create a temporary array to hold the sizes of local pointer arguments
6143 // for the block. \p First is the position of the first size argument.
6144 auto CreateArrayForSizeVar =
6145 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6146 llvm::APInt ArraySize(32, NumArgs - First);
6148 getContext().getSizeType(), ArraySize, nullptr,
6150 /*IndexTypeQuals=*/0);
6151 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6152 llvm::Value *TmpPtr = Tmp.getPointer();
6153 // The EmitLifetime* pair expect a naked Alloca as their last argument,
6154 // however for cases where the default AS is not the Alloca AS, Tmp is
6155 // actually the Alloca ascasted to the default AS, hence the
6156 // stripPointerCasts()
6157 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6158 llvm::Value *ElemPtr;
6159 EmitLifetimeStart(Alloca);
6160 // Each of the following arguments specifies the size of the corresponding
6161 // argument passed to the enqueued block.
6162 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6163 for (unsigned I = First; I < NumArgs; ++I) {
6164 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6165 auto *GEP =
6166 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6167 if (I == First)
6168 ElemPtr = GEP;
6169 auto *V =
6170 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6171 Builder.CreateAlignedStore(
6172 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6173 }
6174 // Return the Alloca itself rather than a potential ascast as this is only
6175 // used by the paired EmitLifetimeEnd.
6176 return {ElemPtr, Alloca};
6177 };
6178
6179 // Could have events and/or varargs.
6180 if (E->getArg(3)->getType()->isBlockPointerType()) {
6181 // No events passed, but has variadic arguments.
6182 Name = "__enqueue_kernel_varargs";
6183 auto Info =
6184 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6185 llvm::Value *Kernel =
6186 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6187 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6188 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6189
6190 // Create a vector of the arguments, as well as a constant value to
6191 // express to the runtime the number of variadic arguments.
6192 llvm::Value *const Args[] = {Queue, Flags,
6193 Range, Kernel,
6194 Block, ConstantInt::get(IntTy, NumArgs - 4),
6195 ElemPtr};
6196 llvm::Type *const ArgTys[] = {
6197 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6198 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6199
6200 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6201 auto Call = RValue::get(
6202 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6203 EmitLifetimeEnd(TmpPtr);
6204 return Call;
6205 }
6206 // Any calls now have event arguments passed.
6207 if (NumArgs >= 7) {
6208 llvm::PointerType *PtrTy = llvm::PointerType::get(
6209 CGM.getLLVMContext(),
6210 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6211
6212 llvm::Value *NumEvents =
6213 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6214
6215 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
6216 // to be a null pointer constant (including `0` literal), we can take it
6217 // into account and emit null pointer directly.
6218 llvm::Value *EventWaitList = nullptr;
6219 if (E->getArg(4)->isNullPointerConstant(
6221 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6222 } else {
6223 EventWaitList =
6224 E->getArg(4)->getType()->isArrayType()
6226 : EmitScalarExpr(E->getArg(4));
6227 // Convert to generic address space.
6228 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6229 }
6230 llvm::Value *EventRet = nullptr;
6231 if (E->getArg(5)->isNullPointerConstant(
6233 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6234 } else {
6235 EventRet =
6236 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6237 }
6238
6239 auto Info =
6240 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6241 llvm::Value *Kernel =
6242 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6243 llvm::Value *Block =
6244 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6245
6246 std::vector<llvm::Type *> ArgTys = {
6247 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6248 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6249
6250 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6251 NumEvents, EventWaitList, EventRet,
6252 Kernel, Block};
6253
6254 if (NumArgs == 7) {
6255 // Has events but no variadics.
6256 Name = "__enqueue_kernel_basic_events";
6257 llvm::FunctionType *FTy =
6258 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6259 return RValue::get(
6260 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6261 }
6262 // Has event info and variadics
6263 // Pass the number of variadics to the runtime function too.
6264 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6265 ArgTys.push_back(Int32Ty);
6266 Name = "__enqueue_kernel_events_varargs";
6267
6268 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6269 Args.push_back(ElemPtr);
6270 ArgTys.push_back(ElemPtr->getType());
6271
6272 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6273 auto Call = RValue::get(
6274 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6275 EmitLifetimeEnd(TmpPtr);
6276 return Call;
6277 }
6278 llvm_unreachable("Unexpected enqueue_kernel signature");
6279 }
6280 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6281 // parameter.
6282 case Builtin::BIget_kernel_work_group_size: {
6283 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6284 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6285 auto Info =
6286 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6287 Value *Kernel =
6288 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6289 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6291 CGM.CreateRuntimeFunction(
6292 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6293 false),
6294 "__get_kernel_work_group_size_impl"),
6295 {Kernel, Arg}));
6296 }
6297 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6298 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6299 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6300 auto Info =
6301 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6302 Value *Kernel =
6303 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6304 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6306 CGM.CreateRuntimeFunction(
6307 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6308 false),
6309 "__get_kernel_preferred_work_group_size_multiple_impl"),
6310 {Kernel, Arg}));
6311 }
6312 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6313 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6314 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6315 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6316 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6317 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6318 auto Info =
6319 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6320 Value *Kernel =
6321 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6322 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6323 const char *Name =
6324 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6325 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6326 : "__get_kernel_sub_group_count_for_ndrange_impl";
6328 CGM.CreateRuntimeFunction(
6329 llvm::FunctionType::get(
6330 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6331 false),
6332 Name),
6333 {NDRange, Kernel, Block}));
6334 }
6335 case Builtin::BI__builtin_store_half:
6336 case Builtin::BI__builtin_store_halff: {
6337 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
6338 Value *Val = EmitScalarExpr(E->getArg(0));
6340 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6341 Builder.CreateStore(HalfVal, Address);
6342 return RValue::get(nullptr);
6343 }
6344 case Builtin::BI__builtin_load_half: {
6346 Value *HalfVal = Builder.CreateLoad(Address);
6347 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6348 }
6349 case Builtin::BI__builtin_load_halff: {
6351 Value *HalfVal = Builder.CreateLoad(Address);
6352 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6353 }
6354 case Builtin::BI__builtin_printf:
6355 case Builtin::BIprintf:
6356 if (getTarget().getTriple().isNVPTX() ||
6357 getTarget().getTriple().isAMDGCN() ||
6358 (getTarget().getTriple().isSPIRV() &&
6359 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6360 if (getTarget().getTriple().isNVPTX())
6362 if ((getTarget().getTriple().isAMDGCN() ||
6363 getTarget().getTriple().isSPIRV()) &&
6364 getLangOpts().HIP)
6366 }
6367
6368 break;
6369 case Builtin::BI__builtin_canonicalize:
6370 case Builtin::BI__builtin_canonicalizef:
6371 case Builtin::BI__builtin_canonicalizef16:
6372 case Builtin::BI__builtin_canonicalizel:
6373 return RValue::get(
6374 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6375
6376 case Builtin::BI__builtin_thread_pointer: {
6377 if (!getContext().getTargetInfo().isTLSSupported())
6378 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6379
6380 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6381 {GlobalsInt8PtrTy}, {}));
6382 }
6383 case Builtin::BI__builtin_os_log_format:
6384 return emitBuiltinOSLogFormat(*E);
6385
6386 case Builtin::BI__xray_customevent: {
6388 return RValue::getIgnored();
6389
6390 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6392 return RValue::getIgnored();
6393
6394 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6395 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6396 return RValue::getIgnored();
6397
6398 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6399 auto FTy = F->getFunctionType();
6400 auto Arg0 = E->getArg(0);
6401 auto Arg0Val = EmitScalarExpr(Arg0);
6402 auto Arg0Ty = Arg0->getType();
6403 auto PTy0 = FTy->getParamType(0);
6404 if (PTy0 != Arg0Val->getType()) {
6405 if (Arg0Ty->isArrayType())
6406 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6407 else
6408 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6409 }
6410 auto Arg1 = EmitScalarExpr(E->getArg(1));
6411 auto PTy1 = FTy->getParamType(1);
6412 if (PTy1 != Arg1->getType())
6413 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6414 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6415 }
6416
6417 case Builtin::BI__xray_typedevent: {
6418 // TODO: There should be a way to always emit events even if the current
6419 // function is not instrumented. Losing events in a stream can cripple
6420 // a trace.
6422 return RValue::getIgnored();
6423
6424 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6426 return RValue::getIgnored();
6427
6428 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6429 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6430 return RValue::getIgnored();
6431
6432 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6433 auto FTy = F->getFunctionType();
6434 auto Arg0 = EmitScalarExpr(E->getArg(0));
6435 auto PTy0 = FTy->getParamType(0);
6436 if (PTy0 != Arg0->getType())
6437 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6438 auto Arg1 = E->getArg(1);
6439 auto Arg1Val = EmitScalarExpr(Arg1);
6440 auto Arg1Ty = Arg1->getType();
6441 auto PTy1 = FTy->getParamType(1);
6442 if (PTy1 != Arg1Val->getType()) {
6443 if (Arg1Ty->isArrayType())
6444 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6445 else
6446 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6447 }
6448 auto Arg2 = EmitScalarExpr(E->getArg(2));
6449 auto PTy2 = FTy->getParamType(2);
6450 if (PTy2 != Arg2->getType())
6451 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6452 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6453 }
6454
6455 case Builtin::BI__builtin_ms_va_start:
6456 case Builtin::BI__builtin_ms_va_end:
6457 return RValue::get(
6459 BuiltinID == Builtin::BI__builtin_ms_va_start));
6460
6461 case Builtin::BI__builtin_ms_va_copy: {
6462 // Lower this manually. We can't reliably determine whether or not any
6463 // given va_copy() is for a Win64 va_list from the calling convention
6464 // alone, because it's legal to do this from a System V ABI function.
6465 // With opaque pointer types, we won't have enough information in LLVM
6466 // IR to determine this from the argument types, either. Best to do it
6467 // now, while we have enough information.
6468 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6469 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6470
6471 DestAddr = DestAddr.withElementType(Int8PtrTy);
6472 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6473
6474 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6475 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6476 }
6477
6478 case Builtin::BI__builtin_get_device_side_mangled_name: {
6479 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6480 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6481 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6482 return RValue::get(Str.getPointer());
6483 }
6484 }
6485
6486 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6487 // the call using the normal call path, but using the unmangled
6488 // version of the function name.
6489 const auto &BI = getContext().BuiltinInfo;
6490 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6491 BI.isLibFunction(BuiltinID))
6492 return emitLibraryCall(*this, FD, E,
6493 CGM.getBuiltinLibFunction(FD, BuiltinID));
6494
6495 // If this is a predefined lib function (e.g. malloc), emit the call
6496 // using exactly the normal call path.
6497 if (BI.isPredefinedLibFunction(BuiltinID))
6498 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6499
6500 // Check that a call to a target specific builtin has the correct target
6501 // features.
6502 // This is down here to avoid non-target specific builtins, however, if
6503 // generic builtins start to require generic target features then we
6504 // can move this up to the beginning of the function.
6505 checkTargetFeatures(E, FD);
6506
6507 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6508 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6509
6510 // See if we have a target specific intrinsic.
6511 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6512 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6513 StringRef Prefix =
6514 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6515 if (!Prefix.empty()) {
6516 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6517 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6518 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6519 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6520 // NOTE we don't need to perform a compatibility flag check here since the
6521 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
6522 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
6523 if (IntrinsicID == Intrinsic::not_intrinsic)
6524 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6525 }
6526
6527 if (IntrinsicID != Intrinsic::not_intrinsic) {
6529
6530 // Find out if any arguments are required to be integer constant
6531 // expressions.
6532 unsigned ICEArguments = 0;
6534 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6535 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6536
6537 Function *F = CGM.getIntrinsic(IntrinsicID);
6538 llvm::FunctionType *FTy = F->getFunctionType();
6539
6540 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6541 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6542 // If the intrinsic arg type is different from the builtin arg type
6543 // we need to do a bit cast.
6544 llvm::Type *PTy = FTy->getParamType(i);
6545 if (PTy != ArgValue->getType()) {
6546 // XXX - vector of pointers?
6547 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6548 if (PtrTy->getAddressSpace() !=
6549 ArgValue->getType()->getPointerAddressSpace()) {
6550 ArgValue = Builder.CreateAddrSpaceCast(
6551 ArgValue, llvm::PointerType::get(getLLVMContext(),
6552 PtrTy->getAddressSpace()));
6553 }
6554 }
6555
6556 // Cast vector type (e.g., v256i32) to x86_amx, this only happen
6557 // in amx intrinsics.
6558 if (PTy->isX86_AMXTy())
6559 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6560 {ArgValue->getType()}, {ArgValue});
6561 else
6562 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6563 }
6564
6565 Args.push_back(ArgValue);
6566 }
6567
6568 Value *V = Builder.CreateCall(F, Args);
6569 QualType BuiltinRetType = E->getType();
6570
6571 llvm::Type *RetTy = VoidTy;
6572 if (!BuiltinRetType->isVoidType())
6573 RetTy = ConvertType(BuiltinRetType);
6574
6575 if (RetTy != V->getType()) {
6576 // XXX - vector of pointers?
6577 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6578 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6579 V = Builder.CreateAddrSpaceCast(
6580 V, llvm::PointerType::get(getLLVMContext(),
6581 PtrTy->getAddressSpace()));
6582 }
6583 }
6584
6585 // Cast x86_amx to vector type (e.g., v256i32), this only happen
6586 // in amx intrinsics.
6587 if (V->getType()->isX86_AMXTy())
6588 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6589 {V});
6590 else
6591 V = Builder.CreateBitCast(V, RetTy);
6592 }
6593
6594 if (RetTy->isVoidTy())
6595 return RValue::get(nullptr);
6596
6597 return RValue::get(V);
6598 }
6599
6600 // Some target-specific builtins can have aggregate return values, e.g.
6601 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6602 // ReturnValue to be non-null, so that the target-specific emission code can
6603 // always just emit into it.
6605 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6606 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6607 ReturnValue = ReturnValueSlot(DestPtr, false);
6608 }
6609
6610 // Now see if we can emit a target-specific builtin.
6611 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6612 switch (EvalKind) {
6613 case TEK_Scalar:
6614 if (V->getType()->isVoidTy())
6615 return RValue::get(nullptr);
6616 return RValue::get(V);
6617 case TEK_Aggregate:
6618 return RValue::getAggregate(ReturnValue.getAddress(),
6619 ReturnValue.isVolatile());
6620 case TEK_Complex:
6621 llvm_unreachable("No current target builtin returns complex");
6622 }
6623 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6624 }
6625
6626 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6627 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6628 switch (EvalKind) {
6629 case TEK_Scalar:
6630 if (V->getType()->isVoidTy())
6631 return RValue::get(nullptr);
6632 return RValue::get(V);
6633 case TEK_Aggregate:
6634 return RValue::getAggregate(ReturnValue.getAddress(),
6635 ReturnValue.isVolatile());
6636 case TEK_Complex:
6637 llvm_unreachable("No current hlsl builtin returns complex");
6638 }
6639 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6640 }
6641
6642 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6643 return EmitHipStdParUnsupportedBuiltin(this, FD);
6644
6645 ErrorUnsupported(E, "builtin function");
6646
6647 // Unknown builtin, for now just dump it out and return undef.
6648 return GetUndefRValue(E->getType());
6649}
6650
6651namespace {
// Gathers the operands shared by the alignment builtins
// (__builtin_is_aligned, __builtin_align_up, __builtin_align_down):
// the source value, its LLVM type, the alignment normalized to the
// right integer width, and the (alignment - 1) low-bits mask that all
// three lowerings use.
6652struct BuiltinAlignArgs {
6653 llvm::Value *Src = nullptr;
6654 llvm::Type *SrcType = nullptr;
6655 llvm::Value *Alignment = nullptr;
6656 llvm::Value *Mask = nullptr;
6657 llvm::IntegerType *IntType = nullptr;
6658
6659 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6660 QualType AstType = E->getArg(0)->getType();
// Array arguments decay to a pointer before any alignment math is done.
6661 if (AstType->isArrayType())
6662 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6663 else
6664 Src = CGF.EmitScalarExpr(E->getArg(0));
6665 SrcType = Src->getType();
6666 if (SrcType->isPointerTy()) {
// For pointer sources, do the arithmetic at the target's pointer-index
// width (from the DataLayout), not at the pointer's storage width.
6667 IntType = IntegerType::get(
6668 CGF.getLLVMContext(),
6669 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6670 } else {
// Integer sources are aligned in their own type.
6671 assert(SrcType->isIntegerTy());
6672 IntType = cast<llvm::IntegerType>(SrcType);
6673 }
// Normalize the alignment operand to IntType, then precompute
// (alignment - 1): the mask of low bits that must be zero for an
// aligned value.
6674 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6675 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6676 auto *One = llvm::ConstantInt::get(IntType, 1);
6677 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6678 }
6679};
6680} // namespace
6681
6682/// Generate (x & (y-1)) == 0.
// Lowering for __builtin_is_aligned: AND the source (ptrtoint'd to the
// index-width integer when it is a pointer) with the alignment-minus-one
// mask and compare the result against zero.
// NOTE(review): the extraction dropped the original signature line (6683)
// of CodeGenFunction::EmitBuiltinIsAligned here — confirm against the
// upstream file before relying on this listing.
6684 BuiltinAlignArgs Args(E, *this);
6685 llvm::Value *SrcAddress = Args.Src;
// Pointers are converted to an integer address so the bit test below can
// be done with plain integer ops.
6686 if (Args.SrcType->isPointerTy())
6687 SrcAddress =
6688 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
// Aligned iff none of the low (alignment-1) bits are set.
6689 return RValue::get(Builder.CreateICmpEQ(
6690 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6691 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6692}
6693
6694/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6695/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6696/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
// Shared lowering for __builtin_align_up / __builtin_align_down, selected
// by the AlignUp flag.
// NOTE(review): the extraction dropped the original signature line (6697)
// of CodeGenFunction::EmitBuiltinAlignTo here — confirm against the
// upstream file before relying on this listing.
6698 BuiltinAlignArgs Args(E, *this);
6699 llvm::Value *SrcForMask = Args.Src;
6700 if (AlignUp) {
6701 // When aligning up we have to first add the mask to ensure we go over the
6702 // next alignment value and then align down to the next valid multiple.
6703 // By adding the mask, we ensure that align_up on an already aligned
6704 // value will not change the value.
6705 if (Args.Src->getType()->isPointerTy()) {
// For pointers the bump is a byte-wise GEP; use the checked in-bounds
// form unless pointer overflow is defined by the language options.
6706 if (getLangOpts().PointerOverflowDefined)
6707 SrcForMask =
6708 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6709 else
6710 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6711 /*SignedIndices=*/true,
6712 /*isSubtraction=*/false,
6713 E->getExprLoc(), "over_boundary");
6714 } else {
// Integer sources just add the mask directly.
6715 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6716 }
6717 }
6718 // Invert the mask to only clear the lower bits.
6719 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6720 llvm::Value *Result = nullptr;
6721 if (Args.Src->getType()->isPointerTy()) {
// llvm.ptrmask keeps the value a pointer, avoiding a ptrtoint/inttoptr
// round-trip (see the note in the doc comment above).
6722 Result = Builder.CreateIntrinsic(
6723 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6724 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6725 } else {
6726 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6727 }
// The lowering must not change the value's LLVM type.
6728 assert(Result->getType() == Args.SrcType);
6729 return RValue::get(Result);
6730}
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering)
Utility to insert an atomic cmpxchg instruction.
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:797
Builtin::Context & BuiltinInfo
Definition ASTContext.h:799
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:916
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3742
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4144
Expr * getRHS() const
Definition Expr.h:4093
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
arg_range arguments()
Definition Expr.h:3198
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:146
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:153
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:190
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:408
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:179
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:138
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:356
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
llvm::Value * getPipeElemAlign(const Expr *PipeArg)
llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2838
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1193
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5186
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2328
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:507
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:3998
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7167
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3888
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4682
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2766
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6529
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7860
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:73
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4146
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1316
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2223
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5342
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside a preserved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:5111
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4581
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2353
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1608
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:793
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment and cas...
Definition CGExpr.cpp:189
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4566
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4488
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2257
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1247
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:428
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4476
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2167
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1801
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query to whether or not target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3283
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4395
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3444
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3474
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:459
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:593
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:577
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3095
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:838
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3670
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4050
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:226
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3160
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4856
Represents a function declaration or definition.
Definition Decl.h:2000
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3763
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5315
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5603
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2202
PipeType - OpenCL20.
Definition TypeBase.h:8206
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3336
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8472
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8514
Represents a struct/union/class.
Definition Decl.h:4327
field_range fields() const
Definition Decl.h:4530
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isUnion() const
Definition Decl.h:3928
Exposes information about the current target.
Definition TargetInfo.h:226
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:747
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1839
bool isBlockPointerType() const
Definition TypeBase.h:8645
bool isVoidType() const
Definition TypeBase.h:8991
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2230
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8724
bool isCountAttributedType() const
Definition Type.cpp:742
bool isPointerType() const
Definition TypeBase.h:8625
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9035
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9285
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1922
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:753
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9218
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4183
QualType getElementType() const
Definition TypeBase.h:4197
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:366
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742