// Doxygen page-header residue from the clang 23.0.0git source listing of
// CGBuiltin.cpp (not part of the original source file).
1//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "ABIInfo.h"
15#include "CGCUDARuntime.h"
16#include "CGCXXABI.h"
17#include "CGDebugInfo.h"
18#include "CGObjCRuntime.h"
19#include "CGOpenCLRuntime.h"
20#include "CGRecordLayout.h"
21#include "CGValue.h"
22#include "CodeGenFunction.h"
23#include "CodeGenModule.h"
24#include "ConstantEmitter.h"
25#include "PatternInit.h"
26#include "TargetInfo.h"
27#include "clang/AST/OSLog.h"
31#include "llvm/IR/InlineAsm.h"
32#include "llvm/IR/Instruction.h"
33#include "llvm/IR/Intrinsics.h"
34#include "llvm/IR/IntrinsicsX86.h"
35#include "llvm/IR/MatrixBuilder.h"
36#include "llvm/Support/ConvertUTF.h"
37#include "llvm/Support/ScopedPrinter.h"
38#include <optional>
39#include <utility>
40
41using namespace clang;
42using namespace CodeGen;
43using namespace llvm;
44
45/// Some builtins do not have library implementation on some targets and
46/// are instead emitted as LLVM IRs by some target builtin emitters.
47/// FIXME: Remove this when library support is added
48static bool shouldEmitBuiltinAsIR(unsigned BuiltinID,
49 const Builtin::Context &BI,
50 const CodeGenFunction &CGF) {
// Only bypass the libcall when errno support is disabled; with -fmath-errno
// the call must reach the library so errno can be updated.
51 if (!CGF.CGM.getLangOpts().MathErrno &&
55 switch (BuiltinID) {
56 default:
57 return false;
// logb/scalbn (float and double flavours) are emitted directly as IR on
// targets that lack a library implementation for them.
58 case Builtin::BIlogbf:
59 case Builtin::BI__builtin_logbf:
60 case Builtin::BIlogb:
61 case Builtin::BI__builtin_logb:
62 case Builtin::BIscalbnf:
63 case Builtin::BI__builtin_scalbnf:
64 case Builtin::BIscalbn:
65 case Builtin::BI__builtin_scalbn:
66 return true;
67 }
68 }
69 return false;
70}
71
// Dispatch a target-specific builtin to the per-architecture emitter selected
// by `Arch` (which may be the aux target's arch when emitting aux builtins).
73 unsigned BuiltinID, const CallExpr *E,
74 ReturnValueSlot ReturnValue,
75 llvm::Triple::ArchType Arch) {
76 // When compiling in HipStdPar mode we have to be conservative in rejecting
77 // target specific features in the FE, and defer the possible error to the
78 // AcceleratorCodeSelection pass, wherein iff an unsupported target builtin is
79 // referenced by an accelerator executable function, we emit an error.
80 // Returning nullptr here leads to the builtin being handled in
81 // EmitStdParUnsupportedBuiltin.
82 if (CGF->getLangOpts().HIPStdPar && CGF->getLangOpts().CUDAIsDevice &&
83 Arch != CGF->getTarget().getTriple().getArch())
84 return nullptr;
85
86 switch (Arch) {
87 case llvm::Triple::arm:
88 case llvm::Triple::armeb:
89 case llvm::Triple::thumb:
90 case llvm::Triple::thumbeb:
91 return CGF->EmitARMBuiltinExpr(BuiltinID, E, ReturnValue, Arch);
92 case llvm::Triple::aarch64:
93 case llvm::Triple::aarch64_32:
94 case llvm::Triple::aarch64_be:
95 return CGF->EmitAArch64BuiltinExpr(BuiltinID, E, Arch);
96 case llvm::Triple::bpfeb:
97 case llvm::Triple::bpfel:
98 return CGF->EmitBPFBuiltinExpr(BuiltinID, E);
99 case llvm::Triple::dxil:
100 return CGF->EmitDirectXBuiltinExpr(BuiltinID, E);
101 case llvm::Triple::x86:
102 case llvm::Triple::x86_64:
103 return CGF->EmitX86BuiltinExpr(BuiltinID, E);
104 case llvm::Triple::ppc:
105 case llvm::Triple::ppcle:
106 case llvm::Triple::ppc64:
107 case llvm::Triple::ppc64le:
108 return CGF->EmitPPCBuiltinExpr(BuiltinID, E);
109 case llvm::Triple::r600:
110 case llvm::Triple::amdgcn:
111 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
112 case llvm::Triple::systemz:
113 return CGF->EmitSystemZBuiltinExpr(BuiltinID, E);
114 case llvm::Triple::nvptx:
115 case llvm::Triple::nvptx64:
116 return CGF->EmitNVPTXBuiltinExpr(BuiltinID, E);
117 case llvm::Triple::wasm32:
118 case llvm::Triple::wasm64:
119 return CGF->EmitWebAssemblyBuiltinExpr(BuiltinID, E);
120 case llvm::Triple::hexagon:
121 return CGF->EmitHexagonBuiltinExpr(BuiltinID, E);
122 case llvm::Triple::riscv32:
123 case llvm::Triple::riscv64:
124 case llvm::Triple::riscv32be:
125 case llvm::Triple::riscv64be:
126 return CGF->EmitRISCVBuiltinExpr(BuiltinID, E, ReturnValue);
// SPIR-V targeting the AMDHSA OS reuses the AMDGPU builtin emitter.
127 case llvm::Triple::spirv32:
128 case llvm::Triple::spirv64:
129 if (CGF->getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
130 return CGF->EmitAMDGPUBuiltinExpr(BuiltinID, E);
131 [[fallthrough]];
132 case llvm::Triple::spirv:
133 return CGF->EmitSPIRVBuiltinExpr(BuiltinID, E);
// Unknown architecture: not a target builtin we can emit here.
134 default:
135 return nullptr;
136 }
137}
138
// Entry point for target builtins: aux builtin IDs (offloading builds) are
// routed to the aux target's architecture emitter; everything else goes to
// the main target's architecture emitter.
140 const CallExpr *E,
142 if (getContext().BuiltinInfo.isAuxBuiltinID(BuiltinID)) {
143 assert(getContext().getAuxTargetInfo() && "Missing aux target info");
145 this, getContext().BuiltinInfo.getAuxBuiltinID(BuiltinID), E,
146 ReturnValue, getContext().getAuxTargetInfo()->getTriple().getArch());
147 }
148
149 return EmitTargetArchBuiltinExpr(this, BuiltinID, E, ReturnValue,
150 getTarget().getTriple().getArch());
151}
152
// Apply -ftrivial-auto-var-init to a builtin alloca: memset the buffer with
// zeroes or the target's pattern byte, and tag the store "auto-init".
153static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size,
154 Align AlignmentInBytes) {
155 ConstantInt *Byte;
156 switch (CGF.getLangOpts().getTrivialAutoVarInit()) {
158 // Nothing to initialize.
159 return;
161 Byte = CGF.Builder.getInt8(0x00);
162 break;
// Pattern mode: ask PatternInit for the target's byte-fill pattern.
164 llvm::Type *Int8 = llvm::IntegerType::getInt8Ty(CGF.CGM.getLLVMContext());
165 Byte = llvm::dyn_cast<llvm::ConstantInt>(
166 initializationPatternFor(CGF.CGM, Int8));
167 break;
168 }
169 }
// stopAutoInit() suppresses the initialization (presumably once a configured
// limit is reached — confirm against CodeGenModule::stopAutoInit).
170 if (CGF.CGM.stopAutoInit())
171 return;
172 auto *I = CGF.Builder.CreateMemSet(AI, Byte, Size, AlignmentInBytes);
173 I->addAnnotationMetadata("auto-init");
174}
175
176/// getBuiltinLibFunction - Given a builtin id for a function like
177/// "__builtin_fabsf", return a Function* for "fabsf".
179 unsigned BuiltinID) {
180 assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
181
182 // Get the name, skip over the __builtin_ prefix (if necessary). We may have
183 // to build this up so provide a small stack buffer to handle the vast
184 // majority of names.
186 GlobalDecl D(FD);
187
188 // TODO: This list should be expanded or refactored after all GCC-compatible
189 // std libcall builtins are implemented.
// On PPC64 with IEEE-quad long double, printf/scanf-family libcalls are
// redirected to their *ieee128 variants (see the isPPC64() check below).
190 static const SmallDenseMap<unsigned, StringRef, 64> F128Builtins{
191 {Builtin::BI__builtin___fprintf_chk, "__fprintf_chkieee128"},
192 {Builtin::BI__builtin___printf_chk, "__printf_chkieee128"},
193 {Builtin::BI__builtin___snprintf_chk, "__snprintf_chkieee128"},
194 {Builtin::BI__builtin___sprintf_chk, "__sprintf_chkieee128"},
195 {Builtin::BI__builtin___vfprintf_chk, "__vfprintf_chkieee128"},
196 {Builtin::BI__builtin___vprintf_chk, "__vprintf_chkieee128"},
197 {Builtin::BI__builtin___vsnprintf_chk, "__vsnprintf_chkieee128"},
198 {Builtin::BI__builtin___vsprintf_chk, "__vsprintf_chkieee128"},
199 {Builtin::BI__builtin_fprintf, "__fprintfieee128"},
200 {Builtin::BI__builtin_printf, "__printfieee128"},
201 {Builtin::BI__builtin_snprintf, "__snprintfieee128"},
202 {Builtin::BI__builtin_sprintf, "__sprintfieee128"},
203 {Builtin::BI__builtin_vfprintf, "__vfprintfieee128"},
204 {Builtin::BI__builtin_vprintf, "__vprintfieee128"},
205 {Builtin::BI__builtin_vsnprintf, "__vsnprintfieee128"},
206 {Builtin::BI__builtin_vsprintf, "__vsprintfieee128"},
207 {Builtin::BI__builtin_fscanf, "__fscanfieee128"},
208 {Builtin::BI__builtin_scanf, "__scanfieee128"},
209 {Builtin::BI__builtin_sscanf, "__sscanfieee128"},
210 {Builtin::BI__builtin_vfscanf, "__vfscanfieee128"},
211 {Builtin::BI__builtin_vscanf, "__vscanfieee128"},
212 {Builtin::BI__builtin_vsscanf, "__vsscanfieee128"},
213 {Builtin::BI__builtin_nexttowardf128, "__nexttowardieee128"},
214 };
215
216 // The AIX library functions frexpl, ldexpl, and modfl are for 128-bit
217 // IBM 'long double' (i.e. __ibm128). Map to the 'double' versions
218 // if it is 64-bit 'long double' mode.
219 static const SmallDenseMap<unsigned, StringRef, 4> AIXLongDouble64Builtins{
220 {Builtin::BI__builtin_frexpl, "frexp"},
221 {Builtin::BI__builtin_ldexpl, "ldexp"},
222 {Builtin::BI__builtin_modfl, "modf"},
223 };
224
225 // If the builtin has been declared explicitly with an assembler label,
226 // use the mangled name. This differs from the plain label on platforms
227 // that prefix labels.
228 if (FD->hasAttr<AsmLabelAttr>())
229 Name = getMangledName(D);
230 else {
231 // TODO: This mutation should also be applied to other targets other than
232 // PPC, after backend supports IEEE 128-bit style libcalls.
233 if (getTriple().isPPC64() &&
234 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad() &&
235 F128Builtins.contains(BuiltinID))
236 Name = F128Builtins.lookup(BuiltinID);
237 else if (getTriple().isOSAIX() &&
238 &getTarget().getLongDoubleFormat() ==
239 &llvm::APFloat::IEEEdouble() &&
240 AIXLongDouble64Builtins.contains(BuiltinID))
241 Name = AIXLongDouble64Builtins.lookup(BuiltinID);
242 else
// Strip the "__builtin_" prefix (10 characters) to get the libcall name.
243 Name = Context.BuiltinInfo.getName(BuiltinID).substr(10);
244 }
245
246 llvm::FunctionType *Ty =
247 cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
248
249 return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
250}
251
252/// Emit the conversions required to turn the given value into an
253/// integer of the given size.
254Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
255 QualType T, llvm::IntegerType *IntType) {
256 V = CGF.EmitToMemory(V, T);
257
258 if (V->getType()->isPointerTy())
259 return CGF.Builder.CreatePtrToInt(V, IntType);
260
261 assert(V->getType() == IntType);
262 return V;
263}
264
265Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
266 QualType T, llvm::Type *ResultType) {
267 V = CGF.EmitFromMemory(V, T);
268
269 if (ResultType->isPointerTy())
270 return CGF.Builder.CreateIntToPtr(V, ResultType);
271
272 assert(V->getType() == ResultType);
273 return V;
274}
275
// Emit the address operand of an atomic builtin and verify it against the
// value's natural alignment; an under-aligned pointer is diagnosed and the
// address is forced up to natural alignment.
277 ASTContext &Ctx = CGF.getContext();
278 Address Ptr = CGF.EmitPointerWithAlignment(E->getArg(0));
279 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
280 unsigned Bytes = Ptr.getElementType()->isPointerTy()
282 : DL.getTypeStoreSize(Ptr.getElementType());
283 unsigned Align = Ptr.getAlignment().getQuantity();
// Misaligned atomics are still emitted, but at natural alignment, after
// warning with warn_sync_op_misaligned.
284 if (Align % Bytes != 0) {
285 DiagnosticsEngine &Diags = CGF.CGM.getDiags();
286 Diags.Report(E->getBeginLoc(), diag::warn_sync_op_misaligned);
287 // Force address to be at least naturally-aligned.
288 return Ptr.withAlignment(CharUnits::fromQuantity(Bytes));
289 }
290 return Ptr;
291}
292
293/// Utility to insert an atomic instruction based on Intrinsic::ID
294/// and the expression node.
/// Returns the pre-operation (old) value, converted back to the source type.
296 CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E,
297 AtomicOrdering Ordering) {
298
299 QualType T = E->getType();
300 assert(E->getArg(0)->getType()->isPointerType());
301 assert(CGF.getContext().hasSameUnqualifiedType(T,
302 E->getArg(0)->getType()->getPointeeType()));
303 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
304
305 Address DestAddr = CheckAtomicAlignment(CGF, E);
306
// The RMW operates on an integer of T's bit width; pointers round-trip
// through EmitToInt/EmitFromInt.
307 llvm::IntegerType *IntType = llvm::IntegerType::get(
308 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
309
310 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
311 llvm::Type *ValueType = Val->getType();
312 Val = EmitToInt(CGF, Val, T, IntType);
313
// atomicrmw yields the value that was in memory before the operation.
314 llvm::Value *Result =
315 CGF.Builder.CreateAtomicRMW(Kind, DestAddr, Val, Ordering);
316 return EmitFromInt(CGF, Result, T, ValueType);
317}
318
// Nontemporal store builtin: store arg0 through the pointer with the
// nontemporal hint set; the builtin itself produces no value.
320 Value *Val = CGF.EmitScalarExpr(E->getArg(0));
322
323 Val = CGF.EmitToMemory(Val, E->getArg(0)->getType());
324 LValue LV = CGF.MakeAddrLValue(Addr, E->getArg(0)->getType());
325 LV.setNontemporal(true);
326 CGF.EmitStoreOfScalar(Val, LV, false);
327 return nullptr;
328}
329
332
// Nontemporal load builtin: load the call's result type through the pointer
// argument with the nontemporal hint set.
333 LValue LV = CGF.MakeAddrLValue(Addr, E->getType());
334 LV.setNontemporal(true);
335 return CGF.EmitLoadOfScalar(LV, E->getExprLoc());
336}
337
// Wrap MakeBinaryAtomicValue's result (the old value) as an RValue, using
// that helper's default atomic ordering.
339 llvm::AtomicRMWInst::BinOp Kind,
340 const CallExpr *E) {
341 return RValue::get(MakeBinaryAtomicValue(CGF, Kind, E));
342}
343
344/// Utility to insert an atomic instruction based Intrinsic::ID and
345/// the expression node, where the return value is the result of the
346/// operation.
/// The old value from the RMW is re-combined with the operand via Op so the
/// NEW value is returned; Invert additionally flips all bits (nand-style).
348 llvm::AtomicRMWInst::BinOp Kind,
349 const CallExpr *E,
350 Instruction::BinaryOps Op,
351 bool Invert = false) {
352 QualType T = E->getType();
353 assert(E->getArg(0)->getType()->isPointerType());
354 assert(CGF.getContext().hasSameUnqualifiedType(T,
355 E->getArg(0)->getType()->getPointeeType()));
356 assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
357
358 Address DestAddr = CheckAtomicAlignment(CGF, E);
359
360 llvm::IntegerType *IntType = llvm::IntegerType::get(
361 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
362
363 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(1));
364 llvm::Type *ValueType = Val->getType();
365 Val = EmitToInt(CGF, Val, T, IntType);
366
// Always seq_cst here, unlike MakeBinaryAtomicValue's caller-chosen order.
367 llvm::Value *Result = CGF.Builder.CreateAtomicRMW(
368 Kind, DestAddr, Val, llvm::AtomicOrdering::SequentiallyConsistent);
369 Result = CGF.Builder.CreateBinOp(Op, Result, Val);
370 if (Invert)
371 Result =
372 CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
373 llvm::ConstantInt::getAllOnesValue(IntType));
374 Result = EmitFromInt(CGF, Result, T, ValueType);
375 return RValue::get(Result);
376}
377
378/// Utility to insert an atomic cmpxchg instruction.
379///
380/// @param CGF The current codegen function.
381/// @param E Builtin call expression to convert to cmpxchg.
382/// arg0 - address to operate on
383/// arg1 - value to compare with
384/// arg2 - new value
385/// @param ReturnBool Specifies whether to return success flag of
386/// cmpxchg result or the old value.
387///
388/// @returns result of cmpxchg, according to ReturnBool
389///
390/// Note: In order to lower Microsoft's _InterlockedCompareExchange* intrinsics
391/// invoke the function EmitAtomicCmpXchgForMSIntrin.
393 bool ReturnBool,
394 llvm::AtomicOrdering SuccessOrdering,
395 llvm::AtomicOrdering FailureOrdering) {
// For the bool-returning form the operand type comes from the comparand
// argument rather than the call's result type.
396 QualType T = ReturnBool ? E->getArg(1)->getType() : E->getType();
397 Address DestAddr = CheckAtomicAlignment(CGF, E);
398
399 llvm::IntegerType *IntType = llvm::IntegerType::get(
400 CGF.getLLVMContext(), CGF.getContext().getTypeSize(T));
401
402 Value *Cmp = CGF.EmitScalarExpr(E->getArg(1));
403 llvm::Type *ValueType = Cmp->getType();
404 Cmp = EmitToInt(CGF, Cmp, T, IntType);
405 Value *New = EmitToInt(CGF, CGF.EmitScalarExpr(E->getArg(2)), T, IntType);
406
408 DestAddr, Cmp, New, SuccessOrdering, FailureOrdering);
409 if (ReturnBool)
410 // Extract boolean success flag and zext it to int.
411 return CGF.Builder.CreateZExt(CGF.Builder.CreateExtractValue(Pair, 1),
412 CGF.ConvertType(E->getType()));
413 else
414 // Extract old value and emit it using the same type as compare value.
415 return EmitFromInt(CGF, CGF.Builder.CreateExtractValue(Pair, 0), T,
416 ValueType);
417}
418
419/// This function should be invoked to emit atomic cmpxchg for Microsoft's
420/// _InterlockedCompareExchange* intrinsics which have the following signature:
421/// T _InterlockedCompareExchange(T volatile *Destination,
422/// T Exchange,
423/// T Comparand);
424///
425/// Whereas the llvm 'cmpxchg' instruction has the following syntax:
426/// cmpxchg *Destination, Comparand, Exchange.
427/// So we need to swap Comparand and Exchange when invoking
428/// CreateAtomicCmpXchg. That is the reason we could not use the above utility
429/// function MakeAtomicCmpXchgValue since it expects the arguments to be
430/// already swapped.
431
432static
434 AtomicOrdering SuccessOrdering = AtomicOrdering::SequentiallyConsistent) {
435 assert(E->getArg(0)->getType()->isPointerType());
437 E->getType(), E->getArg(0)->getType()->getPointeeType()));
438 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
439 E->getArg(1)->getType()));
440 assert(CGF.getContext().hasSameUnqualifiedType(E->getType(),
441 E->getArg(2)->getType()));
442
443 Address DestAddr = CheckAtomicAlignment(CGF, E);
444
445 auto *Exchange = CGF.EmitScalarExpr(E->getArg(1));
446 auto *RTy = Exchange->getType();
447
448 auto *Comparand = CGF.EmitScalarExpr(E->getArg(2));
449
// Pointer payloads are round-tripped through pointer-sized integers since
// the comparison/exchange is done on integer values.
450 if (RTy->isPointerTy()) {
451 Exchange = CGF.Builder.CreatePtrToInt(Exchange, CGF.IntPtrTy);
452 Comparand = CGF.Builder.CreatePtrToInt(Comparand, CGF.IntPtrTy);
453 }
454
455 // For Release ordering, the failure ordering should be Monotonic.
456 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release ?
457 AtomicOrdering::Monotonic :
458 SuccessOrdering;
459
460 // The atomic instruction is marked volatile for consistency with MSVC. This
461 // blocks the few atomics optimizations that LLVM has. If we want to optimize
462 // _Interlocked* operations in the future, we will have to remove the volatile
463 // marker.
464 auto *CmpXchg = CGF.Builder.CreateAtomicCmpXchg(
465 DestAddr, Comparand, Exchange, SuccessOrdering, FailureOrdering);
466 CmpXchg->setVolatile(true);
467
// The MS intrinsic returns the original value, not a success flag.
468 auto *Result = CGF.Builder.CreateExtractValue(CmpXchg, 0);
469 if (RTy->isPointerTy()) {
470 Result = CGF.Builder.CreateIntToPtr(Result, RTy);
471 }
472
473 return Result;
474}
475
476// 64-bit Microsoft platforms support 128 bit cmpxchg operations. They are
477// prototyped like this:
478//
479// unsigned char _InterlockedCompareExchange128...(
480// __int64 volatile * _Destination,
481// __int64 _ExchangeHigh,
482// __int64 _ExchangeLow,
483// __int64 * _ComparandResult);
484//
485// Note that Destination is assumed to be at least 16-byte aligned, despite
486// being typed int64.
487
// The two 64-bit exchange halves are fused into an i128, the old value is
// written back through _ComparandResult, and the success bit returns as i8.
489 const CallExpr *E,
490 AtomicOrdering SuccessOrdering) {
491 assert(E->getNumArgs() == 4);
492 llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
493 llvm::Value *ExchangeHigh = CGF.EmitScalarExpr(E->getArg(1));
494 llvm::Value *ExchangeLow = CGF.EmitScalarExpr(E->getArg(2));
495 Address ComparandAddr = CGF.EmitPointerWithAlignment(E->getArg(3));
496
497 assert(DestPtr->getType()->isPointerTy());
498 assert(!ExchangeHigh->getType()->isPointerTy());
499 assert(!ExchangeLow->getType()->isPointerTy());
500
501 // For Release ordering, the failure ordering should be Monotonic.
502 auto FailureOrdering = SuccessOrdering == AtomicOrdering::Release
503 ? AtomicOrdering::Monotonic
504 : SuccessOrdering;
505
506 // Convert to i128 pointers and values. Alignment is also overridden for
507 // destination pointer.
508 llvm::Type *Int128Ty = llvm::IntegerType::get(CGF.getLLVMContext(), 128);
509 Address DestAddr(DestPtr, Int128Ty,
511 ComparandAddr = ComparandAddr.withElementType(Int128Ty);
512
513 // (((i128)hi) << 64) | ((i128)lo)
514 ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
515 ExchangeLow = CGF.Builder.CreateZExt(ExchangeLow, Int128Ty);
516 ExchangeHigh =
517 CGF.Builder.CreateShl(ExchangeHigh, llvm::ConstantInt::get(Int128Ty, 64));
518 llvm::Value *Exchange = CGF.Builder.CreateOr(ExchangeHigh, ExchangeLow);
519
520 // Load the comparand for the instruction.
521 llvm::Value *Comparand = CGF.Builder.CreateLoad(ComparandAddr);
522
523 auto *CXI = CGF.Builder.CreateAtomicCmpXchg(DestAddr, Comparand, Exchange,
524 SuccessOrdering, FailureOrdering);
525
526 // The atomic instruction is marked volatile for consistency with MSVC. This
527 // blocks the few atomics optimizations that LLVM has. If we want to optimize
528 // _Interlocked* operations in the future, we will have to remove the volatile
529 // marker.
530 CXI->setVolatile(true);
531
532 // Store the result as an outparameter.
533 CGF.Builder.CreateStore(CGF.Builder.CreateExtractValue(CXI, 0),
534 ComparandAddr);
535
536 // Get the success boolean and zero extend it to i8.
537 Value *Success = CGF.Builder.CreateExtractValue(CXI, 1);
538 return CGF.Builder.CreateZExt(Success, CGF.Int8Ty);
539}
540
// Atomic pre-increment helper: atomically add 1 and return the NEW value
// (the RMW's old value plus one).
542 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
543 assert(E->getArg(0)->getType()->isPointerType());
544
545 auto *IntTy = CGF.ConvertType(E->getType());
546 Address DestAddr = CheckAtomicAlignment(CGF, E);
547 auto *Result = CGF.Builder.CreateAtomicRMW(
548 AtomicRMWInst::Add, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
549 return CGF.Builder.CreateAdd(Result, ConstantInt::get(IntTy, 1));
550}
551
// Atomic pre-decrement helper: atomically subtract 1 and return the NEW
// value (the RMW's old value minus one).
553 CodeGenFunction &CGF, const CallExpr *E,
554 AtomicOrdering Ordering = AtomicOrdering::SequentiallyConsistent) {
555 assert(E->getArg(0)->getType()->isPointerType());
556
557 auto *IntTy = CGF.ConvertType(E->getType());
558 Address DestAddr = CheckAtomicAlignment(CGF, E);
559 auto *Result = CGF.Builder.CreateAtomicRMW(
560 AtomicRMWInst::Sub, DestAddr, ConstantInt::get(IntTy, 1), Ordering);
561 return CGF.Builder.CreateSub(Result, ConstantInt::get(IntTy, 1));
562}
563
564// Build a plain volatile load.
// The load is widened to an integer of the pointee's store size and marked
// both monotonic-atomic and volatile (presumably MSVC /volatile:iso
// semantics — confirm against the __iso_volatile_load* builtins).
566 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
567 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
568 CharUnits LoadSize = CGF.getContext().getTypeSizeInChars(ElTy);
569 llvm::Type *ITy =
570 llvm::IntegerType::get(CGF.getLLVMContext(), LoadSize.getQuantity() * 8);
571 llvm::LoadInst *Load = CGF.Builder.CreateAlignedLoad(ITy, Ptr, LoadSize);
572 Load->setAtomic(llvm::AtomicOrdering::Monotonic);
573 Load->setVolatile(true);
574 return Load;
575}
576
577// Build a plain volatile store.
// Mirror of the volatile load above: store arg1 through arg0, marked both
// monotonic-atomic and volatile.
579 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
580 Value *Value = CGF.EmitScalarExpr(E->getArg(1));
581 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
582 CharUnits StoreSize = CGF.getContext().getTypeSizeInChars(ElTy);
583 llvm::StoreInst *Store =
584 CGF.Builder.CreateAlignedStore(Value, Ptr, StoreSize);
585 Store->setAtomic(llvm::AtomicOrdering::Monotonic);
586 Store->setVolatile(true);
587 return Store;
588}
589
590// Emit a simple mangled intrinsic that has 1 argument and a return type
591// matching the argument type. Depending on mode, this may be a constrained
592// floating-point intrinsic.
594 const CallExpr *E, unsigned IntrinsicID,
595 unsigned ConstrainedIntrinsicID) {
596 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
597
// Scope the expression's FP options (rounding/exception behavior) to the
// emitted call.
598 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
599 if (CGF.Builder.getIsFPConstrained()) {
600 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
601 return CGF.Builder.CreateConstrainedFPCall(F, { Src0 });
602 } else {
603 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
604 return CGF.Builder.CreateCall(F, Src0);
605 }
606}
607
608// Emit an intrinsic that has 2 operands of the same type as its result.
609// Depending on mode, this may be a constrained floating-point intrinsic.
611 const CallExpr *E, unsigned IntrinsicID,
612 unsigned ConstrainedIntrinsicID) {
613 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
614 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
615
// Scope the expression's FP options to the emitted call.
616 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
617 if (CGF.Builder.getIsFPConstrained()) {
618 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
619 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1 });
620 } else {
621 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
622 return CGF.Builder.CreateCall(F, { Src0, Src1 });
623 }
624}
625
626// Has second type mangled argument.
// Like the binary emitter above, but the intrinsic is overloaded on BOTH
// operand types, so both are supplied as mangling types.
627static Value *
629 Intrinsic::ID IntrinsicID,
630 Intrinsic::ID ConstrainedIntrinsicID) {
631 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
632 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
633
634 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
635 if (CGF.Builder.getIsFPConstrained()) {
636 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
637 {Src0->getType(), Src1->getType()});
638 return CGF.Builder.CreateConstrainedFPCall(F, {Src0, Src1});
639 }
640
641 Function *F =
642 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), Src1->getType()});
643 return CGF.Builder.CreateCall(F, {Src0, Src1});
644}
645
646// Emit an intrinsic that has 3 operands of the same type as its result.
647// Depending on mode, this may be a constrained floating-point intrinsic.
649 const CallExpr *E, unsigned IntrinsicID,
650 unsigned ConstrainedIntrinsicID) {
651 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
652 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
653 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
654
// Scope the expression's FP options to the emitted call.
655 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
656 if (CGF.Builder.getIsFPConstrained()) {
657 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID, Src0->getType());
658 return CGF.Builder.CreateConstrainedFPCall(F, { Src0, Src1, Src2 });
659 } else {
660 Function *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
661 return CGF.Builder.CreateCall(F, { Src0, Src1, Src2 });
662 }
663}
664
665// Emit an intrinsic that has overloaded integer result and fp operand.
// Note: unlike the emitters above, the FP-options RAII here is scoped to the
// constrained branch only.
666static Value *
668 unsigned IntrinsicID,
669 unsigned ConstrainedIntrinsicID) {
670 llvm::Type *ResultType = CGF.ConvertType(E->getType());
671 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
672
673 if (CGF.Builder.getIsFPConstrained()) {
674 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
675 Function *F = CGF.CGM.getIntrinsic(ConstrainedIntrinsicID,
676 {ResultType, Src0->getType()});
677 return CGF.Builder.CreateConstrainedFPCall(F, {Src0});
678 } else {
679 Function *F =
680 CGF.CGM.getIntrinsic(IntrinsicID, {ResultType, Src0->getType()});
681 return CGF.Builder.CreateCall(F, Src0);
682 }
683}
684
// frexp-style builtin: the intrinsic returns {mantissa, exponent}; the
// exponent is stored through the arg1 out-pointer, the mantissa is returned.
686 Intrinsic::ID IntrinsicID) {
687 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
688 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
689
// The intrinsic is overloaded on both the FP type and the out-pointer's
// integer pointee type.
690 QualType IntPtrTy = E->getArg(1)->getType()->getPointeeType();
691 llvm::Type *IntTy = CGF.ConvertType(IntPtrTy);
692 llvm::Function *F =
693 CGF.CGM.getIntrinsic(IntrinsicID, {Src0->getType(), IntTy});
694 llvm::Value *Call = CGF.Builder.CreateCall(F, Src0);
695
696 llvm::Value *Exp = CGF.Builder.CreateExtractValue(Call, 1);
697 LValue LV = CGF.MakeNaturalAlignAddrLValue(Src1, IntPtrTy);
698 CGF.EmitStoreOfScalar(Exp, LV);
699
700 return CGF.Builder.CreateExtractValue(Call, 0);
701}
702
703static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E,
704 Intrinsic::ID IntrinsicID) {
705 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
706 llvm::Value *Dest0 = CGF.EmitScalarExpr(E->getArg(1));
707 llvm::Value *Dest1 = CGF.EmitScalarExpr(E->getArg(2));
708
709 llvm::Function *F = CGF.CGM.getIntrinsic(IntrinsicID, {Val->getType()});
710 llvm::Value *Call = CGF.Builder.CreateCall(F, Val);
711
712 llvm::Value *SinResult = CGF.Builder.CreateExtractValue(Call, 0);
713 llvm::Value *CosResult = CGF.Builder.CreateExtractValue(Call, 1);
714
715 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
716 LValue SinLV = CGF.MakeNaturalAlignAddrLValue(Dest0, DestPtrType);
717 LValue CosLV = CGF.MakeNaturalAlignAddrLValue(Dest1, DestPtrType);
718
719 llvm::StoreInst *StoreSin =
720 CGF.Builder.CreateStore(SinResult, SinLV.getAddress());
721 llvm::StoreInst *StoreCos =
722 CGF.Builder.CreateStore(CosResult, CosLV.getAddress());
723
724 // Mark the two stores as non-aliasing with each other. The order of stores
725 // emitted by this builtin is arbitrary, enforcing a particular order will
726 // prevent optimizations later on.
727 llvm::MDBuilder MDHelper(CGF.getLLVMContext());
728 MDNode *Domain = MDHelper.createAnonymousAliasScopeDomain();
729 MDNode *AliasScope = MDHelper.createAnonymousAliasScope(Domain);
730 MDNode *AliasScopeList = MDNode::get(Call->getContext(), AliasScope);
731 StoreSin->setMetadata(LLVMContext::MD_alias_scope, AliasScopeList);
732 StoreCos->setMetadata(LLVMContext::MD_noalias, AliasScopeList);
733}
734
735static llvm::Value *emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E,
736 Intrinsic::ID IntrinsicID) {
737 llvm::Value *Val = CGF.EmitScalarExpr(E->getArg(0));
738 llvm::Value *IntPartDest = CGF.EmitScalarExpr(E->getArg(1));
739
740 llvm::Value *Call =
741 CGF.Builder.CreateIntrinsic(IntrinsicID, {Val->getType()}, Val);
742
743 llvm::Value *FractionalResult = CGF.Builder.CreateExtractValue(Call, 0);
744 llvm::Value *IntegralResult = CGF.Builder.CreateExtractValue(Call, 1);
745
746 QualType DestPtrType = E->getArg(1)->getType()->getPointeeType();
747 LValue IntegralLV = CGF.MakeNaturalAlignAddrLValue(IntPartDest, DestPtrType);
748 CGF.EmitStoreOfScalar(IntegralResult, IntegralLV);
749
750 return FractionalResult;
751}
752
753/// EmitFAbs - Emit a call to @llvm.fabs().
755 Function *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
756 llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
// fabs touches no memory; mark the call so optimizations can move/CSE it.
757 Call->setDoesNotAccessMemory();
758 return Call;
759}
760
761/// Emit the computation of the sign bit for a floating point value. Returns
762/// the i1 sign bit value.
/// Strategy: bitcast the FP value to a same-width integer and test the sign
/// with a signed compare against zero.
764 LLVMContext &C = CGF.CGM.getLLVMContext();
765
766 llvm::Type *Ty = V->getType();
767 int Width = Ty->getPrimitiveSizeInBits();
768 llvm::Type *IntTy = llvm::IntegerType::get(C, Width);
769 V = CGF.Builder.CreateBitCast(V, IntTy);
770 if (Ty->isPPC_FP128Ty()) {
771 // We want the sign bit of the higher-order double. The bitcast we just
772 // did works as if the double-double was stored to memory and then
773 // read as an i128. The "store" will put the higher-order double in the
774 // lower address in both little- and big-Endian modes, but the "load"
775 // will treat those bits as a different part of the i128: the low bits in
776 // little-Endian, the high bits in big-Endian. Therefore, on big-Endian
777 // we need to shift the high bits down to the low before truncating.
778 Width >>= 1;
779 if (CGF.getTarget().isBigEndian()) {
780 Value *ShiftCst = llvm::ConstantInt::get(IntTy, Width);
781 V = CGF.Builder.CreateLShr(V, ShiftCst);
782 }
783 // We are truncating value in order to extract the higher-order
784 // double, which we will be using to extract the sign from.
785 IntTy = llvm::IntegerType::get(C, Width);
786 V = CGF.Builder.CreateTrunc(V, IntTy);
787 }
// Sign bit set <=> the reinterpreted integer is negative.
788 Value *Zero = llvm::Constant::getNullValue(IntTy);
789 return CGF.Builder.CreateICmpSLT(V, Zero);
790}
791
792/// Checks no arguments or results are passed indirectly in the ABI (i.e. via a
793/// hidden pointer). This is used to check annotating FP libcalls (that could
794/// set `errno`) with "int" TBAA metadata is safe. If any floating-point
795/// arguments are passed indirectly, setup for the call could be incorrectly
796/// optimized out.
// "Indirect" covers plain indirect, indirect-aliased, and inalloca passing.
798 auto IsIndirect = [&](ABIArgInfo const &info) {
799 return info.isIndirect() || info.isIndirectAliased() || info.isInAlloca();
800 };
801 return !IsIndirect(FnInfo.getReturnInfo()) &&
802 llvm::none_of(FnInfo.arguments(),
803 [&](CGFunctionInfoArgInfo const &ArgInfo) {
804 return IsIndirect(ArgInfo.info);
805 });
806}
807
// Emit a plain call to the library function backing a builtin. For FP math
// builtins whose only side effect is errno, the call is additionally tagged
// with "int" TBAA metadata so it is treated as writing only int objects.
809 const CallExpr *E, llvm::Constant *calleeValue) {
810 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
811 CGCallee callee = CGCallee::forDirect(calleeValue, GlobalDecl(FD));
812 llvm::CallBase *callOrInvoke = nullptr;
813 CGFunctionInfo const *FnInfo = nullptr;
814 RValue Call =
815 CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot(),
816 /*Chain=*/nullptr, &callOrInvoke, &FnInfo);
817
818 if (unsigned BuiltinID = FD->getBuiltinID()) {
819 // Check whether a FP math builtin function, such as BI__builtin_expf
820 ASTContext &Context = CGF.getContext();
821 bool ConstWithoutErrnoAndExceptions =
823 // Restrict to target with errno, for example, MacOS doesn't set errno.
824 // TODO: Support builtin function with complex type returned, eg: cacosh
// Only scalar, non-constrained-FP calls with -fmath-errno qualify.
825 if (ConstWithoutErrnoAndExceptions && CGF.CGM.getLangOpts().MathErrno &&
826 !CGF.Builder.getIsFPConstrained() && Call.isScalar() &&
828 // Emit "int" TBAA metadata on FP math libcalls.
829 clang::QualType IntTy = Context.IntTy;
830 TBAAAccessInfo TBAAInfo = CGF.CGM.getTBAAAccessInfo(IntTy);
831 CGF.CGM.DecorateInstructionWithTBAA(callOrInvoke, TBAAInfo);
832 }
833 }
834 return Call;
835}
836
837/// Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
838/// depending on IntrinsicID.
839///
840/// \arg CGF The current codegen function.
841/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
842/// \arg X The first argument to the llvm.*.with.overflow.*.
843/// \arg Y The second argument to the llvm.*.with.overflow.*.
844/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
845/// \returns The result (i.e. sum/product) returned by the intrinsic.
847 const Intrinsic::ID IntrinsicID,
848 llvm::Value *X, llvm::Value *Y,
849 llvm::Value *&Carry) {
850 // Make sure we have integers of the same width.
851 assert(X->getType() == Y->getType() &&
852 "Arguments must be the same type. (Did you forget to make sure both "
853 "arguments have the same integer width?)");
854
855 Function *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
// The intrinsic returns the aggregate {result, overflow-bit}.
856 llvm::Value *Tmp = CGF.Builder.CreateCall(Callee, {X, Y});
857 Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
858 return CGF.Builder.CreateExtractValue(Tmp, 0);
859}
860
namespace {
/// Pairs an integer type's bit-width with whether the type is signed. Used
/// below when reasoning about the checked-arithmetic builtins.
struct WidthAndSignedness {
  unsigned Width; // Bit-width of the integer type.
  bool Signed;    // True for signed integer types.
};
} // namespace
867
868static WidthAndSignedness
870 const clang::QualType Type) {
871 assert(Type->isIntegerType() && "Given type is not an integer.");
872 unsigned Width = context.getIntWidth(Type);
874 return {Width, Signed};
875}
876
877// Given one or more integer types, this function produces an integer type that
878// encompasses them: any value in one of the given types could be expressed in
879// the encompassing type.
880static struct WidthAndSignedness
881EncompassingIntegerType(ArrayRef<struct WidthAndSignedness> Types) {
882 assert(Types.size() > 0 && "Empty list of types.");
883
884 // If any of the given types is signed, we must return a signed type.
885 bool Signed = false;
886 for (const auto &Type : Types) {
887 Signed |= Type.Signed;
888 }
889
890 // The encompassing type must have a width greater than or equal to the width
891 // of the specified types. Additionally, if the encompassing type is signed,
892 // its width must be strictly greater than the width of any unsigned types
893 // given.
894 unsigned Width = 0;
895 for (const auto &Type : Types) {
896 unsigned MinWidth = Type.Width + (Signed && !Type.Signed);
897 if (Width < MinWidth) {
898 Width = MinWidth;
899 }
900 }
901
902 return {Width, Signed};
903}
904
905Value *CodeGenFunction::EmitVAStartEnd(Value *ArgValue, bool IsStart) {
906 Intrinsic::ID inst = IsStart ? Intrinsic::vastart : Intrinsic::vaend;
907 return Builder.CreateCall(CGM.getIntrinsic(inst, {ArgValue->getType()}),
908 ArgValue);
909}
910
/// Checks if using the result of __builtin_object_size(p, @p From) in place of
/// __builtin_object_size(p, @p To) is correct.
static bool areBOSTypesCompatible(int From, int To) {
  // Note: Our __builtin_object_size implementation currently treats Type=0 and
  // Type=2 identically. Encoding this implementation detail here may make
  // improving __builtin_object_size difficult in the future, so it's omitted.
  if (From == To)
    return true;
  // A "maximum" answer (0) may also serve the stricter type 1, and a
  // "minimum" answer (3) may also serve the looser type 2.
  return (From == 0 && To == 1) || (From == 3 && To == 2);
}
919
920static llvm::Value *
921getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
922 return ConstantInt::get(ResType, (Type & 2) ? 0 : -1, /*isSigned=*/true);
923}
924
925llvm::Value *
926CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
927 llvm::IntegerType *ResType,
928 llvm::Value *EmittedE,
929 bool IsDynamic) {
930 if (std::optional<uint64_t> ObjectSize =
932 return ConstantInt::get(ResType, *ObjectSize, /*isSigned=*/true);
933 return emitBuiltinObjectSize(E, Type, ResType, EmittedE, IsDynamic);
934}
935
936namespace {
937
938/// StructFieldAccess is a simple visitor class to grab the first MemberExpr
939/// from an Expr. It records any ArraySubscriptExpr we meet along the way.
940class StructFieldAccess
941 : public ConstStmtVisitor<StructFieldAccess, const Expr *> {
942 bool AddrOfSeen = false;
943
944public:
945 const Expr *ArrayIndex = nullptr;
946 QualType ArrayElementTy;
947
948 const Expr *VisitMemberExpr(const MemberExpr *E) {
949 if (AddrOfSeen && E->getType()->isArrayType())
950 // Avoid forms like '&ptr->array'.
951 return nullptr;
952 return E;
953 }
954
955 const Expr *VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
956 if (ArrayIndex)
957 // We don't support multiple subscripts.
958 return nullptr;
959
960 AddrOfSeen = false; // '&ptr->array[idx]' is okay.
961 ArrayIndex = E->getIdx();
962 ArrayElementTy = E->getBase()->getType();
963 return Visit(E->getBase());
964 }
965 const Expr *VisitCastExpr(const CastExpr *E) {
966 if (E->getCastKind() == CK_LValueToRValue)
967 return E;
968 return Visit(E->getSubExpr());
969 }
970 const Expr *VisitParenExpr(const ParenExpr *E) {
971 return Visit(E->getSubExpr());
972 }
973 const Expr *VisitUnaryAddrOf(const clang::UnaryOperator *E) {
974 AddrOfSeen = true;
975 return Visit(E->getSubExpr());
976 }
977 const Expr *VisitUnaryDeref(const clang::UnaryOperator *E) {
978 AddrOfSeen = false;
979 return Visit(E->getSubExpr());
980 }
981 const Expr *VisitBinaryOperator(const clang::BinaryOperator *Op) {
982 return Op->isCommaOp() ? Visit(Op->getRHS()) : nullptr;
983 }
984};
985
986} // end anonymous namespace
987
988/// Find a struct's flexible array member. It may be embedded inside multiple
989/// sub-structs, but must still be the last field.
991 ASTContext &Ctx,
992 const RecordDecl *RD) {
993 const LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel =
994 CGF.getLangOpts().getStrictFlexArraysLevel();
995
996 if (RD->isImplicit())
997 return nullptr;
998
999 for (const FieldDecl *FD : RD->fields()) {
1001 Ctx, FD, FD->getType(), StrictFlexArraysLevel,
1002 /*IgnoreTemplateOrMacroSubstitution=*/true))
1003 return FD;
1004
1005 if (const auto *RD = FD->getType()->getAsRecordDecl())
1006 if (const FieldDecl *FD = FindFlexibleArrayMemberField(CGF, Ctx, RD))
1007 return FD;
1008 }
1009
1010 return nullptr;
1011}
1012
1013/// Calculate the offset of a struct field. It may be embedded inside multiple
1014/// sub-structs.
1015static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD,
1016 const FieldDecl *FD, int64_t &Offset) {
1017 if (RD->isImplicit())
1018 return false;
1019
1020 // Keep track of the field number ourselves, because the other methods
1021 // (CGRecordLayout::getLLVMFieldNo) aren't always equivalent to how the AST
1022 // is laid out.
1023 uint32_t FieldNo = 0;
1024 const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
1025
1026 for (const FieldDecl *Field : RD->fields()) {
1027 if (Field == FD) {
1028 Offset += Layout.getFieldOffset(FieldNo);
1029 return true;
1030 }
1031
1032 if (const auto *RD = Field->getType()->getAsRecordDecl()) {
1033 if (GetFieldOffset(Ctx, RD, FD, Offset)) {
1034 Offset += Layout.getFieldOffset(FieldNo);
1035 return true;
1036 }
1037 }
1038
1039 if (!RD->isUnion())
1040 ++FieldNo;
1041 }
1042
1043 return false;
1044}
1045
1046static std::optional<int64_t>
1047GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD) {
1048 int64_t Offset = 0;
1049
1050 if (GetFieldOffset(Ctx, RD, FD, Offset))
1051 return std::optional<int64_t>(Offset);
1052
1053 return std::nullopt;
1054}
1055
1056llvm::Value *CodeGenFunction::emitCountedBySize(const Expr *E,
1057 llvm::Value *EmittedE,
1058 unsigned Type,
1059 llvm::IntegerType *ResType) {
1060 // Note: If the whole struct is specificed in the __bdos (i.e. Visitor
1061 // returns a DeclRefExpr). The calculation of the whole size of the structure
1062 // with a flexible array member can be done in two ways:
1063 //
1064 // 1) sizeof(struct S) + count * sizeof(typeof(fam))
1065 // 2) offsetof(struct S, fam) + count * sizeof(typeof(fam))
1066 //
1067 // The first will add additional padding after the end of the array
1068 // allocation while the second method is more precise, but not quite expected
1069 // from programmers. See
1070 // https://lore.kernel.org/lkml/ZvV6X5FPBBW7CO1f@archlinux/ for a discussion
1071 // of the topic.
1072 //
1073 // GCC isn't (currently) able to calculate __bdos on a pointer to the whole
1074 // structure. Therefore, because of the above issue, we choose to match what
1075 // GCC does for consistency's sake.
1076
1077 StructFieldAccess Visitor;
1078 E = Visitor.Visit(E);
1079 if (!E)
1080 return nullptr;
1081
1082 const Expr *Idx = Visitor.ArrayIndex;
1083 if (Idx) {
1084 if (Idx->HasSideEffects(getContext()))
1085 // We can't have side-effects.
1086 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1087
1088 if (const auto *IL = dyn_cast<IntegerLiteral>(Idx)) {
1089 int64_t Val = IL->getValue().getSExtValue();
1090 if (Val < 0)
1091 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1092
1093 // The index is 0, so we don't need to take it into account.
1094 if (Val == 0)
1095 Idx = nullptr;
1096 }
1097 }
1098
1099 // __counted_by on either a flexible array member or a pointer into a struct
1100 // with a flexible array member.
1101 if (const auto *ME = dyn_cast<MemberExpr>(E))
1102 return emitCountedByMemberSize(ME, Idx, EmittedE, Visitor.ArrayElementTy,
1103 Type, ResType);
1104
1105 // __counted_by on a pointer in a struct.
1106 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(E);
1107 ICE && ICE->getCastKind() == CK_LValueToRValue)
1108 return emitCountedByPointerSize(ICE, Idx, EmittedE, Visitor.ArrayElementTy,
1109 Type, ResType);
1110
1111 return nullptr;
1112}
1113
1115 llvm::Value *Res,
1116 llvm::Value *Index,
1117 llvm::IntegerType *ResType,
1118 bool IsSigned) {
1119 // cmp = (array_size >= 0)
1120 Value *Cmp = CGF.Builder.CreateIsNotNeg(Res);
1121 if (Index)
1122 // cmp = (cmp && index >= 0)
1123 Cmp = CGF.Builder.CreateAnd(CGF.Builder.CreateIsNotNeg(Index), Cmp);
1124
1125 // return cmp ? result : 0
1126 return CGF.Builder.CreateSelect(Cmp, Res,
1127 ConstantInt::get(ResType, 0, IsSigned));
1128}
1129
1130static std::pair<llvm::Value *, llvm::Value *>
1132 const FieldDecl *ArrayFD, const FieldDecl *CountFD,
1133 const Expr *Idx, llvm::IntegerType *ResType,
1134 bool IsSigned) {
1135 // count = ptr->count;
1136 Value *Count = CGF.EmitLoadOfCountedByField(ME, ArrayFD, CountFD);
1137 if (!Count)
1138 return std::make_pair<Value *>(nullptr, nullptr);
1139 Count = CGF.Builder.CreateIntCast(Count, ResType, IsSigned, "count");
1140
1141 // index = ptr->index;
1142 Value *Index = nullptr;
1143 if (Idx) {
1144 bool IdxSigned = Idx->getType()->isSignedIntegerType();
1145 Index = CGF.EmitScalarExpr(Idx);
1146 Index = CGF.Builder.CreateIntCast(Index, ResType, IdxSigned, "index");
1147 }
1148
1149 return std::make_pair(Count, Index);
1150}
1151
/// Emit the dynamic object size for a '__counted_by'/'__sized_by' annotated
/// pointer member: 'ptr->array' or '&((cast) ptr->array)[idx]'.
///
/// \param E The LValue-to-RValue cast whose sub-expression names the
///        annotated pointer member.
/// \param Idx The index expression applied to the pointer, or null; the
///        caller has already dropped literal-zero indices.
/// \param EmittedE The already-emitted pointer value (unused here; kept for
///        signature parity with the other emitters).
/// \param CastedArrayElementTy The pointer type the member was cast to at the
///        access site (may be null); its pointee sizes each indexed element.
/// \param Type The __builtin_object_size 'type' flags.
/// \param ResType The integer type the result must have.
/// \returns The computed size, or null when the expression doesn't match the
///        counted_by-pointer pattern.
llvm::Value *CodeGenFunction::emitCountedByPointerSize(
    const ImplicitCastExpr *E, const Expr *Idx, llvm::Value *EmittedE,
    QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
  assert(E->getCastKind() == CK_LValueToRValue &&
         "must be an LValue to RValue cast");

  const MemberExpr *ME =
      dyn_cast<MemberExpr>(E->getSubExpr()->IgnoreParenNoopCasts(getContext()));
  if (!ME)
    return nullptr;

  // The member must be a pointer carrying a counted_by/sized_by annotation.
  const auto *ArrayBaseFD = dyn_cast<FieldDecl>(ME->getMemberDecl());
  if (!ArrayBaseFD || !ArrayBaseFD->getType()->isPointerType() ||
      !ArrayBaseFD->getType()->isCountAttributedType())
    return nullptr;

  // Get the 'count' FieldDecl.
  const FieldDecl *CountFD = ArrayBaseFD->findCountedByField();
  if (!CountFD)
    // Can't find the field referenced by the "counted_by" attribute.
    return nullptr;

  // Calculate the array's object size using these formulae. (Note: if the
  // calculation is negative, we return 0.):
  //
  // struct p;
  // struct s {
  //   /* ... */
  //   struct p **array __attribute__((counted_by(count)));
  //   int count;
  // };
  //
  // 1) 'ptr->array':
  //
  //    count = ptr->count;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    result = array_size;
  //
  //    cmp = (result >= 0)
  //    return cmp ? result : 0;
  //
  // 2) '&((cast) ptr->array)[idx]':
  //
  //    count = ptr->count;
  //    index = idx;
  //
  //    array_element_size = sizeof (*ptr->array);
  //    array_size = count * array_element_size;
  //
  //    casted_array_element_size = sizeof (*((cast) ptr->array));
  //
  //    index_size = index * casted_array_element_size;
  //    result = array_size - index_size;
  //
  //    cmp = (result >= 0)
  //    if (index)
  //      cmp = (cmp && index > 0)
  //    return cmp ? result : 0;

  auto GetElementBaseSize = [&](QualType ElementTy) {
    CharUnits ElementSize =
        getContext().getTypeSizeInChars(ElementTy->getPointeeType());

    if (ElementSize.isZero()) {
      // This might be a __sized_by (or __counted_by) on a
      // 'void *', which counts bytes, not elements.
      [[maybe_unused]] auto *CAT = ElementTy->getAs<CountAttributedType>();
      assert(CAT && "must have an CountAttributedType");

      ElementSize = CharUnits::One();
    }

    return std::optional<CharUnits>(ElementSize);
  };

  // Get the sizes of the original array element and the casted array element,
  // if different.
  std::optional<CharUnits> ArrayElementBaseSize =
      GetElementBaseSize(ArrayBaseFD->getType());
  if (!ArrayElementBaseSize)
    return nullptr;

  std::optional<CharUnits> CastedArrayElementBaseSize = ArrayElementBaseSize;
  if (!CastedArrayElementTy.isNull() && CastedArrayElementTy->isPointerType()) {
    CastedArrayElementBaseSize = GetElementBaseSize(CastedArrayElementTy);
    if (!CastedArrayElementBaseSize)
      return nullptr;
  }

  bool IsSigned = CountFD->getType()->isSignedIntegerType();

  // count = ptr->count;
  // index = ptr->index;
  Value *Count, *Index;
  std::tie(Count, Index) = GetCountFieldAndIndex(
      *this, ME, ArrayBaseFD, CountFD, Idx, ResType, IsSigned);
  if (!Count)
    return nullptr;

  // array_element_size = sizeof (*ptr->array)
  auto *ArrayElementSize = llvm::ConstantInt::get(
      ResType, ArrayElementBaseSize->getQuantity(), IsSigned);

  // casted_array_element_size = sizeof (*((cast) ptr->array));
  auto *CastedArrayElementSize = llvm::ConstantInt::get(
      ResType, CastedArrayElementBaseSize->getQuantity(), IsSigned);

  // array_size = count * array_element_size;
  Value *ArraySize = Builder.CreateMul(Count, ArrayElementSize, "array_size",
                                       !IsSigned, IsSigned);

  // Option (1) 'ptr->array'
  // result = array_size
  Value *Result = ArraySize;

  if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
    // index_size = index * casted_array_element_size;
    Value *IndexSize = Builder.CreateMul(Index, CastedArrayElementSize,
                                         "index_size", !IsSigned, IsSigned);

    // result = result - index_size;
    Result =
        Builder.CreateSub(Result, IndexSize, "result", !IsSigned, IsSigned);
  }

  return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
}
1282
1283llvm::Value *CodeGenFunction::emitCountedByMemberSize(
1284 const MemberExpr *ME, const Expr *Idx, llvm::Value *EmittedE,
1285 QualType CastedArrayElementTy, unsigned Type, llvm::IntegerType *ResType) {
1286 const auto *FD = dyn_cast<FieldDecl>(ME->getMemberDecl());
1287 if (!FD)
1288 return nullptr;
1289
1290 // Find the flexible array member and check that it has the __counted_by
1291 // attribute.
1292 ASTContext &Ctx = getContext();
1293 const RecordDecl *RD = FD->getDeclContext()->getOuterLexicalRecordContext();
1294 const FieldDecl *FlexibleArrayMemberFD = nullptr;
1295
1297 Ctx, FD, FD->getType(), getLangOpts().getStrictFlexArraysLevel(),
1298 /*IgnoreTemplateOrMacroSubstitution=*/true))
1299 FlexibleArrayMemberFD = FD;
1300 else
1301 FlexibleArrayMemberFD = FindFlexibleArrayMemberField(*this, Ctx, RD);
1302
1303 if (!FlexibleArrayMemberFD ||
1304 !FlexibleArrayMemberFD->getType()->isCountAttributedType())
1305 return nullptr;
1306
1307 // Get the 'count' FieldDecl.
1308 const FieldDecl *CountFD = FlexibleArrayMemberFD->findCountedByField();
1309 if (!CountFD)
1310 // Can't find the field referenced by the "counted_by" attribute.
1311 return nullptr;
1312
1313 // Calculate the flexible array member's object size using these formulae.
1314 // (Note: if the calculation is negative, we return 0.):
1315 //
1316 // struct p;
1317 // struct s {
1318 // /* ... */
1319 // int count;
1320 // struct p *array[] __attribute__((counted_by(count)));
1321 // };
1322 //
1323 // 1) 'ptr->array':
1324 //
1325 // count = ptr->count;
1326 //
1327 // flexible_array_member_element_size = sizeof (*ptr->array);
1328 // flexible_array_member_size =
1329 // count * flexible_array_member_element_size;
1330 //
1331 // result = flexible_array_member_size;
1332 //
1333 // cmp = (result >= 0)
1334 // return cmp ? result : 0;
1335 //
1336 // 2) '&((cast) ptr->array)[idx]':
1337 //
1338 // count = ptr->count;
1339 // index = idx;
1340 //
1341 // flexible_array_member_element_size = sizeof (*ptr->array);
1342 // flexible_array_member_size =
1343 // count * flexible_array_member_element_size;
1344 //
1345 // casted_flexible_array_member_element_size =
1346 // sizeof (*((cast) ptr->array));
1347 // index_size = index * casted_flexible_array_member_element_size;
1348 //
1349 // result = flexible_array_member_size - index_size;
1350 //
1351 // cmp = (result >= 0)
1352 // if (index != 0)
1353 // cmp = (cmp && index >= 0)
1354 // return cmp ? result : 0;
1355 //
1356 // 3) '&ptr->field':
1357 //
1358 // count = ptr->count;
1359 // sizeof_struct = sizeof (struct s);
1360 //
1361 // flexible_array_member_element_size = sizeof (*ptr->array);
1362 // flexible_array_member_size =
1363 // count * flexible_array_member_element_size;
1364 //
1365 // field_offset = offsetof (struct s, field);
1366 // offset_diff = sizeof_struct - field_offset;
1367 //
1368 // result = offset_diff + flexible_array_member_size;
1369 //
1370 // cmp = (result >= 0)
1371 // return cmp ? result : 0;
1372 //
1373 // 4) '&((cast) ptr->field_array)[idx]':
1374 //
1375 // count = ptr->count;
1376 // index = idx;
1377 // sizeof_struct = sizeof (struct s);
1378 //
1379 // flexible_array_member_element_size = sizeof (*ptr->array);
1380 // flexible_array_member_size =
1381 // count * flexible_array_member_element_size;
1382 //
1383 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1384 // field_offset = offsetof (struct s, field)
1385 // field_offset += index * casted_field_element_size;
1386 //
1387 // offset_diff = sizeof_struct - field_offset;
1388 //
1389 // result = offset_diff + flexible_array_member_size;
1390 //
1391 // cmp = (result >= 0)
1392 // if (index != 0)
1393 // cmp = (cmp && index >= 0)
1394 // return cmp ? result : 0;
1395
1396 bool IsSigned = CountFD->getType()->isSignedIntegerType();
1397
1398 QualType FlexibleArrayMemberTy = FlexibleArrayMemberFD->getType();
1399
1400 // Explicit cast because otherwise the CharWidth will promote an i32's into
1401 // u64's leading to overflows.
1402 int64_t CharWidth = static_cast<int64_t>(CGM.getContext().getCharWidth());
1403
1404 // field_offset = offsetof (struct s, field);
1405 Value *FieldOffset = nullptr;
1406 if (FlexibleArrayMemberFD != FD) {
1407 std::optional<int64_t> Offset = GetFieldOffset(Ctx, RD, FD);
1408 if (!Offset)
1409 return nullptr;
1410 FieldOffset =
1411 llvm::ConstantInt::get(ResType, *Offset / CharWidth, IsSigned);
1412 }
1413
1414 // count = ptr->count;
1415 // index = ptr->index;
1416 Value *Count, *Index;
1417 std::tie(Count, Index) = GetCountFieldAndIndex(
1418 *this, ME, FlexibleArrayMemberFD, CountFD, Idx, ResType, IsSigned);
1419 if (!Count)
1420 return nullptr;
1421
1422 // flexible_array_member_element_size = sizeof (*ptr->array);
1423 const ArrayType *ArrayTy = Ctx.getAsArrayType(FlexibleArrayMemberTy);
1424 CharUnits BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1425 auto *FlexibleArrayMemberElementSize =
1426 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1427
1428 // flexible_array_member_size = count * flexible_array_member_element_size;
1429 Value *FlexibleArrayMemberSize =
1430 Builder.CreateMul(Count, FlexibleArrayMemberElementSize,
1431 "flexible_array_member_size", !IsSigned, IsSigned);
1432
1433 Value *Result = nullptr;
1434 if (FlexibleArrayMemberFD == FD) {
1435 if (Idx) { // Option (2) '&((cast) ptr->array)[idx]'
1436 // casted_flexible_array_member_element_size =
1437 // sizeof (*((cast) ptr->array));
1438 llvm::ConstantInt *CastedFlexibleArrayMemberElementSize =
1439 FlexibleArrayMemberElementSize;
1440 if (!CastedArrayElementTy.isNull() &&
1441 CastedArrayElementTy->isPointerType()) {
1442 CharUnits BaseSize =
1443 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1444 CastedFlexibleArrayMemberElementSize =
1445 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1446 }
1447
1448 // index_size = index * casted_flexible_array_member_element_size;
1449 Value *IndexSize =
1450 Builder.CreateMul(Index, CastedFlexibleArrayMemberElementSize,
1451 "index_size", !IsSigned, IsSigned);
1452
1453 // result = flexible_array_member_size - index_size;
1454 Result = Builder.CreateSub(FlexibleArrayMemberSize, IndexSize, "result",
1455 !IsSigned, IsSigned);
1456 } else { // Option (1) 'ptr->array'
1457 // result = flexible_array_member_size;
1458 Result = FlexibleArrayMemberSize;
1459 }
1460 } else {
1461 // sizeof_struct = sizeof (struct s);
1462 llvm::StructType *StructTy = getTypes().getCGRecordLayout(RD).getLLVMType();
1463 const llvm::DataLayout &Layout = CGM.getDataLayout();
1464 TypeSize Size = Layout.getTypeSizeInBits(StructTy);
1465 Value *SizeofStruct =
1466 llvm::ConstantInt::get(ResType, Size.getKnownMinValue() / CharWidth);
1467
1468 if (Idx) { // Option (4) '&((cast) ptr->field_array)[idx]'
1469 // casted_field_element_size = sizeof (*((cast) ptr->field_array));
1470 CharUnits BaseSize;
1471 if (!CastedArrayElementTy.isNull() &&
1472 CastedArrayElementTy->isPointerType()) {
1473 BaseSize =
1474 Ctx.getTypeSizeInChars(CastedArrayElementTy->getPointeeType());
1475 } else {
1476 const ArrayType *ArrayTy = Ctx.getAsArrayType(FD->getType());
1477 BaseSize = Ctx.getTypeSizeInChars(ArrayTy->getElementType());
1478 }
1479
1480 llvm::ConstantInt *CastedFieldElementSize =
1481 llvm::ConstantInt::get(ResType, BaseSize.getQuantity(), IsSigned);
1482
1483 // field_offset += index * casted_field_element_size;
1484 Value *Mul = Builder.CreateMul(Index, CastedFieldElementSize,
1485 "field_offset", !IsSigned, IsSigned);
1486 FieldOffset = Builder.CreateAdd(FieldOffset, Mul);
1487 }
1488 // Option (3) '&ptr->field', and Option (4) continuation.
1489 // offset_diff = flexible_array_member_offset - field_offset;
1490 Value *OffsetDiff = Builder.CreateSub(SizeofStruct, FieldOffset,
1491 "offset_diff", !IsSigned, IsSigned);
1492
1493 // result = offset_diff + flexible_array_member_size;
1494 Result = Builder.CreateAdd(FlexibleArrayMemberSize, OffsetDiff, "result");
1495 }
1496
1497 return EmitPositiveResultOrZero(*this, Result, Index, ResType, IsSigned);
1498}
1499
1500/// Returns a Value corresponding to the size of the given expression.
1501/// This Value may be either of the following:
1502/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
1503/// it)
1504/// - A call to the @llvm.objectsize intrinsic
1505///
1506/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
1507/// and we wouldn't otherwise try to reference a pass_object_size parameter,
1508/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
1509llvm::Value *
1510CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
1511 llvm::IntegerType *ResType,
1512 llvm::Value *EmittedE, bool IsDynamic) {
1513 // We need to reference an argument if the pointer is a parameter with the
1514 // pass_object_size attribute.
1515 if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
1516 auto *Param = dyn_cast<ParmVarDecl>(D->getDecl());
1517 auto *PS = D->getDecl()->getAttr<PassObjectSizeAttr>();
1518 if (Param != nullptr && PS != nullptr &&
1519 areBOSTypesCompatible(PS->getType(), Type)) {
1520 auto Iter = SizeArguments.find(Param);
1521 assert(Iter != SizeArguments.end());
1522
1523 const ImplicitParamDecl *D = Iter->second;
1524 auto DIter = LocalDeclMap.find(D);
1525 assert(DIter != LocalDeclMap.end());
1526
1527 return EmitLoadOfScalar(DIter->second, /*Volatile=*/false,
1528 getContext().getSizeType(), E->getBeginLoc());
1529 }
1530 }
1531
1532 // LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
1533 // evaluate E for side-effects. In either case, we shouldn't lower to
1534 // @llvm.objectsize.
1535 if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
1536 return getDefaultBuiltinObjectSizeResult(Type, ResType);
1537
1538 Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
1539 assert(Ptr->getType()->isPointerTy() &&
1540 "Non-pointer passed to __builtin_object_size?");
1541
1542 if (IsDynamic)
1543 // Emit special code for a flexible array member with the "counted_by"
1544 // attribute.
1545 if (Value *V = emitCountedBySize(E, Ptr, Type, ResType))
1546 return V;
1547
1548 Function *F =
1549 CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
1550
1551 // LLVM only supports 0 and 2, make sure that we pass along that as a boolean.
1552 Value *Min = Builder.getInt1((Type & 2) != 0);
1553 // For GCC compatibility, __builtin_object_size treat NULL as unknown size.
1554 Value *NullIsUnknown = Builder.getTrue();
1555 Value *Dynamic = Builder.getInt1(IsDynamic);
1556 return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown, Dynamic});
1557}
1558
namespace {
/// A struct to generically describe a bit test intrinsic.
struct BitTest {
  /// What the intrinsic does to the addressed bit after reading it.
  enum ActionKind : uint8_t { TestOnly, Complement, Reset, Set };
  /// Memory ordering for the interlocked forms (Unlocked = not atomic).
  enum InterlockingKind : uint8_t {
    Unlocked,
    Sequential,
    Acquire,
    Release,
    NoFence
  };

  ActionKind Action;             // Read-modify-write action on the bit.
  InterlockingKind Interlocking; // Atomicity/ordering of the access.
  bool Is64Bit;                  // True for the 64-bit word variants.

  /// Decode one of the _bittest*/_interlockedbittest* builtin IDs into a
  /// BitTest description.
  static BitTest decodeBitTestBuiltin(unsigned BuiltinID);
};

} // namespace
1579
1580BitTest BitTest::decodeBitTestBuiltin(unsigned BuiltinID) {
1581 switch (BuiltinID) {
1582 // Main portable variants.
1583 case Builtin::BI_bittest:
1584 return {TestOnly, Unlocked, false};
1585 case Builtin::BI_bittestandcomplement:
1586 return {Complement, Unlocked, false};
1587 case Builtin::BI_bittestandreset:
1588 return {Reset, Unlocked, false};
1589 case Builtin::BI_bittestandset:
1590 return {Set, Unlocked, false};
1591 case Builtin::BI_interlockedbittestandreset:
1592 return {Reset, Sequential, false};
1593 case Builtin::BI_interlockedbittestandset:
1594 return {Set, Sequential, false};
1595
1596 // 64-bit variants.
1597 case Builtin::BI_bittest64:
1598 return {TestOnly, Unlocked, true};
1599 case Builtin::BI_bittestandcomplement64:
1600 return {Complement, Unlocked, true};
1601 case Builtin::BI_bittestandreset64:
1602 return {Reset, Unlocked, true};
1603 case Builtin::BI_bittestandset64:
1604 return {Set, Unlocked, true};
1605 case Builtin::BI_interlockedbittestandreset64:
1606 return {Reset, Sequential, true};
1607 case Builtin::BI_interlockedbittestandset64:
1608 return {Set, Sequential, true};
1609
1610 // ARM/AArch64-specific ordering variants.
1611 case Builtin::BI_interlockedbittestandset_acq:
1612 return {Set, Acquire, false};
1613 case Builtin::BI_interlockedbittestandset_rel:
1614 return {Set, Release, false};
1615 case Builtin::BI_interlockedbittestandset_nf:
1616 return {Set, NoFence, false};
1617 case Builtin::BI_interlockedbittestandreset_acq:
1618 return {Reset, Acquire, false};
1619 case Builtin::BI_interlockedbittestandreset_rel:
1620 return {Reset, Release, false};
1621 case Builtin::BI_interlockedbittestandreset_nf:
1622 return {Reset, NoFence, false};
1623 case Builtin::BI_interlockedbittestandreset64_acq:
1624 return {Reset, Acquire, false};
1625 case Builtin::BI_interlockedbittestandreset64_rel:
1626 return {Reset, Release, false};
1627 case Builtin::BI_interlockedbittestandreset64_nf:
1628 return {Reset, NoFence, false};
1629 case Builtin::BI_interlockedbittestandset64_acq:
1630 return {Set, Acquire, false};
1631 case Builtin::BI_interlockedbittestandset64_rel:
1632 return {Set, Release, false};
1633 case Builtin::BI_interlockedbittestandset64_nf:
1634 return {Set, NoFence, false};
1635 }
1636 llvm_unreachable("expected only bittest intrinsics");
1637}
1638
1639static char bitActionToX86BTCode(BitTest::ActionKind A) {
1640 switch (A) {
1641 case BitTest::TestOnly: return '\0';
1642 case BitTest::Complement: return 'c';
1643 case BitTest::Reset: return 'r';
1644 case BitTest::Set: return 's';
1645 }
1646 llvm_unreachable("invalid action");
1647}
1648
1650 BitTest BT,
1651 const CallExpr *E, Value *BitBase,
1652 Value *BitPos) {
1653 char Action = bitActionToX86BTCode(BT.Action);
1654 char SizeSuffix = BT.Is64Bit ? 'q' : 'l';
1655
1656 // Build the assembly.
1658 raw_svector_ostream AsmOS(Asm);
1659 if (BT.Interlocking != BitTest::Unlocked)
1660 AsmOS << "lock ";
1661 AsmOS << "bt";
1662 if (Action)
1663 AsmOS << Action;
1664 AsmOS << SizeSuffix << " $2, ($1)";
1665
1666 // Build the constraints. FIXME: We should support immediates when possible.
1667 std::string Constraints = "={@ccc},r,r,~{cc},~{memory}";
1668 std::string_view MachineClobbers = CGF.getTarget().getClobbers();
1669 if (!MachineClobbers.empty()) {
1670 Constraints += ',';
1671 Constraints += MachineClobbers;
1672 }
1673 llvm::IntegerType *IntType = llvm::IntegerType::get(
1674 CGF.getLLVMContext(),
1675 CGF.getContext().getTypeSize(E->getArg(1)->getType()));
1676 llvm::FunctionType *FTy =
1677 llvm::FunctionType::get(CGF.Int8Ty, {CGF.DefaultPtrTy, IntType}, false);
1678
1679 llvm::InlineAsm *IA =
1680 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
1681 return CGF.Builder.CreateCall(IA, {BitBase, BitPos});
1682}
1683
1684static llvm::AtomicOrdering
1685getBitTestAtomicOrdering(BitTest::InterlockingKind I) {
1686 switch (I) {
1687 case BitTest::Unlocked: return llvm::AtomicOrdering::NotAtomic;
1688 case BitTest::Sequential: return llvm::AtomicOrdering::SequentiallyConsistent;
1689 case BitTest::Acquire: return llvm::AtomicOrdering::Acquire;
1690 case BitTest::Release: return llvm::AtomicOrdering::Release;
1691 case BitTest::NoFence: return llvm::AtomicOrdering::Monotonic;
1692 }
1693 llvm_unreachable("invalid interlocking");
1694}
1695
1696static llvm::Value *EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E) {
1697 llvm::Value *ArgValue = CGF.EmitScalarExpr(E);
1698 llvm::Type *ArgType = ArgValue->getType();
1699
1700 // Boolean vectors can be casted directly to its bitfield representation. We
1701 // intentionally do not round up to the next power of two size and let LLVM
1702 // handle the trailing bits.
1703 if (auto *VT = dyn_cast<llvm::FixedVectorType>(ArgType);
1704 VT && VT->getElementType()->isIntegerTy(1)) {
1705 llvm::Type *StorageType =
1706 llvm::Type::getIntNTy(CGF.getLLVMContext(), VT->getNumElements());
1707 ArgValue = CGF.Builder.CreateBitCast(ArgValue, StorageType);
1708 }
1709
1710 return ArgValue;
1711}
1712
1713/// Emit a _bittest* intrinsic. These intrinsics take a pointer to an array of
1714/// bits and a bit position and read and optionally modify the bit at that
1715/// position. The position index can be arbitrarily large, i.e. it can be larger
1716/// than 31 or 63, so we need an indexed load in the general case.
1717static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
1718 unsigned BuiltinID,
1719 const CallExpr *E) {
1720 Value *BitBase = CGF.EmitScalarExpr(E->getArg(0));
1721 Value *BitPos = CGF.EmitScalarExpr(E->getArg(1));
1722
 // Decode which action (test/set/reset/complement) and which interlocking
 // flavor this particular builtin requests.
1723 BitTest BT = BitTest::decodeBitTestBuiltin(BuiltinID);
1724
1725 // X86 has special BT, BTC, BTR, and BTS instructions that handle the array
1726 // indexing operation internally. Use them if possible.
1727 if (CGF.getTarget().getTriple().isX86())
1728 return EmitX86BitTestIntrinsic(CGF, BT, E, BitBase, BitPos);
1729
1730 // Otherwise, use generic code to load one byte and test the bit. Use all but
1731 // the bottom three bits as the array index, and the bottom three bits to form
1732 // a mask.
1733 // Bit = BitBaseI8[BitPos >> 3] & (1 << (BitPos & 0x7)) != 0;
 // Note the *arithmetic* shift: the position is treated as signed, so a
 // negative position addresses bytes before BitBase.
1734 Value *ByteIndex = CGF.Builder.CreateAShr(
1735 BitPos, llvm::ConstantInt::get(BitPos->getType(), 3), "bittest.byteidx");
1736 Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBase, ByteIndex,
1737 "bittest.byteaddr"),
1738 CGF.Int8Ty, CharUnits::One());
1739 Value *PosLow =
1740 CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
1741 llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
1742
1743 // The updating instructions will need a mask.
1744 Value *Mask = nullptr;
1745 if (BT.Action != BitTest::TestOnly) {
1746 Mask = CGF.Builder.CreateShl(llvm::ConstantInt::get(CGF.Int8Ty, 1), PosLow,
1747 "bittest.mask");
1748 }
1749
1750 // Check the action and ordering of the interlocked intrinsics.
1751 llvm::AtomicOrdering Ordering = getBitTestAtomicOrdering(BT.Interlocking);
1752
1753 Value *OldByte = nullptr;
1754 if (Ordering != llvm::AtomicOrdering::NotAtomic) {
1755 // Emit a combined atomicrmw load/store operation for the interlocked
1756 // intrinsics.
 // Set/Complement map to atomic OR; Reset maps to atomic AND with the
 // inverted mask. (Complement is not an interlocked flavor here, so OR
 // with the mask is correct for the remaining Set case.)
1757 llvm::AtomicRMWInst::BinOp RMWOp = llvm::AtomicRMWInst::Or;
1758 if (BT.Action == BitTest::Reset) {
1759 Mask = CGF.Builder.CreateNot(Mask);
1760 RMWOp = llvm::AtomicRMWInst::And;
1761 }
1762 OldByte = CGF.Builder.CreateAtomicRMW(RMWOp, ByteAddr, Mask, Ordering);
1763 } else {
1764 // Emit a plain load for the non-interlocked intrinsics.
1765 OldByte = CGF.Builder.CreateLoad(ByteAddr, "bittest.byte");
1766 Value *NewByte = nullptr;
1767 switch (BT.Action) {
1768 case BitTest::TestOnly:
1769 // Don't store anything.
1770 break;
1771 case BitTest::Complement:
1772 NewByte = CGF.Builder.CreateXor(OldByte, Mask);
1773 break;
1774 case BitTest::Reset:
1775 NewByte = CGF.Builder.CreateAnd(OldByte, CGF.Builder.CreateNot(Mask));
1776 break;
1777 case BitTest::Set:
1778 NewByte = CGF.Builder.CreateOr(OldByte, Mask);
1779 break;
1780 }
1781 if (NewByte)
1782 CGF.Builder.CreateStore(NewByte, ByteAddr);
1783 }
1784
1785 // However we loaded the old byte, either by plain load or atomicrmw, shift
1786 // the bit into the low position and mask it to 0 or 1.
1787 Value *ShiftedByte = CGF.Builder.CreateLShr(OldByte, PosLow, "bittest.shr");
1788 return CGF.Builder.CreateAnd(
1789 ShiftedByte, llvm::ConstantInt::get(CGF.Int8Ty, 1), "bittest.res");
1790}
1791
1792namespace {
// Which MSVC CRT setjmp entry point to call; selects both the symbol name
// and the extra argument passed alongside the jump buffer.
1793enum class MSVCSetJmpKind {
1794 _setjmpex,
1795 _setjmp3,
1796 _setjmp
1797};
1798}
1799
1800/// MSVC handles setjmp a bit differently on different platforms. On every
1801/// architecture except 32-bit x86, the frame address is passed. On x86, extra
1802/// parameters can be passed as variadic arguments, but we always pass none.
1803static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind,
1804 const CallExpr *E) {
1805 llvm::Value *Arg1 = nullptr;
1806 llvm::Type *Arg1Ty = nullptr;
1807 StringRef Name;
1808 bool IsVarArg = false;
1809 if (SJKind == MSVCSetJmpKind::_setjmp3) {
 // _setjmp3 takes a trailing count of extra variadic arguments; we always
 // pass a count of zero.
1810 Name = "_setjmp3";
1811 Arg1Ty = CGF.Int32Ty;
1812 Arg1 = llvm::ConstantInt::get(CGF.IntTy, 0);
1813 IsVarArg = true;
1814 } else {
1815 Name = SJKind == MSVCSetJmpKind::_setjmp ? "_setjmp" : "_setjmpex";
1816 Arg1Ty = CGF.Int8PtrTy;
 // On AArch64 the second argument is the SP value at function entry
 // (llvm.sponentry); elsewhere it is the current frame address.
1817 if (CGF.getTarget().getTriple().getArch() == llvm::Triple::aarch64) {
1818 Arg1 = CGF.Builder.CreateCall(
1819 CGF.CGM.getIntrinsic(Intrinsic::sponentry, CGF.AllocaInt8PtrTy));
1820 } else
1821 Arg1 = CGF.Builder.CreateCall(
1822 CGF.CGM.getIntrinsic(Intrinsic::frameaddress, CGF.AllocaInt8PtrTy),
1823 llvm::ConstantInt::get(CGF.Int32Ty, 0));
1824 }
1825
1826 // Mark the call site and declaration with ReturnsTwice.
1827 llvm::Type *ArgTypes[2] = {CGF.Int8PtrTy, Arg1Ty};
1828 llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
1829 CGF.getLLVMContext(), llvm::AttributeList::FunctionIndex,
1830 llvm::Attribute::ReturnsTwice);
1831 llvm::FunctionCallee SetJmpFn = CGF.CGM.CreateRuntimeFunction(
1832 llvm::FunctionType::get(CGF.IntTy, ArgTypes, IsVarArg), Name,
1833 ReturnsTwiceAttr, /*Local=*/true);
1834
1835 llvm::Value *Buf = CGF.Builder.CreateBitOrPointerCast(
1836 CGF.EmitScalarExpr(E->getArg(0)), CGF.Int8PtrTy);
1837 llvm::Value *Args[] = {Buf, Arg1};
1838 llvm::CallBase *CB = CGF.EmitRuntimeCallOrInvoke(SetJmpFn, Args);
1839 CB->setAttributes(ReturnsTwiceAttr);
1840 return RValue::get(CB);
1841}
1842
1843// Emit an MSVC intrinsic. Assumes that arguments have *not* been evaluated.
1845 const CallExpr *E) {
1846 switch (BuiltinID) {
 // _BitScanForward / _BitScanReverse: if the input is zero, return 0 and
 // leave the index output untouched; otherwise store the index of the
 // lowest (forward) or highest (reverse) set bit and return 1.
1849 Address IndexAddress(EmitPointerWithAlignment(E->getArg(0)));
1850 Value *ArgValue = EmitScalarExpr(E->getArg(1));
1851
1852 llvm::Type *ArgType = ArgValue->getType();
1853 llvm::Type *IndexType = IndexAddress.getElementType();
1854 llvm::Type *ResultType = ConvertType(E->getType());
1855
1856 Value *ArgZero = llvm::Constant::getNullValue(ArgType);
1857 Value *ResZero = llvm::Constant::getNullValue(ResultType);
1858 Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
1859
 // Create the merge block (and its PHI) first, then branch on the
 // zero-input test from the current block.
1860 BasicBlock *Begin = Builder.GetInsertBlock();
1861 BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
1862 Builder.SetInsertPoint(End);
1863 PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
1864
1865 Builder.SetInsertPoint(Begin);
1866 Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
1867 BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
1868 Builder.CreateCondBr(IsZero, End, NotZero);
1869 Result->addIncoming(ResZero, Begin);
1870
1871 Builder.SetInsertPoint(NotZero);
1872
1873 if (BuiltinID == MSVCIntrin::_BitScanForward) {
 // cttz with is_zero_poison=true is safe: the zero case was branched
 // away above.
1874 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
1875 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1876 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1877 Builder.CreateStore(ZeroCount, IndexAddress, false);
1878 } else {
 // _BitScanReverse: index of the highest set bit is (width-1) - ctlz.
1879 unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
1880 Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
1881
1882 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
1883 Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
1884 ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
1885 Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
1886 Builder.CreateStore(Index, IndexAddress, false);
1887 }
1888 Builder.CreateBr(End);
1889 Result->addIncoming(ResOne, NotZero);
1890
1891 Builder.SetInsertPoint(End);
1892 return Result;
1893 }
 // The _Interlocked* family lowers to atomicrmw; the plain variants use
 // the default (sequentially consistent) ordering, the _acq/_rel/_nf
 // variants use acquire/release/monotonic respectively.
1895 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
1897 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
1899 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
1901 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
1903 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
1905 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
1907 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1908 AtomicOrdering::Acquire);
1910 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1911 AtomicOrdering::Release);
1913 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E,
1914 AtomicOrdering::Monotonic);
1916 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1917 AtomicOrdering::Acquire);
1919 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1920 AtomicOrdering::Release);
1922 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E,
1923 AtomicOrdering::Monotonic);
1925 return EmitAtomicCmpXchgForMSIntrin(*this, E);
1927 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Acquire);
1929 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Release);
1931 return EmitAtomicCmpXchgForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1934 *this, E, AtomicOrdering::SequentiallyConsistent);
1936 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Acquire);
1938 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Release);
1940 return EmitAtomicCmpXchg128ForMSIntrin(*this, E, AtomicOrdering::Monotonic);
1942 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1943 AtomicOrdering::Acquire);
1945 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1946 AtomicOrdering::Release);
1948 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E,
1949 AtomicOrdering::Monotonic);
1951 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1952 AtomicOrdering::Acquire);
1954 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1955 AtomicOrdering::Release);
1957 return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E,
1958 AtomicOrdering::Monotonic);
1960 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1961 AtomicOrdering::Acquire);
1963 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1964 AtomicOrdering::Release);
1966 return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E,
1967 AtomicOrdering::Monotonic);
1969 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Acquire);
1971 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Release);
1973 return EmitAtomicIncrementValue(*this, E, AtomicOrdering::Monotonic);
1975 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Acquire);
1977 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Release);
1979 return EmitAtomicDecrementValue(*this, E, AtomicOrdering::Monotonic);
1980
1982 return EmitAtomicDecrementValue(*this, E);
1984 return EmitAtomicIncrementValue(*this, E);
1985
1987 // Request immediate process termination from the kernel. The instruction
1988 // sequences to do this are documented on MSDN:
1989 // https://msdn.microsoft.com/en-us/library/dn774154.aspx
1990 llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
1991 StringRef Asm, Constraints;
1992 switch (ISA) {
1993 default:
1994 ErrorUnsupported(E, "__fastfail call for this architecture");
1995 break;
1996 case llvm::Triple::x86:
1997 case llvm::Triple::x86_64:
1998 Asm = "int $$0x29";
1999 Constraints = "{cx}";
2000 break;
2001 case llvm::Triple::thumb:
2002 Asm = "udf #251";
2003 Constraints = "{r0}";
2004 break;
2005 case llvm::Triple::aarch64:
2006 Asm = "brk #0xF003";
2007 Constraints = "{w0}";
2008 }
 // The failure code is passed in the architecture-specific register named
 // by Constraints; the call never returns.
2009 llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
2010 llvm::InlineAsm *IA =
2011 llvm::InlineAsm::get(FTy, Asm, Constraints, /*hasSideEffects=*/true);
2012 llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
2013 getLLVMContext(), llvm::AttributeList::FunctionIndex,
2014 llvm::Attribute::NoReturn);
2015 llvm::CallInst *CI = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
2016 CI->setAttributes(NoReturnAttr);
2017 return CI;
2018 }
2019 }
2020 llvm_unreachable("Incorrect MSVC intrinsic!");
2021}
2022
2023namespace {
2024// ARC cleanup for __builtin_os_log_format
// On scope exit, emits an ARC "intrinsic use" of the captured value so the
// ARC optimizer keeps it alive until this point.
2025struct CallObjCArcUse final : EHScopeStack::Cleanup {
2026 CallObjCArcUse(llvm::Value *object) : object(object) {}
2027 llvm::Value *object;
2028
2029 void Emit(CodeGenFunction &CGF, Flags flags) override {
2030 CGF.EmitARCIntrinsicUse(object);
2031 }
2032};
2033}
2034
// Evaluate the argument of a clz/ctz-style builtin; under -fsanitize=builtin,
// additionally emit a runtime check that the value is nonzero (passing zero
// to these builtins is invalid). Returns the evaluated argument either way.
2036 BuiltinCheckKind Kind) {
2037 assert((Kind == BCK_CLZPassedZero || Kind == BCK_CTZPassedZero) &&
2038 "Unsupported builtin check kind");
2039
2040 Value *ArgValue = EmitBitCountExpr(*this, E);
2041 if (!SanOpts.has(SanitizerKind::Builtin))
2042 return ArgValue;
2043
2044 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2045 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2046 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
 // The check passes when the argument is != 0.
2047 Value *Cond = Builder.CreateICmpNE(
2048 ArgValue, llvm::Constant::getNullValue(ArgValue->getType()));
2049 EmitCheck(std::make_pair(Cond, CheckOrdinal), CheckHandler,
2051 llvm::ConstantInt::get(Builder.getInt8Ty(), Kind)},
2052 {});
2053 return ArgValue;
2054}
2055
 // Evaluate the condition of __builtin_assume as a bool; under
 // -fsanitize=builtin, emit a check that fires when the assumed condition is
 // actually false at runtime. Returns the evaluated condition.
2057 Value *ArgValue = EvaluateExprAsBool(E);
2058 if (!SanOpts.has(SanitizerKind::Builtin))
2059 return ArgValue;
2060
2061 auto CheckOrdinal = SanitizerKind::SO_Builtin;
2062 auto CheckHandler = SanitizerHandler::InvalidBuiltin;
2063 SanitizerDebugLocation SanScope(this, {CheckOrdinal}, CheckHandler);
2064 EmitCheck(
2065 std::make_pair(ArgValue, CheckOrdinal), CheckHandler,
2067 llvm::ConstantInt::get(Builder.getInt8Ty(), BCK_AssumePassedFalse)},
2068 {});
2069 return ArgValue;
2070}
2071
2072static Value *EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW) {
2073 return CGF.Builder.CreateBinaryIntrinsic(
2074 Intrinsic::abs, ArgValue,
2075 ConstantInt::get(CGF.Builder.getInt1Ty(), HasNSW));
2076}
2077
// Emit an overflow-checked absolute value: computes 0 - x via
// llvm.ssub.with.overflow and selects between the negation and the original
// value. Overflow (x == INT_MIN) either traps or calls the sanitizer handler,
// depending on SanitizeOverflow.
2079 bool SanitizeOverflow) {
2080 Value *ArgValue = CGF.EmitScalarExpr(E->getArg(0));
2081
2082 // Try to eliminate overflow check.
 // A constant operand other than INT_MIN can never overflow, so the plain
 // llvm.abs (with the poison flag set) suffices.
2083 if (const auto *VCI = dyn_cast<llvm::ConstantInt>(ArgValue)) {
2084 if (!VCI->isMinSignedValue())
2085 return EmitAbs(CGF, ArgValue, true);
2086 }
2087
2089 SanitizerHandler CheckHandler;
2090 if (SanitizeOverflow) {
2091 Ordinals.push_back(SanitizerKind::SO_SignedIntegerOverflow);
2092 CheckHandler = SanitizerHandler::NegateOverflow;
2093 } else
2094 CheckHandler = SanitizerHandler::SubOverflow;
2095
2096 SanitizerDebugLocation SanScope(&CGF, Ordinals, CheckHandler);
2097
 // abs(x) is computed as (x < 0 ? 0 - x : x); the subtraction carries the
 // overflow bit we check below.
2098 Constant *Zero = Constant::getNullValue(ArgValue->getType());
2099 Value *ResultAndOverflow = CGF.Builder.CreateBinaryIntrinsic(
2100 Intrinsic::ssub_with_overflow, Zero, ArgValue);
2101 Value *Result = CGF.Builder.CreateExtractValue(ResultAndOverflow, 0);
2102 Value *NotOverflow = CGF.Builder.CreateNot(
2103 CGF.Builder.CreateExtractValue(ResultAndOverflow, 1));
2104
2105 // TODO: support -ftrapv-handler.
2106 if (SanitizeOverflow) {
2107 CGF.EmitCheck({{NotOverflow, SanitizerKind::SO_SignedIntegerOverflow}},
2108 CheckHandler,
2111 {ArgValue});
2112 } else
2113 CGF.EmitTrapCheck(NotOverflow, CheckHandler);
2114
2115 Value *CmpResult = CGF.Builder.CreateICmpSLT(ArgValue, Zero, "abscond");
2116 return CGF.Builder.CreateSelect(CmpResult, Result, ArgValue, "abs");
2117}
2118
2119/// Get the argument type for arguments to os_log_helper.
// Arguments are passed as canonical unsigned integers of the item's byte
// size (Size bytes -> Size*8-bit unsigned type).
2121 QualType UnsignedTy = C.getIntTypeForBitwidth(Size * 8, /*Signed=*/false);
2122 return C.getCanonicalType(UnsignedTy);
2123}
2124
// Generate (or reuse) the linkonce_odr helper that serializes an os_log
// buffer: it writes the summary byte, the argument-count byte, and then each
// item's descriptor/size bytes followed by the item's data.
2127 CharUnits BufferAlignment) {
2128 ASTContext &Ctx = getContext();
2129
 // The helper's name encodes alignment, header bytes and every item's
 // size/descriptor, so layout-identical call sites share one function.
2131 {
2132 raw_svector_ostream OS(Name);
2133 OS << "__os_log_helper";
2134 OS << "_" << BufferAlignment.getQuantity();
2135 OS << "_" << int(Layout.getSummaryByte());
2136 OS << "_" << int(Layout.getNumArgsByte());
2137 for (const auto &Item : Layout.Items)
2138 OS << "_" << int(Item.getSizeByte()) << "_"
2139 << int(Item.getDescriptorByte());
2140 }
2141
 // Reuse a previously emitted helper with the same mangled layout name.
2142 if (llvm::Function *F = CGM.getModule().getFunction(Name))
2143 return F;
2144
2146 FunctionArgList Args;
2147 Args.push_back(ImplicitParamDecl::Create(
2148 Ctx, nullptr, SourceLocation(), &Ctx.Idents.get("buffer"), Ctx.VoidPtrTy,
2150 ArgTys.emplace_back(Ctx.VoidPtrTy);
2151
 // One unsigned parameter per item that actually carries data.
2152 for (unsigned int I = 0, E = Layout.Items.size(); I < E; ++I) {
2153 char Size = Layout.Items[I].getSizeByte();
2154 if (!Size)
2155 continue;
2156
2157 QualType ArgTy = getOSLogArgType(Ctx, Size);
2158 Args.push_back(ImplicitParamDecl::Create(
2159 Ctx, nullptr, SourceLocation(),
2160 &Ctx.Idents.get(std::string("arg") + llvm::to_string(I)), ArgTy,
2162 ArgTys.emplace_back(ArgTy);
2163 }
2164
2165 QualType ReturnTy = Ctx.VoidTy;
2166
2167 // The helper function has linkonce_odr linkage to enable the linker to merge
2168 // identical functions. To ensure the merging always happens, 'noinline' is
2169 // attached to the function when compiling with -Oz.
2170 const CGFunctionInfo &FI =
2171 CGM.getTypes().arrangeBuiltinFunctionDeclaration(ReturnTy, Args);
2172 llvm::FunctionType *FuncTy = CGM.getTypes().GetFunctionType(FI);
2173 llvm::Function *Fn = llvm::Function::Create(
2174 FuncTy, llvm::GlobalValue::LinkOnceODRLinkage, Name, &CGM.getModule());
2175 Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
2176 CGM.SetLLVMFunctionAttributes(GlobalDecl(), FI, Fn, /*IsThunk=*/false);
2177 CGM.SetLLVMFunctionAttributesForDefinition(nullptr, Fn);
2178 Fn->setDoesNotThrow();
2179
2180 // Attach 'noinline' at -Oz.
2181 if (CGM.getCodeGenOpts().OptimizeSize == 2)
2182 Fn->addFnAttr(llvm::Attribute::NoInline);
2183
2184 auto NL = ApplyDebugLocation::CreateEmpty(*this);
2185 StartFunction(GlobalDecl(), ReturnTy, Fn, FI, Args);
2186
2187 // Create a scope with an artificial location for the body of this function.
2188 auto AL = ApplyDebugLocation::CreateArtificial(*this);
2189
 // Serialize: summary byte, arg-count byte, then per item a descriptor
 // byte, a size byte, and the raw argument data.
2190 CharUnits Offset;
2192 Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Ctx.VoidTy,
2193 BufferAlignment);
2194 Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
2195 Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
2196 Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
2197 Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
2198
2199 unsigned I = 1;
2200 for (const auto &Item : Layout.Items) {
2201 Builder.CreateStore(
2202 Builder.getInt8(Item.getDescriptorByte()),
2203 Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
2204 Builder.CreateStore(
2205 Builder.getInt8(Item.getSizeByte()),
2206 Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
2207
2208 CharUnits Size = Item.size();
2209 if (!Size.getQuantity())
2210 continue;
2211
2212 Address Arg = GetAddrOfLocalVar(Args[I]);
2213 Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
2214 Addr = Addr.withElementType(Arg.getElementType());
2215 Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
2216 Offset += Size;
2217 ++I;
2218 }
2219
2221
2222 return Fn;
2223}
2224
 // Lower __builtin_os_log_format: compute the buffer layout from the format
 // string, marshal each argument to an integer of its serialized width, and
 // call the shared __os_log_helper for this layout. Returns the buffer
 // pointer (argument 0).
2226 assert(E.getNumArgs() >= 2 &&
2227 "__builtin_os_log_format takes at least 2 arguments");
2228 ASTContext &Ctx = getContext();
2231 Address BufAddr = EmitPointerWithAlignment(E.getArg(0));
2232
2233 // Ignore argument 1, the format string. It is not currently used.
2234 CallArgList Args;
2235 Args.add(RValue::get(BufAddr.emitRawPointer(*this)), Ctx.VoidPtrTy);
2236
2237 for (const auto &Item : Layout.Items) {
2238 int Size = Item.getSizeByte();
2239 if (!Size)
2240 continue;
2241
2242 llvm::Value *ArgVal;
2243
2244 if (Item.getKind() == analyze_os_log::OSLogBufferItem::MaskKind) {
 // Pack the mask-type string into a little-endian 64-bit constant.
2245 uint64_t Val = 0;
2246 for (unsigned I = 0, E = Item.getMaskType().size(); I < E; ++I)
2247 Val |= ((uint64_t)Item.getMaskType()[I]) << I * 8;
2248 ArgVal = llvm::Constant::getIntegerValue(Int64Ty, llvm::APInt(64, Val));
2249 } else if (const Expr *TheExpr = Item.getExpr()) {
2250 ArgVal = EmitScalarExpr(TheExpr, /*Ignore*/ false);
2251
2252 // If a temporary object that requires destruction after the full
2253 // expression is passed, push a lifetime-extended cleanup to extend its
2254 // lifetime to the end of the enclosing block scope.
2255 auto LifetimeExtendObject = [&](const Expr *E) {
2256 E = E->IgnoreParenCasts();
2257 // Extend lifetimes of objects returned by function calls and message
2258 // sends.
2259
2260 // FIXME: We should do this in other cases in which temporaries are
2261 // created including arguments of non-ARC types (e.g., C++
2262 // temporaries).
2264 return true;
2265 return false;
2266 };
2267
2268 if (TheExpr->getType()->isObjCRetainableType() &&
2269 getLangOpts().ObjCAutoRefCount && LifetimeExtendObject(TheExpr)) {
2270 assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
2271 "Only scalar can be a ObjC retainable type");
2272 if (!isa<Constant>(ArgVal)) {
2273 CleanupKind Cleanup = getARCCleanupKind();
2274 QualType Ty = TheExpr->getType();
 // Retain into a temporary so the object survives until the enclosing
 // block scope ends, with a matching lifetime-extended release.
2276 RawAddress Addr = CreateMemTemp(Ty, "os.log.arg", &Alloca);
2277 ArgVal = EmitARCRetain(Ty, ArgVal);
2278 Builder.CreateStore(ArgVal, Addr);
2279 pushLifetimeExtendedDestroy(Cleanup, Alloca, Ty,
2281 Cleanup & EHCleanup);
2282
2283 // Push a clang.arc.use call to ensure ARC optimizer knows that the
2284 // argument has to be alive.
2285 if (CGM.getCodeGenOpts().OptimizationLevel != 0)
2287 }
2288 }
2289 } else {
2290 ArgVal = Builder.getInt32(Item.getConstValue().getQuantity());
2291 }
2292
 // Reinterpret the value as an integer of its in-memory bit width, then
 // widen (or bitcast) to the helper's unsigned parameter type.
2293 unsigned ArgValSize =
2294 CGM.getDataLayout().getTypeSizeInBits(ArgVal->getType());
2295 llvm::IntegerType *IntTy = llvm::Type::getIntNTy(getLLVMContext(),
2296 ArgValSize);
2297 ArgVal = Builder.CreateBitOrPointerCast(ArgVal, IntTy);
2298 CanQualType ArgTy = getOSLogArgType(Ctx, Size);
2299 // If ArgVal has type x86_fp80, zero-extend ArgVal.
2300 ArgVal = Builder.CreateZExtOrBitCast(ArgVal, ConvertType(ArgTy));
2301 Args.add(RValue::get(ArgVal), ArgTy);
2302 }
2303
2304 const CGFunctionInfo &FI =
2305 CGM.getTypes().arrangeBuiltinFunctionCall(Ctx.VoidTy, Args);
2306 llvm::Function *F = CodeGenFunction(CGM).generateBuiltinOSLogHelperFunction(
2307 Layout, BufAddr.getAlignment());
2309 return RValue::get(BufAddr, *this);
2310}
2311
// True for __builtin_mul_overflow where both operands are unsigned, the
// result is signed, and all three share one width — a shape with a cheaper
// specialized lowering (see the emitter below this predicate).
2313 unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info,
2314 WidthAndSignedness ResultInfo) {
2315 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2316 Op1Info.Width == Op2Info.Width && Op2Info.Width == ResultInfo.Width &&
2317 !Op1Info.Signed && !Op2Info.Signed && ResultInfo.Signed;
2318}
2319
// Emit the specialized lowering for an unsigned*unsigned multiply with a
// signed result of the same width: do the unsigned overflow-checked multiply,
// then additionally flag overflow when the product exceeds the signed maximum.
2321 CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info,
2322 const clang::Expr *Op2, WidthAndSignedness Op2Info,
2323 const clang::Expr *ResultArg, QualType ResultQTy,
2324 WidthAndSignedness ResultInfo) {
2326 Builtin::BI__builtin_mul_overflow, Op1Info, Op2Info, ResultInfo) &&
2327 "Cannot specialize this multiply");
2328
2329 llvm::Value *V1 = CGF.EmitScalarExpr(Op1);
2330 llvm::Value *V2 = CGF.EmitScalarExpr(Op2);
2331
2332 llvm::Value *HasOverflow;
2333 llvm::Value *Result = EmitOverflowIntrinsic(
2334 CGF, Intrinsic::umul_with_overflow, V1, V2, HasOverflow);
2335
2336 // The intrinsic call will detect overflow when the value is > UINT_MAX,
2337 // however, since the original builtin had a signed result, we need to report
2338 // an overflow when the result is greater than INT_MAX.
2339 auto IntMax = llvm::APInt::getSignedMaxValue(ResultInfo.Width);
2340 llvm::Value *IntMaxValue = llvm::ConstantInt::get(Result->getType(), IntMax);
2341
2342 llvm::Value *IntMaxOverflow = CGF.Builder.CreateICmpUGT(Result, IntMaxValue);
2343 HasOverflow = CGF.Builder.CreateOr(HasOverflow, IntMaxOverflow);
2344
 // Honor volatility of the result pointee when storing the product.
2345 bool isVolatile =
2346 ResultArg->getType()->getPointeeType().isVolatileQualified();
2347 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2348 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2349 isVolatile);
2350 return RValue::get(HasOverflow);
2351}
2352
2353/// Determine if a binop is a checked mixed-sign multiply we can specialize.
2354static bool isSpecialMixedSignMultiply(unsigned BuiltinID,
2355 WidthAndSignedness Op1Info,
2356 WidthAndSignedness Op2Info,
2357 WidthAndSignedness ResultInfo) {
2358 return BuiltinID == Builtin::BI__builtin_mul_overflow &&
2359 std::max(Op1Info.Width, Op2Info.Width) >= ResultInfo.Width &&
2360 Op1Info.Signed != Op2Info.Signed;
2361}
2362
2363/// Emit a checked mixed-sign multiply. This is a cheaper specialization of
2364/// the generic checked-binop irgen.
2365static RValue
2367 WidthAndSignedness Op1Info, const clang::Expr *Op2,
2368 WidthAndSignedness Op2Info,
2369 const clang::Expr *ResultArg, QualType ResultQTy,
2370 WidthAndSignedness ResultInfo) {
2371 assert(isSpecialMixedSignMultiply(Builtin::BI__builtin_mul_overflow, Op1Info,
2372 Op2Info, ResultInfo) &&
2373 "Not a mixed-sign multipliction we can specialize");
2374
2375 // Emit the signed and unsigned operands.
2376 const clang::Expr *SignedOp = Op1Info.Signed ? Op1 : Op2;
2377 const clang::Expr *UnsignedOp = Op1Info.Signed ? Op2 : Op1;
2378 llvm::Value *Signed = CGF.EmitScalarExpr(SignedOp);
2379 llvm::Value *Unsigned = CGF.EmitScalarExpr(UnsignedOp);
2380 unsigned SignedOpWidth = Op1Info.Signed ? Op1Info.Width : Op2Info.Width;
2381 unsigned UnsignedOpWidth = Op1Info.Signed ? Op2Info.Width : Op1Info.Width;
2382
2383 // One of the operands may be smaller than the other. If so, [s|z]ext it.
2384 if (SignedOpWidth < UnsignedOpWidth)
2385 Signed = CGF.Builder.CreateSExt(Signed, Unsigned->getType(), "op.sext");
2386 if (UnsignedOpWidth < SignedOpWidth)
2387 Unsigned = CGF.Builder.CreateZExt(Unsigned, Signed->getType(), "op.zext");
2388
2389 llvm::Type *OpTy = Signed->getType();
2390 llvm::Value *Zero = llvm::Constant::getNullValue(OpTy);
2391 Address ResultPtr = CGF.EmitPointerWithAlignment(ResultArg);
2392 llvm::Type *ResTy = CGF.getTypes().ConvertType(ResultQTy);
2393 unsigned OpWidth = std::max(Op1Info.Width, Op2Info.Width);
2394
2395 // Take the absolute value of the signed operand.
 // |s| * u is computed unsigned; the sign is reapplied afterwards.
2396 llvm::Value *IsNegative = CGF.Builder.CreateICmpSLT(Signed, Zero);
2397 llvm::Value *AbsOfNegative = CGF.Builder.CreateSub(Zero, Signed);
2398 llvm::Value *AbsSigned =
2399 CGF.Builder.CreateSelect(IsNegative, AbsOfNegative, Signed);
2400
2401 // Perform a checked unsigned multiplication.
2402 llvm::Value *UnsignedOverflow;
2403 llvm::Value *UnsignedResult =
2404 EmitOverflowIntrinsic(CGF, Intrinsic::umul_with_overflow, AbsSigned,
2405 Unsigned, UnsignedOverflow);
2406
2407 llvm::Value *Overflow, *Result;
2408 if (ResultInfo.Signed) {
2409 // Signed overflow occurs if the result is greater than INT_MAX or lesser
2410 // than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
2411 auto IntMax =
2412 llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
2413 llvm::Value *MaxResult =
2414 CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
2415 CGF.Builder.CreateZExt(IsNegative, OpTy));
2416 llvm::Value *SignedOverflow =
2417 CGF.Builder.CreateICmpUGT(UnsignedResult, MaxResult);
2418 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, SignedOverflow);
2419
2420 // Prepare the signed result (possibly by negating it).
2421 llvm::Value *NegativeResult = CGF.Builder.CreateNeg(UnsignedResult);
2422 llvm::Value *SignedResult =
2423 CGF.Builder.CreateSelect(IsNegative, NegativeResult, UnsignedResult);
2424 Result = CGF.Builder.CreateTrunc(SignedResult, ResTy);
2425 } else {
2426 // Unsigned overflow occurs if the result is < 0 or greater than UINT_MAX.
2427 llvm::Value *Underflow = CGF.Builder.CreateAnd(
2428 IsNegative, CGF.Builder.CreateIsNotNull(UnsignedResult));
2429 Overflow = CGF.Builder.CreateOr(UnsignedOverflow, Underflow);
2430 if (ResultInfo.Width < OpWidth) {
 // Narrower result: anything above the result type's UINT_MAX is also
 // an overflow.
2431 auto IntMax =
2432 llvm::APInt::getMaxValue(ResultInfo.Width).zext(OpWidth);
2433 llvm::Value *TruncOverflow = CGF.Builder.CreateICmpUGT(
2434 UnsignedResult, llvm::ConstantInt::get(OpTy, IntMax));
2435 Overflow = CGF.Builder.CreateOr(Overflow, TruncOverflow);
2436 }
2437
2438 // Negate the product if it would be negative in infinite precision.
2439 Result = CGF.Builder.CreateSelect(
2440 IsNegative, CGF.Builder.CreateNeg(UnsignedResult), UnsignedResult);
2441
2442 Result = CGF.Builder.CreateTrunc(Result, ResTy);
2443 }
2444 assert(Overflow && Result && "Missing overflow or result");
2445
2446 bool isVolatile =
2447 ResultArg->getType()->getPointeeType().isVolatileQualified();
2448 CGF.Builder.CreateStore(CGF.EmitToMemory(Result, ResultQTy), ResultPtr,
2449 isVolatile);
2450 return RValue::get(Overflow);
2451}
2452
2453static bool
// Recursive worker: true if Ty (stripped of array types) is a dynamic class
// or transitively contains a field of dynamic class type. Seen guards
// against revisiting (and against recursive record types).
2455 llvm::SmallPtrSetImpl<const Decl *> &Seen) {
2456 if (const auto *Arr = Ctx.getAsArrayType(Ty))
2457 Ty = Ctx.getBaseElementType(Arr);
2458
2459 const auto *Record = Ty->getAsCXXRecordDecl();
2460 if (!Record)
2461 return false;
2462
2463 // We've already checked this type, or are in the process of checking it.
2464 if (!Seen.insert(Record).second)
2465 return false;
2466
2467 assert(Record->hasDefinition() &&
2468 "Incomplete types should already be diagnosed");
2469
2470 if (Record->isDynamicClass())
2471 return true;
2472
2473 for (FieldDecl *F : Record->fields()) {
2474 if (TypeRequiresBuiltinLaunderImp(Ctx, F->getType(), Seen))
2475 return true;
2476 }
2477 return false;
2478}
2479
2480/// Determine if the specified type requires laundering by checking if it is a
2481/// dynamic class type or contains a subobject which is a dynamic class type.
// Laundering only matters under -fstrict-vtable-pointers; otherwise
// __builtin_launder can be a no-op.
2483 if (!CGM.getCodeGenOpts().StrictVTablePointers)
2484 return false;
2486 return TypeRequiresBuiltinLaunderImp(CGM.getContext(), Ty, Seen);
2487}
2488
2489RValue CodeGenFunction::emitRotate(const CallExpr *E, bool IsRotateRight) {
2490 llvm::Value *Src = EmitScalarExpr(E->getArg(0));
2491 llvm::Value *ShiftAmt = EmitScalarExpr(E->getArg(1));
2492
2493 // The builtin's shift arg may have a different type than the source arg and
2494 // result, but the LLVM intrinsic uses the same type for all values.
2495 llvm::Type *Ty = Src->getType();
2496 llvm::Type *ShiftTy = ShiftAmt->getType();
2497
2498 unsigned BitWidth = Ty->getIntegerBitWidth();
2499
2500 // Normalize shift amount to [0, BitWidth) range to match runtime behavior.
2501 // This matches the algorithm in ExprConstant.cpp for constant evaluation.
2502 if (BitWidth == 1) {
2503 // Rotating a 1-bit value is always a no-op
2504 ShiftAmt = ConstantInt::get(ShiftTy, 0);
2505 } else if (BitWidth == 2) {
2506 // For 2-bit values: rotation amount is 0 or 1 based on
2507 // whether the amount is even or odd. We can't use srem here because
2508 // the divisor (2) would be misinterpreted as -2 in 2-bit signed arithmetic.
2509 llvm::Value *One = ConstantInt::get(ShiftTy, 1);
2510 ShiftAmt = Builder.CreateAnd(ShiftAmt, One);
2511 } else {
2512 unsigned ShiftAmtBitWidth = ShiftTy->getIntegerBitWidth();
2513 bool ShiftAmtIsSigned = E->getArg(1)->getType()->isSignedIntegerType();
2514
2515 // Choose the wider type for the divisor to avoid truncation
2516 llvm::Type *DivisorTy = ShiftAmtBitWidth > BitWidth ? ShiftTy : Ty;
2517 llvm::Value *Divisor = ConstantInt::get(DivisorTy, BitWidth);
2518
2519 // Extend ShiftAmt to match Divisor width if needed
2520 if (ShiftAmtBitWidth < DivisorTy->getIntegerBitWidth()) {
2521 ShiftAmt = Builder.CreateIntCast(ShiftAmt, DivisorTy, ShiftAmtIsSigned);
2522 }
2523
2524 // Normalize to [0, BitWidth)
2525 llvm::Value *RemResult;
2526 if (ShiftAmtIsSigned) {
2527 RemResult = Builder.CreateSRem(ShiftAmt, Divisor);
2528 // Signed remainder can be negative, convert to positive equivalent
2529 llvm::Value *Zero = ConstantInt::get(DivisorTy, 0);
2530 llvm::Value *IsNegative = Builder.CreateICmpSLT(RemResult, Zero);
2531 llvm::Value *PositiveShift = Builder.CreateAdd(RemResult, Divisor);
2532 ShiftAmt = Builder.CreateSelect(IsNegative, PositiveShift, RemResult);
2533 } else {
2534 ShiftAmt = Builder.CreateURem(ShiftAmt, Divisor);
2535 }
2536 }
2537
2538 // Convert to the source type if needed
2539 if (ShiftAmt->getType() != Ty) {
2540 ShiftAmt = Builder.CreateIntCast(ShiftAmt, Ty, false);
2541 }
2542
2543 // Rotate is a special case of LLVM funnel shift - 1st 2 args are the same.
2544 unsigned IID = IsRotateRight ? Intrinsic::fshr : Intrinsic::fshl;
2545 Function *F = CGM.getIntrinsic(IID, Ty);
2546 return RValue::get(Builder.CreateCall(F, {Src, Src, ShiftAmt}));
2547}
2548
2549// Map math builtins for long-double to f128 version.
// Used on targets whose long double is IEEE quad: each __builtin_<func>l is
// rewritten to the matching __builtin_<func>f128; any other ID is returned
// unchanged.
2550static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID) {
2551 switch (BuiltinID) {
2552#define MUTATE_LDBL(func) \
2553 case Builtin::BI__builtin_##func##l: \
2554 return Builtin::BI__builtin_##func##f128;
2585 MUTATE_LDBL(nans)
2586 MUTATE_LDBL(inf)
2605 MUTATE_LDBL(huge_val)
2615#undef MUTATE_LDBL
2616 default:
2617 return BuiltinID;
2618 }
2619}
2620
2621static Value *tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID,
2622 Value *V) {
2623 if (CGF.Builder.getIsFPConstrained() &&
2624 CGF.Builder.getDefaultConstrainedExcept() != fp::ebIgnore) {
2625 if (Value *Result =
2626 CGF.getTargetHooks().testFPKind(V, BuiltinID, CGF.Builder, CGF.CGM))
2627 return Result;
2628 }
2629 return nullptr;
2630}
2631
// hipstdpar: a builtin with no device-side support is lowered to a call to a
// synthesized "<name>__hipstdpar_unsupported" symbol with poison arguments,
// deferring the failure diagnosis to a later pass / link step.
2633 const FunctionDecl *FD) {
2634 auto Name = FD->getNameAsString() + "__hipstdpar_unsupported";
2635 auto FnTy = CGF->CGM.getTypes().GetFunctionType(FD);
2636 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, FnTy);
2637
 // The arguments are never meaningfully used, so pass poison of each
 // formal parameter type.
2639 for (auto &&FormalTy : FnTy->params())
2640 Args.push_back(llvm::PoisonValue::get(FormalTy));
2641
2642 return RValue::get(CGF->Builder.CreateCall(UBF, Args));
2643}
2644
2646 const CallExpr *E,
2648 assert(!getContext().BuiltinInfo.isImmediate(BuiltinID) &&
2649 "Should not codegen for consteval builtins");
2650
2651 const FunctionDecl *FD = GD.getDecl()->getAsFunction();
2652 // See if we can constant fold this builtin. If so, don't emit it at all.
2653 // TODO: Extend this handling to all builtin calls that we can constant-fold.
2655 if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
2656 !Result.hasSideEffects()) {
2657 if (Result.Val.isInt())
2658 return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
2659 Result.Val.getInt()));
2660 if (Result.Val.isFloat())
2661 return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
2662 Result.Val.getFloat()));
2663 }
2664
2665 // If current long-double semantics is IEEE 128-bit, replace math builtins
2666 // of long-double with f128 equivalent.
2667 // TODO: This mutation should also be applied to other targets other than PPC,
2668 // after backend supports IEEE 128-bit style libcalls.
2669 if (getTarget().getTriple().isPPC64() &&
2670 &getTarget().getLongDoubleFormat() == &llvm::APFloat::IEEEquad())
2671 BuiltinID = mutateLongDoubleBuiltin(BuiltinID);
2672
2673 // If the builtin has been declared explicitly with an assembler label,
2674 // disable the specialized emitting below. Ideally we should communicate the
2675 // rename in IR, or at least avoid generating the intrinsic calls that are
2676 // likely to get lowered to the renamed library functions.
2677 const unsigned BuiltinIDIfNoAsmLabel =
2678 FD->hasAttr<AsmLabelAttr>() ? 0 : BuiltinID;
2679
2680 std::optional<bool> ErrnoOverriden;
2681 // ErrnoOverriden is true if math-errno is overridden via the
2682 // '#pragma float_control(precise, on)'. This pragma disables fast-math,
2683 // which implies math-errno.
2684 if (E->hasStoredFPFeatures()) {
2686 if (OP.hasMathErrnoOverride())
2687 ErrnoOverriden = OP.getMathErrnoOverride();
2688 }
2689 // True if '__attribute__((optnone))' is used. This attribute overrides
2690 // fast-math which implies math-errno.
2691 bool OptNone = CurFuncDecl && CurFuncDecl->hasAttr<OptimizeNoneAttr>();
2692
2693 bool IsOptimizationEnabled = CGM.getCodeGenOpts().OptimizationLevel != 0;
2694
2695 bool GenerateFPMathIntrinsics =
2697 BuiltinID, CGM.getTriple(), ErrnoOverriden, getLangOpts().MathErrno,
2698 OptNone, IsOptimizationEnabled);
2699
2700 if (GenerateFPMathIntrinsics) {
2701 switch (BuiltinIDIfNoAsmLabel) {
2702 case Builtin::BIacos:
2703 case Builtin::BIacosf:
2704 case Builtin::BIacosl:
2705 case Builtin::BI__builtin_acos:
2706 case Builtin::BI__builtin_acosf:
2707 case Builtin::BI__builtin_acosf16:
2708 case Builtin::BI__builtin_acosl:
2709 case Builtin::BI__builtin_acosf128:
2710 case Builtin::BI__builtin_elementwise_acos:
2712 *this, E, Intrinsic::acos, Intrinsic::experimental_constrained_acos));
2713
2714 case Builtin::BIasin:
2715 case Builtin::BIasinf:
2716 case Builtin::BIasinl:
2717 case Builtin::BI__builtin_asin:
2718 case Builtin::BI__builtin_asinf:
2719 case Builtin::BI__builtin_asinf16:
2720 case Builtin::BI__builtin_asinl:
2721 case Builtin::BI__builtin_asinf128:
2722 case Builtin::BI__builtin_elementwise_asin:
2724 *this, E, Intrinsic::asin, Intrinsic::experimental_constrained_asin));
2725
2726 case Builtin::BIatan:
2727 case Builtin::BIatanf:
2728 case Builtin::BIatanl:
2729 case Builtin::BI__builtin_atan:
2730 case Builtin::BI__builtin_atanf:
2731 case Builtin::BI__builtin_atanf16:
2732 case Builtin::BI__builtin_atanl:
2733 case Builtin::BI__builtin_atanf128:
2734 case Builtin::BI__builtin_elementwise_atan:
2736 *this, E, Intrinsic::atan, Intrinsic::experimental_constrained_atan));
2737
2738 case Builtin::BIatan2:
2739 case Builtin::BIatan2f:
2740 case Builtin::BIatan2l:
2741 case Builtin::BI__builtin_atan2:
2742 case Builtin::BI__builtin_atan2f:
2743 case Builtin::BI__builtin_atan2f16:
2744 case Builtin::BI__builtin_atan2l:
2745 case Builtin::BI__builtin_atan2f128:
2746 case Builtin::BI__builtin_elementwise_atan2:
2748 *this, E, Intrinsic::atan2,
2749 Intrinsic::experimental_constrained_atan2));
2750
2751 case Builtin::BIceil:
2752 case Builtin::BIceilf:
2753 case Builtin::BIceill:
2754 case Builtin::BI__builtin_ceil:
2755 case Builtin::BI__builtin_ceilf:
2756 case Builtin::BI__builtin_ceilf16:
2757 case Builtin::BI__builtin_ceill:
2758 case Builtin::BI__builtin_ceilf128:
2759 case Builtin::BI__builtin_elementwise_ceil:
2761 Intrinsic::ceil,
2762 Intrinsic::experimental_constrained_ceil));
2763
2764 case Builtin::BIcopysign:
2765 case Builtin::BIcopysignf:
2766 case Builtin::BIcopysignl:
2767 case Builtin::BI__builtin_copysign:
2768 case Builtin::BI__builtin_copysignf:
2769 case Builtin::BI__builtin_copysignf16:
2770 case Builtin::BI__builtin_copysignl:
2771 case Builtin::BI__builtin_copysignf128:
2772 return RValue::get(
2773 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
2774
2775 case Builtin::BIcos:
2776 case Builtin::BIcosf:
2777 case Builtin::BIcosl:
2778 case Builtin::BI__builtin_cos:
2779 case Builtin::BI__builtin_cosf:
2780 case Builtin::BI__builtin_cosf16:
2781 case Builtin::BI__builtin_cosl:
2782 case Builtin::BI__builtin_cosf128:
2783 case Builtin::BI__builtin_elementwise_cos:
2785 Intrinsic::cos,
2786 Intrinsic::experimental_constrained_cos));
2787
2788 case Builtin::BIcosh:
2789 case Builtin::BIcoshf:
2790 case Builtin::BIcoshl:
2791 case Builtin::BI__builtin_cosh:
2792 case Builtin::BI__builtin_coshf:
2793 case Builtin::BI__builtin_coshf16:
2794 case Builtin::BI__builtin_coshl:
2795 case Builtin::BI__builtin_coshf128:
2796 case Builtin::BI__builtin_elementwise_cosh:
2798 *this, E, Intrinsic::cosh, Intrinsic::experimental_constrained_cosh));
2799
2800 case Builtin::BIexp:
2801 case Builtin::BIexpf:
2802 case Builtin::BIexpl:
2803 case Builtin::BI__builtin_exp:
2804 case Builtin::BI__builtin_expf:
2805 case Builtin::BI__builtin_expf16:
2806 case Builtin::BI__builtin_expl:
2807 case Builtin::BI__builtin_expf128:
2808 case Builtin::BI__builtin_elementwise_exp:
2810 Intrinsic::exp,
2811 Intrinsic::experimental_constrained_exp));
2812
2813 case Builtin::BIexp2:
2814 case Builtin::BIexp2f:
2815 case Builtin::BIexp2l:
2816 case Builtin::BI__builtin_exp2:
2817 case Builtin::BI__builtin_exp2f:
2818 case Builtin::BI__builtin_exp2f16:
2819 case Builtin::BI__builtin_exp2l:
2820 case Builtin::BI__builtin_exp2f128:
2821 case Builtin::BI__builtin_elementwise_exp2:
2823 Intrinsic::exp2,
2824 Intrinsic::experimental_constrained_exp2));
2825 case Builtin::BI__builtin_exp10:
2826 case Builtin::BI__builtin_exp10f:
2827 case Builtin::BI__builtin_exp10f16:
2828 case Builtin::BI__builtin_exp10l:
2829 case Builtin::BI__builtin_exp10f128:
2830 case Builtin::BI__builtin_elementwise_exp10: {
2831 // TODO: strictfp support
2832 if (Builder.getIsFPConstrained())
2833 break;
2834 return RValue::get(
2835 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::exp10));
2836 }
2837 case Builtin::BIfabs:
2838 case Builtin::BIfabsf:
2839 case Builtin::BIfabsl:
2840 case Builtin::BI__builtin_fabs:
2841 case Builtin::BI__builtin_fabsf:
2842 case Builtin::BI__builtin_fabsf16:
2843 case Builtin::BI__builtin_fabsl:
2844 case Builtin::BI__builtin_fabsf128:
2845 return RValue::get(
2846 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs));
2847
2848 case Builtin::BIfloor:
2849 case Builtin::BIfloorf:
2850 case Builtin::BIfloorl:
2851 case Builtin::BI__builtin_floor:
2852 case Builtin::BI__builtin_floorf:
2853 case Builtin::BI__builtin_floorf16:
2854 case Builtin::BI__builtin_floorl:
2855 case Builtin::BI__builtin_floorf128:
2856 case Builtin::BI__builtin_elementwise_floor:
2858 Intrinsic::floor,
2859 Intrinsic::experimental_constrained_floor));
2860
2861 case Builtin::BIfma:
2862 case Builtin::BIfmaf:
2863 case Builtin::BIfmal:
2864 case Builtin::BI__builtin_fma:
2865 case Builtin::BI__builtin_fmaf:
2866 case Builtin::BI__builtin_fmaf16:
2867 case Builtin::BI__builtin_fmal:
2868 case Builtin::BI__builtin_fmaf128:
2869 case Builtin::BI__builtin_elementwise_fma:
2871 Intrinsic::fma,
2872 Intrinsic::experimental_constrained_fma));
2873
2874 case Builtin::BIfmax:
2875 case Builtin::BIfmaxf:
2876 case Builtin::BIfmaxl:
2877 case Builtin::BI__builtin_fmax:
2878 case Builtin::BI__builtin_fmaxf:
2879 case Builtin::BI__builtin_fmaxf16:
2880 case Builtin::BI__builtin_fmaxl:
2881 case Builtin::BI__builtin_fmaxf128: {
2882 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2883 Builder.getFastMathFlags().setNoSignedZeros();
2885 *this, E, Intrinsic::maxnum,
2886 Intrinsic::experimental_constrained_maxnum));
2887 }
2888
2889 case Builtin::BIfmin:
2890 case Builtin::BIfminf:
2891 case Builtin::BIfminl:
2892 case Builtin::BI__builtin_fmin:
2893 case Builtin::BI__builtin_fminf:
2894 case Builtin::BI__builtin_fminf16:
2895 case Builtin::BI__builtin_fminl:
2896 case Builtin::BI__builtin_fminf128: {
2897 IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
2898 Builder.getFastMathFlags().setNoSignedZeros();
2900 *this, E, Intrinsic::minnum,
2901 Intrinsic::experimental_constrained_minnum));
2902 }
2903
2904 case Builtin::BIfmaximum_num:
2905 case Builtin::BIfmaximum_numf:
2906 case Builtin::BIfmaximum_numl:
2907 case Builtin::BI__builtin_fmaximum_num:
2908 case Builtin::BI__builtin_fmaximum_numf:
2909 case Builtin::BI__builtin_fmaximum_numf16:
2910 case Builtin::BI__builtin_fmaximum_numl:
2911 case Builtin::BI__builtin_fmaximum_numf128:
2912 return RValue::get(
2913 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::maximumnum));
2914
2915 case Builtin::BIfminimum_num:
2916 case Builtin::BIfminimum_numf:
2917 case Builtin::BIfminimum_numl:
2918 case Builtin::BI__builtin_fminimum_num:
2919 case Builtin::BI__builtin_fminimum_numf:
2920 case Builtin::BI__builtin_fminimum_numf16:
2921 case Builtin::BI__builtin_fminimum_numl:
2922 case Builtin::BI__builtin_fminimum_numf128:
2923 return RValue::get(
2924 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::minimumnum));
2925
2926 // fmod() is a special-case. It maps to the frem instruction rather than an
2927 // LLVM intrinsic.
2928 case Builtin::BIfmod:
2929 case Builtin::BIfmodf:
2930 case Builtin::BIfmodl:
2931 case Builtin::BI__builtin_fmod:
2932 case Builtin::BI__builtin_fmodf:
2933 case Builtin::BI__builtin_fmodf16:
2934 case Builtin::BI__builtin_fmodl:
2935 case Builtin::BI__builtin_fmodf128:
2936 case Builtin::BI__builtin_elementwise_fmod: {
2937 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
2938 Value *Arg1 = EmitScalarExpr(E->getArg(0));
2939 Value *Arg2 = EmitScalarExpr(E->getArg(1));
2940 if (Builder.getIsFPConstrained()) {
2941 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_frem,
2942 Arg1->getType());
2943 return RValue::get(Builder.CreateConstrainedFPCall(F, {Arg1, Arg2}));
2944 } else {
2945 return RValue::get(Builder.CreateFRem(Arg1, Arg2, "fmod"));
2946 }
2947 }
2948
2949 case Builtin::BIlog:
2950 case Builtin::BIlogf:
2951 case Builtin::BIlogl:
2952 case Builtin::BI__builtin_log:
2953 case Builtin::BI__builtin_logf:
2954 case Builtin::BI__builtin_logf16:
2955 case Builtin::BI__builtin_logl:
2956 case Builtin::BI__builtin_logf128:
2957 case Builtin::BI__builtin_elementwise_log:
2959 Intrinsic::log,
2960 Intrinsic::experimental_constrained_log));
2961
2962 case Builtin::BIlog10:
2963 case Builtin::BIlog10f:
2964 case Builtin::BIlog10l:
2965 case Builtin::BI__builtin_log10:
2966 case Builtin::BI__builtin_log10f:
2967 case Builtin::BI__builtin_log10f16:
2968 case Builtin::BI__builtin_log10l:
2969 case Builtin::BI__builtin_log10f128:
2970 case Builtin::BI__builtin_elementwise_log10:
2972 Intrinsic::log10,
2973 Intrinsic::experimental_constrained_log10));
2974
2975 case Builtin::BIlog2:
2976 case Builtin::BIlog2f:
2977 case Builtin::BIlog2l:
2978 case Builtin::BI__builtin_log2:
2979 case Builtin::BI__builtin_log2f:
2980 case Builtin::BI__builtin_log2f16:
2981 case Builtin::BI__builtin_log2l:
2982 case Builtin::BI__builtin_log2f128:
2983 case Builtin::BI__builtin_elementwise_log2:
2985 Intrinsic::log2,
2986 Intrinsic::experimental_constrained_log2));
2987
2988 case Builtin::BInearbyint:
2989 case Builtin::BInearbyintf:
2990 case Builtin::BInearbyintl:
2991 case Builtin::BI__builtin_nearbyint:
2992 case Builtin::BI__builtin_nearbyintf:
2993 case Builtin::BI__builtin_nearbyintl:
2994 case Builtin::BI__builtin_nearbyintf128:
2995 case Builtin::BI__builtin_elementwise_nearbyint:
2997 Intrinsic::nearbyint,
2998 Intrinsic::experimental_constrained_nearbyint));
2999
3000 case Builtin::BIpow:
3001 case Builtin::BIpowf:
3002 case Builtin::BIpowl:
3003 case Builtin::BI__builtin_pow:
3004 case Builtin::BI__builtin_powf:
3005 case Builtin::BI__builtin_powf16:
3006 case Builtin::BI__builtin_powl:
3007 case Builtin::BI__builtin_powf128:
3008 case Builtin::BI__builtin_elementwise_pow:
3010 Intrinsic::pow,
3011 Intrinsic::experimental_constrained_pow));
3012
3013 case Builtin::BIrint:
3014 case Builtin::BIrintf:
3015 case Builtin::BIrintl:
3016 case Builtin::BI__builtin_rint:
3017 case Builtin::BI__builtin_rintf:
3018 case Builtin::BI__builtin_rintf16:
3019 case Builtin::BI__builtin_rintl:
3020 case Builtin::BI__builtin_rintf128:
3021 case Builtin::BI__builtin_elementwise_rint:
3023 Intrinsic::rint,
3024 Intrinsic::experimental_constrained_rint));
3025
3026 case Builtin::BIround:
3027 case Builtin::BIroundf:
3028 case Builtin::BIroundl:
3029 case Builtin::BI__builtin_round:
3030 case Builtin::BI__builtin_roundf:
3031 case Builtin::BI__builtin_roundf16:
3032 case Builtin::BI__builtin_roundl:
3033 case Builtin::BI__builtin_roundf128:
3034 case Builtin::BI__builtin_elementwise_round:
3036 Intrinsic::round,
3037 Intrinsic::experimental_constrained_round));
3038
3039 case Builtin::BIroundeven:
3040 case Builtin::BIroundevenf:
3041 case Builtin::BIroundevenl:
3042 case Builtin::BI__builtin_roundeven:
3043 case Builtin::BI__builtin_roundevenf:
3044 case Builtin::BI__builtin_roundevenf16:
3045 case Builtin::BI__builtin_roundevenl:
3046 case Builtin::BI__builtin_roundevenf128:
3047 case Builtin::BI__builtin_elementwise_roundeven:
3049 Intrinsic::roundeven,
3050 Intrinsic::experimental_constrained_roundeven));
3051
3052 case Builtin::BIsin:
3053 case Builtin::BIsinf:
3054 case Builtin::BIsinl:
3055 case Builtin::BI__builtin_sin:
3056 case Builtin::BI__builtin_sinf:
3057 case Builtin::BI__builtin_sinf16:
3058 case Builtin::BI__builtin_sinl:
3059 case Builtin::BI__builtin_sinf128:
3060 case Builtin::BI__builtin_elementwise_sin:
3062 Intrinsic::sin,
3063 Intrinsic::experimental_constrained_sin));
3064
3065 case Builtin::BIsinh:
3066 case Builtin::BIsinhf:
3067 case Builtin::BIsinhl:
3068 case Builtin::BI__builtin_sinh:
3069 case Builtin::BI__builtin_sinhf:
3070 case Builtin::BI__builtin_sinhf16:
3071 case Builtin::BI__builtin_sinhl:
3072 case Builtin::BI__builtin_sinhf128:
3073 case Builtin::BI__builtin_elementwise_sinh:
3075 *this, E, Intrinsic::sinh, Intrinsic::experimental_constrained_sinh));
3076
3077 case Builtin::BI__builtin_sincospi:
3078 case Builtin::BI__builtin_sincospif:
3079 case Builtin::BI__builtin_sincospil:
3080 if (Builder.getIsFPConstrained())
3081 break; // TODO: Emit constrained sincospi intrinsic once one exists.
3082 emitSincosBuiltin(*this, E, Intrinsic::sincospi);
3083 return RValue::get(nullptr);
3084
3085 case Builtin::BIsincos:
3086 case Builtin::BIsincosf:
3087 case Builtin::BIsincosl:
3088 case Builtin::BI__builtin_sincos:
3089 case Builtin::BI__builtin_sincosf:
3090 case Builtin::BI__builtin_sincosf16:
3091 case Builtin::BI__builtin_sincosl:
3092 case Builtin::BI__builtin_sincosf128:
3093 if (Builder.getIsFPConstrained())
3094 break; // TODO: Emit constrained sincos intrinsic once one exists.
3095 emitSincosBuiltin(*this, E, Intrinsic::sincos);
3096 return RValue::get(nullptr);
3097
3098 case Builtin::BIsqrt:
3099 case Builtin::BIsqrtf:
3100 case Builtin::BIsqrtl:
3101 case Builtin::BI__builtin_sqrt:
3102 case Builtin::BI__builtin_sqrtf:
3103 case Builtin::BI__builtin_sqrtf16:
3104 case Builtin::BI__builtin_sqrtl:
3105 case Builtin::BI__builtin_sqrtf128:
3106 case Builtin::BI__builtin_elementwise_sqrt: {
3108 *this, E, Intrinsic::sqrt, Intrinsic::experimental_constrained_sqrt);
3110 return RValue::get(Call);
3111 }
3112
3113 case Builtin::BItan:
3114 case Builtin::BItanf:
3115 case Builtin::BItanl:
3116 case Builtin::BI__builtin_tan:
3117 case Builtin::BI__builtin_tanf:
3118 case Builtin::BI__builtin_tanf16:
3119 case Builtin::BI__builtin_tanl:
3120 case Builtin::BI__builtin_tanf128:
3121 case Builtin::BI__builtin_elementwise_tan:
3123 *this, E, Intrinsic::tan, Intrinsic::experimental_constrained_tan));
3124
3125 case Builtin::BItanh:
3126 case Builtin::BItanhf:
3127 case Builtin::BItanhl:
3128 case Builtin::BI__builtin_tanh:
3129 case Builtin::BI__builtin_tanhf:
3130 case Builtin::BI__builtin_tanhf16:
3131 case Builtin::BI__builtin_tanhl:
3132 case Builtin::BI__builtin_tanhf128:
3133 case Builtin::BI__builtin_elementwise_tanh:
3135 *this, E, Intrinsic::tanh, Intrinsic::experimental_constrained_tanh));
3136
3137 case Builtin::BItrunc:
3138 case Builtin::BItruncf:
3139 case Builtin::BItruncl:
3140 case Builtin::BI__builtin_trunc:
3141 case Builtin::BI__builtin_truncf:
3142 case Builtin::BI__builtin_truncf16:
3143 case Builtin::BI__builtin_truncl:
3144 case Builtin::BI__builtin_truncf128:
3145 case Builtin::BI__builtin_elementwise_trunc:
3147 Intrinsic::trunc,
3148 Intrinsic::experimental_constrained_trunc));
3149
3150 case Builtin::BIlround:
3151 case Builtin::BIlroundf:
3152 case Builtin::BIlroundl:
3153 case Builtin::BI__builtin_lround:
3154 case Builtin::BI__builtin_lroundf:
3155 case Builtin::BI__builtin_lroundl:
3156 case Builtin::BI__builtin_lroundf128:
3158 *this, E, Intrinsic::lround,
3159 Intrinsic::experimental_constrained_lround));
3160
3161 case Builtin::BIllround:
3162 case Builtin::BIllroundf:
3163 case Builtin::BIllroundl:
3164 case Builtin::BI__builtin_llround:
3165 case Builtin::BI__builtin_llroundf:
3166 case Builtin::BI__builtin_llroundl:
3167 case Builtin::BI__builtin_llroundf128:
3169 *this, E, Intrinsic::llround,
3170 Intrinsic::experimental_constrained_llround));
3171
3172 case Builtin::BIlrint:
3173 case Builtin::BIlrintf:
3174 case Builtin::BIlrintl:
3175 case Builtin::BI__builtin_lrint:
3176 case Builtin::BI__builtin_lrintf:
3177 case Builtin::BI__builtin_lrintl:
3178 case Builtin::BI__builtin_lrintf128:
3180 *this, E, Intrinsic::lrint,
3181 Intrinsic::experimental_constrained_lrint));
3182
3183 case Builtin::BIllrint:
3184 case Builtin::BIllrintf:
3185 case Builtin::BIllrintl:
3186 case Builtin::BI__builtin_llrint:
3187 case Builtin::BI__builtin_llrintf:
3188 case Builtin::BI__builtin_llrintl:
3189 case Builtin::BI__builtin_llrintf128:
3191 *this, E, Intrinsic::llrint,
3192 Intrinsic::experimental_constrained_llrint));
3193 case Builtin::BI__builtin_ldexp:
3194 case Builtin::BI__builtin_ldexpf:
3195 case Builtin::BI__builtin_ldexpl:
3196 case Builtin::BI__builtin_ldexpf16:
3197 case Builtin::BI__builtin_ldexpf128:
3198 case Builtin::BI__builtin_elementwise_ldexp:
3200 *this, E, Intrinsic::ldexp,
3201 Intrinsic::experimental_constrained_ldexp));
3202 default:
3203 break;
3204 }
3205 }
3206
3207 // Check NonnullAttribute/NullabilityArg and Alignment.
3208 auto EmitArgCheck = [&](TypeCheckKind Kind, Address A, const Expr *Arg,
3209 unsigned ParmNum) {
3210 Value *Val = A.emitRawPointer(*this);
3211 EmitNonNullArgCheck(RValue::get(Val), Arg->getType(), Arg->getExprLoc(), FD,
3212 ParmNum);
3213
3214 if (SanOpts.has(SanitizerKind::Alignment)) {
3215 SanitizerSet SkippedChecks;
3216 SkippedChecks.set(SanitizerKind::All);
3217 SkippedChecks.clear(SanitizerKind::Alignment);
3218 SourceLocation Loc = Arg->getExprLoc();
3219 // Strip an implicit cast.
3220 if (auto *CE = dyn_cast<ImplicitCastExpr>(Arg))
3221 if (CE->getCastKind() == CK_BitCast)
3222 Arg = CE->getSubExpr();
3223 EmitTypeCheck(Kind, Loc, Val, Arg->getType(), A.getAlignment(),
3224 SkippedChecks);
3225 }
3226 };
3227
3228 switch (BuiltinIDIfNoAsmLabel) {
3229 default: break;
3230 case Builtin::BI__builtin___CFStringMakeConstantString:
3231 case Builtin::BI__builtin___NSStringMakeConstantString:
3232 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
3233 case Builtin::BI__builtin_stdarg_start:
3234 case Builtin::BI__builtin_va_start:
3235 case Builtin::BI__va_start:
3236 case Builtin::BI__builtin_c23_va_start:
3237 case Builtin::BI__builtin_va_end:
3238 EmitVAStartEnd(BuiltinID == Builtin::BI__va_start
3239 ? EmitScalarExpr(E->getArg(0))
3240 : EmitVAListRef(E->getArg(0)).emitRawPointer(*this),
3241 BuiltinID != Builtin::BI__builtin_va_end);
3242 return RValue::get(nullptr);
3243 case Builtin::BI__builtin_va_copy: {
3244 Value *DstPtr = EmitVAListRef(E->getArg(0)).emitRawPointer(*this);
3245 Value *SrcPtr = EmitVAListRef(E->getArg(1)).emitRawPointer(*this);
3246 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::vacopy, {DstPtr->getType()}),
3247 {DstPtr, SrcPtr});
3248 return RValue::get(nullptr);
3249 }
3250 case Builtin::BIabs:
3251 case Builtin::BIlabs:
3252 case Builtin::BIllabs:
3253 case Builtin::BI__builtin_abs:
3254 case Builtin::BI__builtin_labs:
3255 case Builtin::BI__builtin_llabs: {
3256 bool SanitizeOverflow = SanOpts.has(SanitizerKind::SignedIntegerOverflow);
3257
3258 Value *Result;
3259 switch (getLangOpts().getSignedOverflowBehavior()) {
3261 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), false);
3262 break;
3264 if (!SanitizeOverflow) {
3265 Result = EmitAbs(*this, EmitScalarExpr(E->getArg(0)), true);
3266 break;
3267 }
3268 [[fallthrough]];
3270 // TODO: Somehow handle the corner case when the address of abs is taken.
3271 Result = EmitOverflowCheckedAbs(*this, E, SanitizeOverflow);
3272 break;
3273 }
3274 return RValue::get(Result);
3275 }
3276 case Builtin::BI__builtin_complex: {
3277 Value *Real = EmitScalarExpr(E->getArg(0));
3278 Value *Imag = EmitScalarExpr(E->getArg(1));
3279 return RValue::getComplex({Real, Imag});
3280 }
3281 case Builtin::BI__builtin_conj:
3282 case Builtin::BI__builtin_conjf:
3283 case Builtin::BI__builtin_conjl:
3284 case Builtin::BIconj:
3285 case Builtin::BIconjf:
3286 case Builtin::BIconjl: {
3287 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3288 Value *Real = ComplexVal.first;
3289 Value *Imag = ComplexVal.second;
3290 Imag = Builder.CreateFNeg(Imag, "neg");
3291 return RValue::getComplex(std::make_pair(Real, Imag));
3292 }
3293 case Builtin::BI__builtin_creal:
3294 case Builtin::BI__builtin_crealf:
3295 case Builtin::BI__builtin_creall:
3296 case Builtin::BIcreal:
3297 case Builtin::BIcrealf:
3298 case Builtin::BIcreall: {
3299 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3300 return RValue::get(ComplexVal.first);
3301 }
3302
3303 case Builtin::BI__builtin_preserve_access_index: {
3304 // Only enabled preserved access index region when debuginfo
3305 // is available as debuginfo is needed to preserve user-level
3306 // access pattern.
3307 if (!getDebugInfo()) {
3308 CGM.Error(E->getExprLoc(), "using builtin_preserve_access_index() without -g");
3309 return RValue::get(EmitScalarExpr(E->getArg(0)));
3310 }
3311
3312 // Nested builtin_preserve_access_index() not supported
3314 CGM.Error(E->getExprLoc(), "nested builtin_preserve_access_index() not supported");
3315 return RValue::get(EmitScalarExpr(E->getArg(0)));
3316 }
3317
3318 IsInPreservedAIRegion = true;
3319 Value *Res = EmitScalarExpr(E->getArg(0));
3320 IsInPreservedAIRegion = false;
3321 return RValue::get(Res);
3322 }
3323
3324 case Builtin::BI__builtin_cimag:
3325 case Builtin::BI__builtin_cimagf:
3326 case Builtin::BI__builtin_cimagl:
3327 case Builtin::BIcimag:
3328 case Builtin::BIcimagf:
3329 case Builtin::BIcimagl: {
3330 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3331 return RValue::get(ComplexVal.second);
3332 }
3333
3334 case Builtin::BI__builtin_clrsb:
3335 case Builtin::BI__builtin_clrsbl:
3336 case Builtin::BI__builtin_clrsbll: {
3337 // clrsb(x) -> clz(x < 0 ? ~x : x) - 1 or
3338 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3339
3340 llvm::Type *ArgType = ArgValue->getType();
3341 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3342
3343 llvm::Type *ResultType = ConvertType(E->getType());
3344 Value *Zero = llvm::Constant::getNullValue(ArgType);
3345 Value *IsNeg = Builder.CreateICmpSLT(ArgValue, Zero, "isneg");
3346 Value *Inverse = Builder.CreateNot(ArgValue, "not");
3347 Value *Tmp = Builder.CreateSelect(IsNeg, Inverse, ArgValue);
3348 Value *Ctlz = Builder.CreateCall(F, {Tmp, Builder.getFalse()});
3349 Value *Result =
3350 Builder.CreateNUWSub(Ctlz, llvm::ConstantInt::get(ArgType, 1));
3351 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3352 "cast");
3353 return RValue::get(Result);
3354 }
3355 case Builtin::BI__builtin_ctzs:
3356 case Builtin::BI__builtin_ctz:
3357 case Builtin::BI__builtin_ctzl:
3358 case Builtin::BI__builtin_ctzll:
3359 case Builtin::BI__builtin_ctzg:
3360 case Builtin::BI__builtin_elementwise_ctzg: {
3361 bool HasFallback =
3362 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_ctzg ||
3363 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg) &&
3364 E->getNumArgs() > 1;
3365
3366 Value *ArgValue =
3367 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3369
3370 llvm::Type *ArgType = ArgValue->getType();
3371 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3372
3373 llvm::Type *ResultType = ConvertType(E->getType());
3374 // The elementwise builtins always exhibit zero-is-undef behaviour
3375 Value *ZeroUndef = Builder.getInt1(
3376 HasFallback || getTarget().isCLZForZeroUndef() ||
3377 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_ctzg);
3378 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3379 if (Result->getType() != ResultType)
3380 Result =
3381 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3382 if (!HasFallback)
3383 return RValue::get(Result);
3384
3385 Value *Zero = Constant::getNullValue(ArgType);
3386 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3387 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3388 Value *ResultOrFallback =
3389 Builder.CreateSelect(IsZero, FallbackValue, Result, "ctzg");
3390 return RValue::get(ResultOrFallback);
3391 }
3392 case Builtin::BI__builtin_clzs:
3393 case Builtin::BI__builtin_clz:
3394 case Builtin::BI__builtin_clzl:
3395 case Builtin::BI__builtin_clzll:
3396 case Builtin::BI__builtin_clzg:
3397 case Builtin::BI__builtin_elementwise_clzg: {
3398 bool HasFallback =
3399 (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_clzg ||
3400 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg) &&
3401 E->getNumArgs() > 1;
3402
3403 Value *ArgValue =
3404 HasFallback ? EmitBitCountExpr(*this, E->getArg(0))
3406
3407 llvm::Type *ArgType = ArgValue->getType();
3408 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3409
3410 llvm::Type *ResultType = ConvertType(E->getType());
3411 // The elementwise builtins always exhibit zero-is-undef behaviour
3412 Value *ZeroUndef = Builder.getInt1(
3413 HasFallback || getTarget().isCLZForZeroUndef() ||
3414 BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_clzg);
3415 Value *Result = Builder.CreateCall(F, {ArgValue, ZeroUndef});
3416 if (Result->getType() != ResultType)
3417 Result =
3418 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3419 if (!HasFallback)
3420 return RValue::get(Result);
3421
3422 Value *Zero = Constant::getNullValue(ArgType);
3423 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3424 Value *FallbackValue = EmitScalarExpr(E->getArg(1));
3425 Value *ResultOrFallback =
3426 Builder.CreateSelect(IsZero, FallbackValue, Result, "clzg");
3427 return RValue::get(ResultOrFallback);
3428 }
3429 case Builtin::BI__builtin_ffs:
3430 case Builtin::BI__builtin_ffsl:
3431 case Builtin::BI__builtin_ffsll: {
3432 // ffs(x) -> x ? cttz(x) + 1 : 0
3433 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3434
3435 llvm::Type *ArgType = ArgValue->getType();
3436 Function *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
3437
3438 llvm::Type *ResultType = ConvertType(E->getType());
3439 Value *Tmp =
3440 Builder.CreateAdd(Builder.CreateCall(F, {ArgValue, Builder.getTrue()}),
3441 llvm::ConstantInt::get(ArgType, 1));
3442 Value *Zero = llvm::Constant::getNullValue(ArgType);
3443 Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
3444 Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
3445 if (Result->getType() != ResultType)
3446 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3447 "cast");
3448 return RValue::get(Result);
3449 }
3450 case Builtin::BI__builtin_parity:
3451 case Builtin::BI__builtin_parityl:
3452 case Builtin::BI__builtin_parityll: {
3453 // parity(x) -> ctpop(x) & 1
3454 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3455
3456 llvm::Type *ArgType = ArgValue->getType();
3457 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3458
3459 llvm::Type *ResultType = ConvertType(E->getType());
3460 Value *Tmp = Builder.CreateCall(F, ArgValue);
3461 Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
3462 if (Result->getType() != ResultType)
3463 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3464 "cast");
3465 return RValue::get(Result);
3466 }
3467 case Builtin::BI__lzcnt16:
3468 case Builtin::BI__lzcnt:
3469 case Builtin::BI__lzcnt64: {
3470 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3471
3472 llvm::Type *ArgType = ArgValue->getType();
3473 Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
3474
3475 llvm::Type *ResultType = ConvertType(E->getType());
3476 Value *Result = Builder.CreateCall(F, {ArgValue, Builder.getFalse()});
3477 if (Result->getType() != ResultType)
3478 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
3479 "cast");
3480 return RValue::get(Result);
3481 }
3482 case Builtin::BI__popcnt16:
3483 case Builtin::BI__popcnt:
3484 case Builtin::BI__popcnt64:
3485 case Builtin::BI__builtin_popcount:
3486 case Builtin::BI__builtin_popcountl:
3487 case Builtin::BI__builtin_popcountll:
3488 case Builtin::BI__builtin_popcountg: {
3489 Value *ArgValue = EmitBitCountExpr(*this, E->getArg(0));
3490
3491 llvm::Type *ArgType = ArgValue->getType();
3492 Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
3493
3494 llvm::Type *ResultType = ConvertType(E->getType());
3495 Value *Result = Builder.CreateCall(F, ArgValue);
3496 if (Result->getType() != ResultType)
3497 Result =
3498 Builder.CreateIntCast(Result, ResultType, /*isSigned*/ false, "cast");
3499 return RValue::get(Result);
3500 }
3501 case Builtin::BI__builtin_unpredictable: {
3502 // Always return the argument of __builtin_unpredictable. LLVM does not
3503 // handle this builtin. Metadata for this builtin should be added directly
3504 // to instructions such as branches or switches that use it.
3505 return RValue::get(EmitScalarExpr(E->getArg(0)));
3506 }
3507 case Builtin::BI__builtin_expect: {
3508 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3509 llvm::Type *ArgType = ArgValue->getType();
3510
3511 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3512 // Don't generate llvm.expect on -O0 as the backend won't use it for
3513 // anything.
3514 // Note, we still IRGen ExpectedValue because it could have side-effects.
3515 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3516 return RValue::get(ArgValue);
3517
3518 Function *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
3519 Value *Result =
3520 Builder.CreateCall(FnExpect, {ArgValue, ExpectedValue}, "expval");
3521 return RValue::get(Result);
3522 }
3523 case Builtin::BI__builtin_expect_with_probability: {
3524 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3525 llvm::Type *ArgType = ArgValue->getType();
3526
3527 Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
3528 llvm::APFloat Probability(0.0);
3529 const Expr *ProbArg = E->getArg(2);
3530 bool EvalSucceed = ProbArg->EvaluateAsFloat(Probability, CGM.getContext());
3531 assert(EvalSucceed && "probability should be able to evaluate as float");
3532 (void)EvalSucceed;
3533 bool LoseInfo = false;
3534 Probability.convert(llvm::APFloat::IEEEdouble(),
3535 llvm::RoundingMode::Dynamic, &LoseInfo);
3536 llvm::Type *Ty = ConvertType(ProbArg->getType());
3537 Constant *Confidence = ConstantFP::get(Ty, Probability);
3538 // Don't generate llvm.expect.with.probability on -O0 as the backend
3539 // won't use it for anything.
3540 // Note, we still IRGen ExpectedValue because it could have side-effects.
3541 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
3542 return RValue::get(ArgValue);
3543
3544 Function *FnExpect =
3545 CGM.getIntrinsic(Intrinsic::expect_with_probability, ArgType);
3546 Value *Result = Builder.CreateCall(
3547 FnExpect, {ArgValue, ExpectedValue, Confidence}, "expval");
3548 return RValue::get(Result);
3549 }
3550 case Builtin::BI__builtin_assume_aligned: {
3551 const Expr *Ptr = E->getArg(0);
3552 Value *PtrValue = EmitScalarExpr(Ptr);
3553 Value *OffsetValue =
3554 (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
3555
3556 Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
3557 ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
3558 if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
3559 AlignmentCI = ConstantInt::get(AlignmentCI->getIntegerType(),
3560 llvm::Value::MaximumAlignment);
3561
3562 emitAlignmentAssumption(PtrValue, Ptr,
3563 /*The expr loc is sufficient.*/ SourceLocation(),
3564 AlignmentCI, OffsetValue);
3565 return RValue::get(PtrValue);
3566 }
3567 case Builtin::BI__builtin_assume_dereferenceable: {
3568 const Expr *Ptr = E->getArg(0);
3569 const Expr *Size = E->getArg(1);
3570 Value *PtrValue = EmitScalarExpr(Ptr);
3571 Value *SizeValue = EmitScalarExpr(Size);
3572 if (SizeValue->getType() != IntPtrTy)
3573 SizeValue =
3574 Builder.CreateIntCast(SizeValue, IntPtrTy, false, "casted.size");
3575 Builder.CreateDereferenceableAssumption(PtrValue, SizeValue);
3576 return RValue::get(nullptr);
3577 }
3578 case Builtin::BI__assume:
3579 case Builtin::BI__builtin_assume: {
3580 if (E->getArg(0)->HasSideEffects(getContext()))
3581 return RValue::get(nullptr);
3582
3583 Value *ArgValue = EmitCheckedArgForAssume(E->getArg(0));
3584 Function *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
3585 Builder.CreateCall(FnAssume, ArgValue);
3586 return RValue::get(nullptr);
3587 }
3588 case Builtin::BI__builtin_assume_separate_storage: {
3589 const Expr *Arg0 = E->getArg(0);
3590 const Expr *Arg1 = E->getArg(1);
3591
3592 Value *Value0 = EmitScalarExpr(Arg0);
3593 Value *Value1 = EmitScalarExpr(Arg1);
3594
3595 Value *Values[] = {Value0, Value1};
3596 OperandBundleDefT<Value *> OBD("separate_storage", Values);
3597 Builder.CreateAssumption(ConstantInt::getTrue(getLLVMContext()), {OBD});
3598 return RValue::get(nullptr);
3599 }
3600 case Builtin::BI__builtin_allow_runtime_check: {
3601 StringRef Kind =
3602 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3603 LLVMContext &Ctx = CGM.getLLVMContext();
3604 llvm::Value *Allow = Builder.CreateCall(
3605 CGM.getIntrinsic(Intrinsic::allow_runtime_check),
3606 llvm::MetadataAsValue::get(Ctx, llvm::MDString::get(Ctx, Kind)));
3607 return RValue::get(Allow);
3608 }
3609 case Builtin::BI__builtin_allow_sanitize_check: {
3610 Intrinsic::ID IntrID = Intrinsic::not_intrinsic;
3611 StringRef Name =
3612 cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())->getString();
3613
3614 // We deliberately allow the use of kernel- and non-kernel names
3615 // interchangably, even when one or the other is enabled. This is consistent
3616 // with the no_sanitize-attribute, which allows either kernel- or non-kernel
3617 // name to disable instrumentation (see CodeGenFunction::StartFunction).
3618 if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Address |
3619 SanitizerKind::KernelAddress) &&
3620 (Name == "address" || Name == "kernel-address")) {
3621 IntrID = Intrinsic::allow_sanitize_address;
3622 } else if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
3623 Name == "thread") {
3624 IntrID = Intrinsic::allow_sanitize_thread;
3625 } else if (getLangOpts().Sanitize.hasOneOf(SanitizerKind::Memory |
3626 SanitizerKind::KernelMemory) &&
3627 (Name == "memory" || Name == "kernel-memory")) {
3628 IntrID = Intrinsic::allow_sanitize_memory;
3629 } else if (getLangOpts().Sanitize.hasOneOf(
3630 SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress) &&
3631 (Name == "hwaddress" || Name == "kernel-hwaddress")) {
3632 IntrID = Intrinsic::allow_sanitize_hwaddress;
3633 }
3634
3635 if (IntrID != Intrinsic::not_intrinsic) {
3636 llvm::Value *Allow = Builder.CreateCall(CGM.getIntrinsic(IntrID));
3637 return RValue::get(Allow);
3638 }
3639 // If the checked sanitizer is not enabled, we can safely lower to false
3640 // right away. This is also more efficient, since the LowerAllowCheckPass
3641 // must not always be enabled if none of the above sanitizers are enabled.
3642 return RValue::get(Builder.getFalse());
3643 }
3644 case Builtin::BI__arithmetic_fence: {
3645 // Create the builtin call if FastMath is selected, and the target
3646 // supports the builtin, otherwise just return the argument.
3647 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3648 llvm::FastMathFlags FMF = Builder.getFastMathFlags();
3649 bool isArithmeticFenceEnabled =
3650 FMF.allowReassoc() &&
3652 QualType ArgType = E->getArg(0)->getType();
3653 if (ArgType->isComplexType()) {
3654 if (isArithmeticFenceEnabled) {
3655 QualType ElementType = ArgType->castAs<ComplexType>()->getElementType();
3656 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3657 Value *Real = Builder.CreateArithmeticFence(ComplexVal.first,
3658 ConvertType(ElementType));
3659 Value *Imag = Builder.CreateArithmeticFence(ComplexVal.second,
3660 ConvertType(ElementType));
3661 return RValue::getComplex(std::make_pair(Real, Imag));
3662 }
3663 ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
3664 Value *Real = ComplexVal.first;
3665 Value *Imag = ComplexVal.second;
3666 return RValue::getComplex(std::make_pair(Real, Imag));
3667 }
3668 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3669 if (isArithmeticFenceEnabled)
3670 return RValue::get(
3671 Builder.CreateArithmeticFence(ArgValue, ConvertType(ArgType)));
3672 return RValue::get(ArgValue);
3673 }
3674 case Builtin::BI__builtin_bswapg: {
3675 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3676 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3677 assert(IntTy && "LLVM's __builtin_bswapg only supports integer variants");
3678 if (IntTy->getBitWidth() == 1 || IntTy->getBitWidth() == 8)
3679 return RValue::get(ArgValue);
3680 assert(((IntTy->getBitWidth() % 16 == 0 && IntTy->getBitWidth() != 0)) &&
3681 "LLVM's __builtin_bswapg only supports integer variants that has a "
3682 "multiple of 16 bits as well as a single byte");
3683 return RValue::get(
3684 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3685 }
3686 case Builtin::BI__builtin_bswap16:
3687 case Builtin::BI__builtin_bswap32:
3688 case Builtin::BI__builtin_bswap64:
3689 case Builtin::BI_byteswap_ushort:
3690 case Builtin::BI_byteswap_ulong:
3691 case Builtin::BI_byteswap_uint64: {
3692 return RValue::get(
3693 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bswap));
3694 }
3695 case Builtin::BI__builtin_bitreverseg: {
3696 Value *ArgValue = EmitScalarExpr(E->getArg(0));
3697 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(ArgValue->getType());
3698 assert(IntTy &&
3699 "LLVM's __builtin_bitreverseg only support integer variants");
3700 if (IntTy->getBitWidth() == 1)
3701 return RValue::get(ArgValue);
3702 return RValue::get(
3703 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3704 }
3705 case Builtin::BI__builtin_bitreverse8:
3706 case Builtin::BI__builtin_bitreverse16:
3707 case Builtin::BI__builtin_bitreverse32:
3708 case Builtin::BI__builtin_bitreverse64: {
3709 return RValue::get(
3710 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::bitreverse));
3711 }
3712 case Builtin::BI__builtin_rotateleft8:
3713 case Builtin::BI__builtin_rotateleft16:
3714 case Builtin::BI__builtin_rotateleft32:
3715 case Builtin::BI__builtin_rotateleft64:
3716 case Builtin::BI__builtin_stdc_rotate_left:
3717 case Builtin::BI_rotl8: // Microsoft variants of rotate left
3718 case Builtin::BI_rotl16:
3719 case Builtin::BI_rotl:
3720 case Builtin::BI_lrotl:
3721 case Builtin::BI_rotl64:
3722 return emitRotate(E, false);
3723
3724 case Builtin::BI__builtin_rotateright8:
3725 case Builtin::BI__builtin_rotateright16:
3726 case Builtin::BI__builtin_rotateright32:
3727 case Builtin::BI__builtin_rotateright64:
3728 case Builtin::BI__builtin_stdc_rotate_right:
3729 case Builtin::BI_rotr8: // Microsoft variants of rotate right
3730 case Builtin::BI_rotr16:
3731 case Builtin::BI_rotr:
3732 case Builtin::BI_lrotr:
3733 case Builtin::BI_rotr64:
3734 return emitRotate(E, true);
3735
3736 case Builtin::BI__builtin_constant_p: {
3737 llvm::Type *ResultType = ConvertType(E->getType());
3738
3739 const Expr *Arg = E->getArg(0);
3740 QualType ArgType = Arg->getType();
3741 // FIXME: The allowance for Obj-C pointers and block pointers is historical
3742 // and likely a mistake.
3743 if (!ArgType->isIntegralOrEnumerationType() && !ArgType->isFloatingType() &&
3744 !ArgType->isObjCObjectPointerType() && !ArgType->isBlockPointerType())
3745 // Per the GCC documentation, only numeric constants are recognized after
3746 // inlining.
3747 return RValue::get(ConstantInt::get(ResultType, 0));
3748
3749 if (Arg->HasSideEffects(getContext()))
3750 // The argument is unevaluated, so be conservative if it might have
3751 // side-effects.
3752 return RValue::get(ConstantInt::get(ResultType, 0));
3753
3754 Value *ArgValue = EmitScalarExpr(Arg);
3755 if (ArgType->isObjCObjectPointerType()) {
3756 // Convert Objective-C objects to id because we cannot distinguish between
3757 // LLVM types for Obj-C classes as they are opaque.
3758 ArgType = CGM.getContext().getObjCIdType();
3759 ArgValue = Builder.CreateBitCast(ArgValue, ConvertType(ArgType));
3760 }
3761 Function *F =
3762 CGM.getIntrinsic(Intrinsic::is_constant, ConvertType(ArgType));
3763 Value *Result = Builder.CreateCall(F, ArgValue);
3764 if (Result->getType() != ResultType)
3765 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/false);
3766 return RValue::get(Result);
3767 }
3768 case Builtin::BI__builtin_dynamic_object_size:
3769 case Builtin::BI__builtin_object_size: {
3770 unsigned Type =
3771 E->getArg(1)->EvaluateKnownConstInt(getContext()).getZExtValue();
3772 auto *ResType = cast<llvm::IntegerType>(ConvertType(E->getType()));
3773
3774 // We pass this builtin onto the optimizer so that it can figure out the
3775 // object size in more complex cases.
3776 bool IsDynamic = BuiltinID == Builtin::BI__builtin_dynamic_object_size;
3777 return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
3778 /*EmittedE=*/nullptr, IsDynamic));
3779 }
3780 case Builtin::BI__builtin_counted_by_ref: {
3781 // Default to returning '(void *) 0'.
3782 llvm::Value *Result = llvm::ConstantPointerNull::get(
3783 llvm::PointerType::getUnqual(getLLVMContext()));
3784
3785 const Expr *Arg = E->getArg(0)->IgnoreParenImpCasts();
3786
3787 if (auto *UO = dyn_cast<UnaryOperator>(Arg);
3788 UO && UO->getOpcode() == UO_AddrOf) {
3789 Arg = UO->getSubExpr()->IgnoreParenImpCasts();
3790
3791 if (auto *ASE = dyn_cast<ArraySubscriptExpr>(Arg))
3792 Arg = ASE->getBase()->IgnoreParenImpCasts();
3793 }
3794
3795 if (const MemberExpr *ME = dyn_cast_if_present<MemberExpr>(Arg)) {
3796 if (auto *CATy =
3798 CATy && CATy->getKind() == CountAttributedType::CountedBy) {
3799 const auto *MemberDecl = cast<FieldDecl>(ME->getMemberDecl());
3800 if (const FieldDecl *CountFD = MemberDecl->findCountedByField())
3801 Result = GetCountedByFieldExprGEP(Arg, MemberDecl, CountFD);
3802 else
3803 llvm::report_fatal_error("Cannot find the counted_by 'count' field");
3804 }
3805 }
3806
3807 return RValue::get(Result);
3808 }
3809 case Builtin::BI__builtin_prefetch: {
3810 Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
3811 // FIXME: Technically these constants should of type 'int', yes?
3812 RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
3813 llvm::ConstantInt::get(Int32Ty, 0);
3814 Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
3815 llvm::ConstantInt::get(Int32Ty, 3);
3816 Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
3817 Function *F = CGM.getIntrinsic(Intrinsic::prefetch, Address->getType());
3818 Builder.CreateCall(F, {Address, RW, Locality, Data});
3819 return RValue::get(nullptr);
3820 }
3821 case Builtin::BI__builtin_readcyclecounter: {
3822 Function *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
3823 return RValue::get(Builder.CreateCall(F));
3824 }
3825 case Builtin::BI__builtin_readsteadycounter: {
3826 Function *F = CGM.getIntrinsic(Intrinsic::readsteadycounter);
3827 return RValue::get(Builder.CreateCall(F));
3828 }
3829 case Builtin::BI__builtin___clear_cache: {
3830 Value *Begin = EmitScalarExpr(E->getArg(0));
3831 Value *End = EmitScalarExpr(E->getArg(1));
3832 Function *F = CGM.getIntrinsic(Intrinsic::clear_cache, {CGM.DefaultPtrTy});
3833 return RValue::get(Builder.CreateCall(F, {Begin, End}));
3834 }
3835 case Builtin::BI__builtin_trap:
3836 EmitTrapCall(Intrinsic::trap);
3837 return RValue::get(nullptr);
3838 case Builtin::BI__builtin_verbose_trap: {
3839 llvm::DILocation *TrapLocation = Builder.getCurrentDebugLocation();
3840 if (getDebugInfo()) {
3841 TrapLocation = getDebugInfo()->CreateTrapFailureMessageFor(
3842 TrapLocation, *E->getArg(0)->tryEvaluateString(getContext()),
3844 }
3845 ApplyDebugLocation ApplyTrapDI(*this, TrapLocation);
3846 // Currently no attempt is made to prevent traps from being merged.
3847 EmitTrapCall(Intrinsic::trap);
3848 return RValue::get(nullptr);
3849 }
3850 case Builtin::BI__debugbreak:
3851 EmitTrapCall(Intrinsic::debugtrap);
3852 return RValue::get(nullptr);
3853 case Builtin::BI__builtin_unreachable: {
3855
3856 // We do need to preserve an insertion point.
3857 EmitBlock(createBasicBlock("unreachable.cont"));
3858
3859 return RValue::get(nullptr);
3860 }
3861
3862 case Builtin::BI__builtin_powi:
3863 case Builtin::BI__builtin_powif:
3864 case Builtin::BI__builtin_powil: {
3865 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
3866 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
3867
3868 if (Builder.getIsFPConstrained()) {
3869 // FIXME: llvm.powi has 2 mangling types,
3870 // llvm.experimental.constrained.powi has one.
3871 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3872 Function *F = CGM.getIntrinsic(Intrinsic::experimental_constrained_powi,
3873 Src0->getType());
3874 return RValue::get(Builder.CreateConstrainedFPCall(F, { Src0, Src1 }));
3875 }
3876
3877 Function *F = CGM.getIntrinsic(Intrinsic::powi,
3878 { Src0->getType(), Src1->getType() });
3879 return RValue::get(Builder.CreateCall(F, { Src0, Src1 }));
3880 }
3881 case Builtin::BI__builtin_frexpl: {
3882 // Linux PPC will not be adding additional PPCDoubleDouble support.
3883 // WIP to switch default to IEEE long double. Will emit libcall for
3884 // frexpl instead of legalizing this type in the BE.
3885 if (&getTarget().getLongDoubleFormat() == &llvm::APFloat::PPCDoubleDouble())
3886 break;
3887 [[fallthrough]];
3888 }
3889 case Builtin::BI__builtin_frexp:
3890 case Builtin::BI__builtin_frexpf:
3891 case Builtin::BI__builtin_frexpf128:
3892 case Builtin::BI__builtin_frexpf16:
3893 return RValue::get(emitFrexpBuiltin(*this, E, Intrinsic::frexp));
3894 case Builtin::BImodf:
3895 case Builtin::BImodff:
3896 case Builtin::BImodfl:
3897 case Builtin::BI__builtin_modf:
3898 case Builtin::BI__builtin_modff:
3899 case Builtin::BI__builtin_modfl:
3900 if (Builder.getIsFPConstrained())
3901 break; // TODO: Emit constrained modf intrinsic once one exists.
3902 return RValue::get(emitModfBuiltin(*this, E, Intrinsic::modf));
3903 case Builtin::BI__builtin_isgreater:
3904 case Builtin::BI__builtin_isgreaterequal:
3905 case Builtin::BI__builtin_isless:
3906 case Builtin::BI__builtin_islessequal:
3907 case Builtin::BI__builtin_islessgreater:
3908 case Builtin::BI__builtin_isunordered: {
3909 // Ordered comparisons: we know the arguments to these are matching scalar
3910 // floating point values.
3911 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3912 Value *LHS = EmitScalarExpr(E->getArg(0));
3913 Value *RHS = EmitScalarExpr(E->getArg(1));
3914
3915 switch (BuiltinID) {
3916 default: llvm_unreachable("Unknown ordered comparison");
3917 case Builtin::BI__builtin_isgreater:
3918 LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
3919 break;
3920 case Builtin::BI__builtin_isgreaterequal:
3921 LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
3922 break;
3923 case Builtin::BI__builtin_isless:
3924 LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
3925 break;
3926 case Builtin::BI__builtin_islessequal:
3927 LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
3928 break;
3929 case Builtin::BI__builtin_islessgreater:
3930 LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
3931 break;
3932 case Builtin::BI__builtin_isunordered:
3933 LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
3934 break;
3935 }
3936 // ZExt bool to int type.
3937 return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
3938 }
3939
3940 case Builtin::BI__builtin_isnan: {
3941 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3942 Value *V = EmitScalarExpr(E->getArg(0));
3943 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3944 return RValue::get(Result);
3945 return RValue::get(
3946 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNan),
3947 ConvertType(E->getType())));
3948 }
3949
3950 case Builtin::BI__builtin_issignaling: {
3951 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3952 Value *V = EmitScalarExpr(E->getArg(0));
3953 return RValue::get(
3954 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSNan),
3955 ConvertType(E->getType())));
3956 }
3957
3958 case Builtin::BI__builtin_isinf: {
3959 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3960 Value *V = EmitScalarExpr(E->getArg(0));
3961 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3962 return RValue::get(Result);
3963 return RValue::get(
3964 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcInf),
3965 ConvertType(E->getType())));
3966 }
3967
3968 case Builtin::BIfinite:
3969 case Builtin::BI__finite:
3970 case Builtin::BIfinitef:
3971 case Builtin::BI__finitef:
3972 case Builtin::BIfinitel:
3973 case Builtin::BI__finitel:
3974 case Builtin::BI__builtin_isfinite: {
3975 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3976 Value *V = EmitScalarExpr(E->getArg(0));
3977 if (Value *Result = tryUseTestFPKind(*this, BuiltinID, V))
3978 return RValue::get(Result);
3979 return RValue::get(
3980 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcFinite),
3981 ConvertType(E->getType())));
3982 }
3983
3984 case Builtin::BI__builtin_isnormal: {
3985 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3986 Value *V = EmitScalarExpr(E->getArg(0));
3987 return RValue::get(
3988 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcNormal),
3989 ConvertType(E->getType())));
3990 }
3991
3992 case Builtin::BI__builtin_issubnormal: {
3993 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
3994 Value *V = EmitScalarExpr(E->getArg(0));
3995 return RValue::get(
3996 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcSubnormal),
3997 ConvertType(E->getType())));
3998 }
3999
4000 case Builtin::BI__builtin_iszero: {
4001 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4002 Value *V = EmitScalarExpr(E->getArg(0));
4003 return RValue::get(
4004 Builder.CreateZExt(Builder.createIsFPClass(V, FPClassTest::fcZero),
4005 ConvertType(E->getType())));
4006 }
4007
4008 case Builtin::BI__builtin_isfpclass: {
4010 if (!E->getArg(1)->EvaluateAsInt(Result, CGM.getContext()))
4011 break;
4012 uint64_t Test = Result.Val.getInt().getLimitedValue();
4013 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4014 Value *V = EmitScalarExpr(E->getArg(0));
4015 return RValue::get(Builder.CreateZExt(Builder.createIsFPClass(V, Test),
4016 ConvertType(E->getType())));
4017 }
4018
4019 case Builtin::BI__builtin_nondeterministic_value: {
4020 llvm::Type *Ty = ConvertType(E->getArg(0)->getType());
4021
4022 Value *Result = PoisonValue::get(Ty);
4023 Result = Builder.CreateFreeze(Result);
4024
4025 return RValue::get(Result);
4026 }
4027
4028 case Builtin::BI__builtin_elementwise_abs: {
4029 Value *Result;
4030 QualType QT = E->getArg(0)->getType();
4031
4032 if (auto *VecTy = QT->getAs<VectorType>())
4033 QT = VecTy->getElementType();
4034 if (QT->isIntegerType())
4035 Result = Builder.CreateBinaryIntrinsic(
4036 Intrinsic::abs, EmitScalarExpr(E->getArg(0)), Builder.getFalse(),
4037 nullptr, "elt.abs");
4038 else
4039 Result = emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::fabs,
4040 "elt.abs");
4041
4042 return RValue::get(Result);
4043 }
4044 case Builtin::BI__builtin_elementwise_bitreverse:
4046 *this, E, Intrinsic::bitreverse, "elt.bitreverse"));
4047 case Builtin::BI__builtin_elementwise_popcount:
4049 *this, E, Intrinsic::ctpop, "elt.ctpop"));
4050 case Builtin::BI__builtin_elementwise_canonicalize:
4052 *this, E, Intrinsic::canonicalize, "elt.canonicalize"));
4053 case Builtin::BI__builtin_elementwise_copysign:
4054 return RValue::get(
4055 emitBuiltinWithOneOverloadedType<2>(*this, E, Intrinsic::copysign));
4056 case Builtin::BI__builtin_elementwise_fshl:
4057 return RValue::get(
4058 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshl));
4059 case Builtin::BI__builtin_elementwise_fshr:
4060 return RValue::get(
4061 emitBuiltinWithOneOverloadedType<3>(*this, E, Intrinsic::fshr));
4062
4063 case Builtin::BI__builtin_elementwise_add_sat:
4064 case Builtin::BI__builtin_elementwise_sub_sat: {
4065 Value *Op0 = EmitScalarExpr(E->getArg(0));
4066 Value *Op1 = EmitScalarExpr(E->getArg(1));
4067 Value *Result;
4068 assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
4069 QualType Ty = E->getArg(0)->getType();
4070 if (auto *VecTy = Ty->getAs<VectorType>())
4071 Ty = VecTy->getElementType();
4072 bool IsSigned = Ty->isSignedIntegerType();
4073 unsigned Opc;
4074 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
4075 Opc = IsSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
4076 else
4077 Opc = IsSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
4078 Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
4079 return RValue::get(Result);
4080 }
4081
4082 case Builtin::BI__builtin_elementwise_max: {
4083 Value *Op0 = EmitScalarExpr(E->getArg(0));
4084 Value *Op1 = EmitScalarExpr(E->getArg(1));
4085 Value *Result;
4086 if (Op0->getType()->isIntOrIntVectorTy()) {
4087 QualType Ty = E->getArg(0)->getType();
4088 if (auto *VecTy = Ty->getAs<VectorType>())
4089 Ty = VecTy->getElementType();
4090 Result = Builder.CreateBinaryIntrinsic(
4091 Ty->isSignedIntegerType() ? Intrinsic::smax : Intrinsic::umax, Op0,
4092 Op1, nullptr, "elt.max");
4093 } else
4094 Result = Builder.CreateMaxNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.max");
4095 return RValue::get(Result);
4096 }
4097 case Builtin::BI__builtin_elementwise_min: {
4098 Value *Op0 = EmitScalarExpr(E->getArg(0));
4099 Value *Op1 = EmitScalarExpr(E->getArg(1));
4100 Value *Result;
4101 if (Op0->getType()->isIntOrIntVectorTy()) {
4102 QualType Ty = E->getArg(0)->getType();
4103 if (auto *VecTy = Ty->getAs<VectorType>())
4104 Ty = VecTy->getElementType();
4105 Result = Builder.CreateBinaryIntrinsic(
4106 Ty->isSignedIntegerType() ? Intrinsic::smin : Intrinsic::umin, Op0,
4107 Op1, nullptr, "elt.min");
4108 } else
4109 Result = Builder.CreateMinNum(Op0, Op1, /*FMFSource=*/nullptr, "elt.min");
4110 return RValue::get(Result);
4111 }
4112
4113 case Builtin::BI__builtin_elementwise_maxnum: {
4114 Value *Op0 = EmitScalarExpr(E->getArg(0));
4115 Value *Op1 = EmitScalarExpr(E->getArg(1));
4116 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, Op0,
4117 Op1, nullptr, "elt.maxnum");
4118 return RValue::get(Result);
4119 }
4120
4121 case Builtin::BI__builtin_elementwise_minnum: {
4122 Value *Op0 = EmitScalarExpr(E->getArg(0));
4123 Value *Op1 = EmitScalarExpr(E->getArg(1));
4124 Value *Result = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::minnum, Op0,
4125 Op1, nullptr, "elt.minnum");
4126 return RValue::get(Result);
4127 }
4128
4129 case Builtin::BI__builtin_elementwise_maximum: {
4130 Value *Op0 = EmitScalarExpr(E->getArg(0));
4131 Value *Op1 = EmitScalarExpr(E->getArg(1));
4132 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::maximum, Op0, Op1,
4133 nullptr, "elt.maximum");
4134 return RValue::get(Result);
4135 }
4136
4137 case Builtin::BI__builtin_elementwise_minimum: {
4138 Value *Op0 = EmitScalarExpr(E->getArg(0));
4139 Value *Op1 = EmitScalarExpr(E->getArg(1));
4140 Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::minimum, Op0, Op1,
4141 nullptr, "elt.minimum");
4142 return RValue::get(Result);
4143 }
4144
4145 case Builtin::BI__builtin_elementwise_maximumnum: {
4146 Value *Op0 = EmitScalarExpr(E->getArg(0));
4147 Value *Op1 = EmitScalarExpr(E->getArg(1));
4148 Value *Result = Builder.CreateBinaryIntrinsic(
4149 Intrinsic::maximumnum, Op0, Op1, nullptr, "elt.maximumnum");
4150 return RValue::get(Result);
4151 }
4152
4153 case Builtin::BI__builtin_elementwise_minimumnum: {
4154 Value *Op0 = EmitScalarExpr(E->getArg(0));
4155 Value *Op1 = EmitScalarExpr(E->getArg(1));
4156 Value *Result = Builder.CreateBinaryIntrinsic(
4157 Intrinsic::minimumnum, Op0, Op1, nullptr, "elt.minimumnum");
4158 return RValue::get(Result);
4159 }
4160
4161 case Builtin::BI__builtin_reduce_max: {
4162 auto GetIntrinsicID = [this](QualType QT) {
4163 if (auto *VecTy = QT->getAs<VectorType>())
4164 QT = VecTy->getElementType();
4165 else if (QT->isSizelessVectorType())
4166 QT = QT->getSizelessVectorEltType(CGM.getContext());
4167
4168 if (QT->isSignedIntegerType())
4169 return Intrinsic::vector_reduce_smax;
4170 if (QT->isUnsignedIntegerType())
4171 return Intrinsic::vector_reduce_umax;
4172 assert(QT->isFloatingType() && "must have a float here");
4173 return Intrinsic::vector_reduce_fmax;
4174 };
4176 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4177 }
4178
4179 case Builtin::BI__builtin_reduce_min: {
4180 auto GetIntrinsicID = [this](QualType QT) {
4181 if (auto *VecTy = QT->getAs<VectorType>())
4182 QT = VecTy->getElementType();
4183 else if (QT->isSizelessVectorType())
4184 QT = QT->getSizelessVectorEltType(CGM.getContext());
4185
4186 if (QT->isSignedIntegerType())
4187 return Intrinsic::vector_reduce_smin;
4188 if (QT->isUnsignedIntegerType())
4189 return Intrinsic::vector_reduce_umin;
4190 assert(QT->isFloatingType() && "must have a float here");
4191 return Intrinsic::vector_reduce_fmin;
4192 };
4193
4195 *this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
4196 }
4197
4198 case Builtin::BI__builtin_reduce_add:
4200 *this, E, Intrinsic::vector_reduce_add, "rdx.add"));
4201 case Builtin::BI__builtin_reduce_mul:
4203 *this, E, Intrinsic::vector_reduce_mul, "rdx.mul"));
4204 case Builtin::BI__builtin_reduce_xor:
4206 *this, E, Intrinsic::vector_reduce_xor, "rdx.xor"));
4207 case Builtin::BI__builtin_reduce_or:
4209 *this, E, Intrinsic::vector_reduce_or, "rdx.or"));
4210 case Builtin::BI__builtin_reduce_and:
4212 *this, E, Intrinsic::vector_reduce_and, "rdx.and"));
4213 case Builtin::BI__builtin_reduce_maximum:
4215 *this, E, Intrinsic::vector_reduce_fmaximum, "rdx.maximum"));
4216 case Builtin::BI__builtin_reduce_minimum:
4218 *this, E, Intrinsic::vector_reduce_fminimum, "rdx.minimum"));
4219 case Builtin::BI__builtin_reduce_assoc_fadd:
4220 case Builtin::BI__builtin_reduce_in_order_fadd: {
4221 llvm::Value *Vector = EmitScalarExpr(E->getArg(0));
4222 llvm::Type *ScalarTy = Vector->getType()->getScalarType();
4223 llvm::Value *StartValue = nullptr;
4224 if (E->getNumArgs() == 2)
4225 StartValue = Builder.CreateFPCast(EmitScalarExpr(E->getArg(1)), ScalarTy);
4226 llvm::Value *Args[] = {/*start_value=*/StartValue
4227 ? StartValue
4228 : llvm::ConstantFP::get(ScalarTy, -0.0F),
4229 /*vector=*/Vector};
4230 llvm::Function *F =
4231 CGM.getIntrinsic(Intrinsic::vector_reduce_fadd, Vector->getType());
4232 llvm::CallBase *Reduce = Builder.CreateCall(F, Args, "rdx.addf");
4233 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_reduce_assoc_fadd) {
4234 // `__builtin_reduce_assoc_fadd` is an associative reduction which
4235 // requires the reassoc FMF flag.
4236 llvm::FastMathFlags FMF;
4237 FMF.setAllowReassoc();
4238 cast<llvm::CallBase>(Reduce)->setFastMathFlags(FMF);
4239 }
4240 return RValue::get(Reduce);
4241 }
4242
4243 case Builtin::BI__builtin_matrix_transpose: {
4244 auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
4245 Value *MatValue = EmitScalarExpr(E->getArg(0));
4246 MatrixBuilder MB(Builder);
4247 Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
4248 MatrixTy->getNumColumns());
4249 return RValue::get(Result);
4250 }
4251
4252 case Builtin::BI__builtin_matrix_column_major_load: {
4253 MatrixBuilder MB(Builder);
4254 // Emit everything that isn't dependent on the first parameter type
4255 Value *Stride = EmitScalarExpr(E->getArg(3));
4256 const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
4257 auto *PtrTy = E->getArg(0)->getType()->getAs<PointerType>();
4258 assert(PtrTy && "arg0 must be of pointer type");
4259 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4260
4263 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4264 0);
4265 Value *Result = MB.CreateColumnMajorLoad(
4266 Src.getElementType(), Src.emitRawPointer(*this),
4267 Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
4268 ResultTy->getNumRows(), ResultTy->getNumColumns(), "matrix");
4269 return RValue::get(Result);
4270 }
4271
4272 case Builtin::BI__builtin_matrix_column_major_store: {
4273 MatrixBuilder MB(Builder);
4274 Value *Matrix = EmitScalarExpr(E->getArg(0));
4276 Value *Stride = EmitScalarExpr(E->getArg(2));
4277
4278 const auto *MatrixTy = E->getArg(0)->getType()->getAs<ConstantMatrixType>();
4279 auto *PtrTy = E->getArg(1)->getType()->getAs<PointerType>();
4280 assert(PtrTy && "arg1 must be of pointer type");
4281 bool IsVolatile = PtrTy->getPointeeType().isVolatileQualified();
4282
4284 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4285 0);
4286 Value *Result = MB.CreateColumnMajorStore(
4287 Matrix, Dst.emitRawPointer(*this),
4288 Align(Dst.getAlignment().getQuantity()), Stride, IsVolatile,
4289 MatrixTy->getNumRows(), MatrixTy->getNumColumns());
4291 return RValue::get(Result);
4292 }
4293
4294 case Builtin::BI__builtin_masked_load:
4295 case Builtin::BI__builtin_masked_expand_load: {
4296 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4297 llvm::Value *Ptr = EmitScalarExpr(E->getArg(1));
4298
4299 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4300 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4301 if (E->getNumArgs() > 2)
4302 PassThru = EmitScalarExpr(E->getArg(2));
4303
4304 CharUnits Align = CGM.getNaturalTypeAlignment(
4305 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4306
4307 llvm::Value *Result;
4308 if (BuiltinID == Builtin::BI__builtin_masked_load) {
4309 Result = Builder.CreateMaskedLoad(RetTy, Ptr, Align.getAsAlign(), Mask,
4310 PassThru, "masked_load");
4311 } else {
4312 Function *F = CGM.getIntrinsic(Intrinsic::masked_expandload, {RetTy});
4313 Result =
4314 Builder.CreateCall(F, {Ptr, Mask, PassThru}, "masked_expand_load");
4315 }
4316 return RValue::get(Result);
4317 };
4318 case Builtin::BI__builtin_masked_gather: {
4319 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4320 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4321 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4322
4323 llvm::Type *RetTy = CGM.getTypes().ConvertType(E->getType());
4324 CharUnits Align = CGM.getNaturalTypeAlignment(
4325 E->getType()->getAs<VectorType>()->getElementType(), nullptr);
4326
4327 llvm::Value *PassThru = llvm::PoisonValue::get(RetTy);
4328 if (E->getNumArgs() > 3)
4329 PassThru = EmitScalarExpr(E->getArg(3));
4330
4331 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4333 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4334
4335 llvm::Value *Result = Builder.CreateMaskedGather(
4336 RetTy, PtrVec, Align.getAsAlign(), Mask, PassThru, "masked_gather");
4337 return RValue::get(Result);
4338 }
4339 case Builtin::BI__builtin_masked_store:
4340 case Builtin::BI__builtin_masked_compress_store: {
4341 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4342 llvm::Value *Val = EmitScalarExpr(E->getArg(1));
4343 llvm::Value *Ptr = EmitScalarExpr(E->getArg(2));
4344
4345 QualType ValTy = E->getArg(1)->getType();
4346 llvm::Type *ValLLTy = CGM.getTypes().ConvertType(ValTy);
4347
4348 CharUnits Align = CGM.getNaturalTypeAlignment(
4350 nullptr);
4351
4352 if (BuiltinID == Builtin::BI__builtin_masked_store) {
4353 Builder.CreateMaskedStore(Val, Ptr, Align.getAsAlign(), Mask);
4354 } else {
4355 llvm::Function *F =
4356 CGM.getIntrinsic(llvm::Intrinsic::masked_compressstore, {ValLLTy});
4357 Builder.CreateCall(F, {Val, Ptr, Mask});
4358 }
4359 return RValue::get(nullptr);
4360 }
4361 case Builtin::BI__builtin_masked_scatter: {
4362 llvm::Value *Mask = EmitScalarExpr(E->getArg(0));
4363 llvm::Value *Idx = EmitScalarExpr(E->getArg(1));
4364 llvm::Value *Val = EmitScalarExpr(E->getArg(2));
4365 llvm::Value *Ptr = EmitScalarExpr(E->getArg(3));
4366
4367 CharUnits Align = CGM.getNaturalTypeAlignment(
4369 nullptr);
4370
4371 llvm::Type *ElemTy = CGM.getTypes().ConvertType(
4372 E->getArg(1)->getType()->getAs<VectorType>()->getElementType());
4373 llvm::Value *PtrVec = Builder.CreateGEP(ElemTy, Ptr, Idx);
4374
4375 Builder.CreateMaskedScatter(Val, PtrVec, Align.getAsAlign(), Mask);
4376 return RValue();
4377 }
4378 case Builtin::BI__builtin_isinf_sign: {
4379 // isinf_sign(x) -> fabs(x) == infinity ? (signbit(x) ? -1 : 1) : 0
4380 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4381 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4382 Value *Arg = EmitScalarExpr(E->getArg(0));
4383 Value *AbsArg = EmitFAbs(*this, Arg);
4384 Value *IsInf = Builder.CreateFCmpOEQ(
4385 AbsArg, ConstantFP::getInfinity(Arg->getType()), "isinf");
4386 Value *IsNeg = EmitSignBit(*this, Arg);
4387
4388 llvm::Type *IntTy = ConvertType(E->getType());
4389 Value *Zero = Constant::getNullValue(IntTy);
4390 Value *One = ConstantInt::get(IntTy, 1);
4391 Value *NegativeOne = ConstantInt::getAllOnesValue(IntTy);
4392 Value *SignResult = Builder.CreateSelect(IsNeg, NegativeOne, One);
4393 Value *Result = Builder.CreateSelect(IsInf, SignResult, Zero);
4394 return RValue::get(Result);
4395 }
4396
4397 case Builtin::BI__builtin_flt_rounds: {
4398 Function *F = CGM.getIntrinsic(Intrinsic::get_rounding);
4399
4400 llvm::Type *ResultType = ConvertType(E->getType());
4401 Value *Result = Builder.CreateCall(F);
4402 if (Result->getType() != ResultType)
4403 Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
4404 "cast");
4405 return RValue::get(Result);
4406 }
4407
4408 case Builtin::BI__builtin_set_flt_rounds: {
4409 Function *F = CGM.getIntrinsic(Intrinsic::set_rounding);
4410
4411 Value *V = EmitScalarExpr(E->getArg(0));
4412 Builder.CreateCall(F, V);
4413 return RValue::get(nullptr);
4414 }
4415
4416 case Builtin::BI__builtin_fpclassify: {
4417 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
4418 // FIXME: for strictfp/IEEE-754 we need to not trap on SNaN here.
4419 Value *V = EmitScalarExpr(E->getArg(5));
4420 llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
4421
4422 // Create Result
4423 BasicBlock *Begin = Builder.GetInsertBlock();
4424 BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
4425 Builder.SetInsertPoint(End);
4426 PHINode *Result =
4427 Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
4428 "fpclassify_result");
4429
4430 // if (V==0) return FP_ZERO
4431 Builder.SetInsertPoint(Begin);
4432 Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
4433 "iszero");
4434 Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
4435 BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
4436 Builder.CreateCondBr(IsZero, End, NotZero);
4437 Result->addIncoming(ZeroLiteral, Begin);
4438
4439 // if (V != V) return FP_NAN
4440 Builder.SetInsertPoint(NotZero);
4441 Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
4442 Value *NanLiteral = EmitScalarExpr(E->getArg(0));
4443 BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
4444 Builder.CreateCondBr(IsNan, End, NotNan);
4445 Result->addIncoming(NanLiteral, NotZero);
4446
4447 // if (fabs(V) == infinity) return FP_INFINITY
4448 Builder.SetInsertPoint(NotNan);
4449 Value *VAbs = EmitFAbs(*this, V);
4450 Value *IsInf =
4451 Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
4452 "isinf");
4453 Value *InfLiteral = EmitScalarExpr(E->getArg(1));
4454 BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
4455 Builder.CreateCondBr(IsInf, End, NotInf);
4456 Result->addIncoming(InfLiteral, NotNan);
4457
4458 // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
4459 Builder.SetInsertPoint(NotInf);
4460 APFloat Smallest = APFloat::getSmallestNormalized(
4461 getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
4462 Value *IsNormal =
4463 Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
4464 "isnormal");
4465 Value *NormalResult =
4466 Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
4467 EmitScalarExpr(E->getArg(3)));
4468 Builder.CreateBr(End);
4469 Result->addIncoming(NormalResult, NotInf);
4470
4471 // return Result
4472 Builder.SetInsertPoint(End);
4473 return RValue::get(Result);
4474 }
4475
4476 // An alloca will always return a pointer to the alloca (stack) address
4477 // space. This address space need not be the same as the AST / Language
4478 // default (e.g. in C / C++ auto vars are in the generic address space). At
4479 // the AST level this is handled within CreateTempAlloca et al., but for the
4480 // builtin / dynamic alloca we have to handle it here. We use an explicit cast
4481 // instead of passing an AS to CreateAlloca so as to not inhibit optimisation.
4482 case Builtin::BIalloca:
4483 case Builtin::BI_alloca:
4484 case Builtin::BI__builtin_alloca_uninitialized:
4485 case Builtin::BI__builtin_alloca: {
4486 Value *Size = EmitScalarExpr(E->getArg(0));
4487 const TargetInfo &TI = getContext().getTargetInfo();
4488 // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
4489 const Align SuitableAlignmentInBytes =
4490 CGM.getContext()
4491 .toCharUnitsFromBits(TI.getSuitableAlign())
4492 .getAsAlign();
4493 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4494 AI->setAlignment(SuitableAlignmentInBytes);
4495 if (BuiltinID != Builtin::BI__builtin_alloca_uninitialized)
4496 initializeAlloca(*this, AI, Size, SuitableAlignmentInBytes);
4497 if (AI->getAddressSpace() !=
4498 CGM.getContext().getTargetAddressSpace(
4500 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4501 return RValue::get(performAddrSpaceCast(AI, Ty));
4502 }
4503 return RValue::get(AI);
4504 }
4505
4506 case Builtin::BI__builtin_alloca_with_align_uninitialized:
4507 case Builtin::BI__builtin_alloca_with_align: {
4508 Value *Size = EmitScalarExpr(E->getArg(0));
4509 Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
4510 auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
4511 unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
4512 const Align AlignmentInBytes =
4513 CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getAsAlign();
4514 AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
4515 AI->setAlignment(AlignmentInBytes);
4516 if (BuiltinID != Builtin::BI__builtin_alloca_with_align_uninitialized)
4517 initializeAlloca(*this, AI, Size, AlignmentInBytes);
4518 if (AI->getAddressSpace() !=
4519 CGM.getContext().getTargetAddressSpace(
4521 llvm::Type *Ty = CGM.getTypes().ConvertType(E->getType());
4522 return RValue::get(performAddrSpaceCast(AI, Ty));
4523 }
4524 return RValue::get(AI);
4525 }
4526
4527 case Builtin::BI__builtin_infer_alloc_token: {
4528 llvm::MDNode *MDN = buildAllocToken(E);
4529 llvm::Value *MDV = MetadataAsValue::get(getLLVMContext(), MDN);
4530 llvm::Function *F =
4531 CGM.getIntrinsic(llvm::Intrinsic::alloc_token_id, {IntPtrTy});
4532 llvm::CallBase *TokenID = Builder.CreateCall(F, MDV);
4533 return RValue::get(TokenID);
4534 }
4535
4536 case Builtin::BIbzero:
4537 case Builtin::BI__builtin_bzero: {
4539 Value *SizeVal = EmitScalarExpr(E->getArg(1));
4540 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4541 E->getArg(0)->getExprLoc(), FD, 0);
4542 auto *I = Builder.CreateMemSet(Dest, Builder.getInt8(0), SizeVal, false);
4543 addInstToNewSourceAtom(I, nullptr);
4544 return RValue::get(nullptr);
4545 }
4546
4547 case Builtin::BIbcopy:
4548 case Builtin::BI__builtin_bcopy: {
4551 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4553 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4554 0);
4556 E->getArg(1)->getType(), E->getArg(1)->getExprLoc(), FD,
4557 0);
4558 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4559 addInstToNewSourceAtom(I, nullptr);
4560 return RValue::get(nullptr);
4561 }
4562
4563 case Builtin::BImemcpy:
4564 case Builtin::BI__builtin_memcpy:
4565 case Builtin::BImempcpy:
4566 case Builtin::BI__builtin_mempcpy: {
4569 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4570 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4571 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4572 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4573 addInstToNewSourceAtom(I, nullptr);
4574 if (BuiltinID == Builtin::BImempcpy ||
4575 BuiltinID == Builtin::BI__builtin_mempcpy)
4576 return RValue::get(Builder.CreateInBoundsGEP(
4577 Dest.getElementType(), Dest.emitRawPointer(*this), SizeVal));
4578 else
4579 return RValue::get(Dest, *this);
4580 }
4581
4582 case Builtin::BI__builtin_memcpy_inline: {
4585 uint64_t Size =
4586 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4587 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4588 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4589 auto *I = Builder.CreateMemCpyInline(Dest, Src, Size);
4590 addInstToNewSourceAtom(I, nullptr);
4591 return RValue::get(nullptr);
4592 }
4593
4594 case Builtin::BI__builtin_char_memchr:
4595 BuiltinID = Builtin::BI__builtin_memchr;
4596 break;
4597
4598 case Builtin::BI__builtin___memcpy_chk: {
4599 // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
4600 Expr::EvalResult SizeResult, DstSizeResult;
4601 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4602 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4603 break;
4604 llvm::APSInt Size = SizeResult.Val.getInt();
4605 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4606 if (Size.ugt(DstSize))
4607 break;
4610 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4611 auto *I = Builder.CreateMemCpy(Dest, Src, SizeVal, false);
4612 addInstToNewSourceAtom(I, nullptr);
4613 return RValue::get(Dest, *this);
4614 }
4615
4616 case Builtin::BI__builtin_objc_memmove_collectable: {
4617 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
4618 Address SrcAddr = EmitPointerWithAlignment(E->getArg(1));
4619 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4620 CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
4621 DestAddr, SrcAddr, SizeVal);
4622 return RValue::get(DestAddr, *this);
4623 }
4624
4625 case Builtin::BI__builtin___memmove_chk: {
4626 // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
4627 Expr::EvalResult SizeResult, DstSizeResult;
4628 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4629 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4630 break;
4631 llvm::APSInt Size = SizeResult.Val.getInt();
4632 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4633 if (Size.ugt(DstSize))
4634 break;
4637 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4638 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4639 addInstToNewSourceAtom(I, nullptr);
4640 return RValue::get(Dest, *this);
4641 }
4642
4643 case Builtin::BI__builtin_trivially_relocate:
4644 case Builtin::BImemmove:
4645 case Builtin::BI__builtin_memmove: {
4648 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4649 if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_trivially_relocate)
4650 SizeVal = Builder.CreateMul(
4651 SizeVal,
4652 ConstantInt::get(
4653 SizeVal->getType(),
4654 getContext()
4655 .getTypeSizeInChars(E->getArg(0)->getType()->getPointeeType())
4656 .getQuantity()));
4657 EmitArgCheck(TCK_Store, Dest, E->getArg(0), 0);
4658 EmitArgCheck(TCK_Load, Src, E->getArg(1), 1);
4659 auto *I = Builder.CreateMemMove(Dest, Src, SizeVal, false);
4660 addInstToNewSourceAtom(I, nullptr);
4661 return RValue::get(Dest, *this);
4662 }
4663 case Builtin::BImemset:
4664 case Builtin::BI__builtin_memset: {
4666 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4667 Builder.getInt8Ty());
4668 Value *SizeVal = EmitScalarExpr(E->getArg(2));
4669 EmitNonNullArgCheck(Dest, E->getArg(0)->getType(),
4670 E->getArg(0)->getExprLoc(), FD, 0);
4671 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4672 addInstToNewSourceAtom(I, ByteVal);
4673 return RValue::get(Dest, *this);
4674 }
4675 case Builtin::BI__builtin_memset_inline: {
4677 Value *ByteVal =
4678 Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
4679 uint64_t Size =
4680 E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
4682 E->getArg(0)->getType(), E->getArg(0)->getExprLoc(), FD,
4683 0);
4684 auto *I = Builder.CreateMemSetInline(Dest, ByteVal, Size);
4685 addInstToNewSourceAtom(I, nullptr);
4686 return RValue::get(nullptr);
4687 }
4688 case Builtin::BI__builtin___memset_chk: {
4689 // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
4690 Expr::EvalResult SizeResult, DstSizeResult;
4691 if (!E->getArg(2)->EvaluateAsInt(SizeResult, CGM.getContext()) ||
4692 !E->getArg(3)->EvaluateAsInt(DstSizeResult, CGM.getContext()))
4693 break;
4694 llvm::APSInt Size = SizeResult.Val.getInt();
4695 llvm::APSInt DstSize = DstSizeResult.Val.getInt();
4696 if (Size.ugt(DstSize))
4697 break;
4699 Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
4700 Builder.getInt8Ty());
4701 Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
4702 auto *I = Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
4703 addInstToNewSourceAtom(I, nullptr);
4704 return RValue::get(Dest, *this);
4705 }
4706 case Builtin::BI__builtin_wmemchr: {
4707 // The MSVC runtime library does not provide a definition of wmemchr, so we
4708 // need an inline implementation.
4709 if (!getTarget().getTriple().isOSMSVCRT())
4710 break;
4711
4712 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4713 Value *Str = EmitScalarExpr(E->getArg(0));
4714 Value *Chr = EmitScalarExpr(E->getArg(1));
4715 Value *Size = EmitScalarExpr(E->getArg(2));
4716
4717 BasicBlock *Entry = Builder.GetInsertBlock();
4718 BasicBlock *CmpEq = createBasicBlock("wmemchr.eq");
4719 BasicBlock *Next = createBasicBlock("wmemchr.next");
4720 BasicBlock *Exit = createBasicBlock("wmemchr.exit");
4721 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4722 Builder.CreateCondBr(SizeEq0, Exit, CmpEq);
4723
4724 EmitBlock(CmpEq);
4725 PHINode *StrPhi = Builder.CreatePHI(Str->getType(), 2);
4726 StrPhi->addIncoming(Str, Entry);
4727 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4728 SizePhi->addIncoming(Size, Entry);
4729 CharUnits WCharAlign =
4731 Value *StrCh = Builder.CreateAlignedLoad(WCharTy, StrPhi, WCharAlign);
4732 Value *FoundChr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 0);
4733 Value *StrEqChr = Builder.CreateICmpEQ(StrCh, Chr);
4734 Builder.CreateCondBr(StrEqChr, Exit, Next);
4735
4736 EmitBlock(Next);
4737 Value *NextStr = Builder.CreateConstInBoundsGEP1_32(WCharTy, StrPhi, 1);
4738 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4739 Value *NextSizeEq0 =
4740 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4741 Builder.CreateCondBr(NextSizeEq0, Exit, CmpEq);
4742 StrPhi->addIncoming(NextStr, Next);
4743 SizePhi->addIncoming(NextSize, Next);
4744
4745 EmitBlock(Exit);
4746 PHINode *Ret = Builder.CreatePHI(Str->getType(), 3);
4747 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Entry);
4748 Ret->addIncoming(llvm::Constant::getNullValue(Str->getType()), Next);
4749 Ret->addIncoming(FoundChr, CmpEq);
4750 return RValue::get(Ret);
4751 }
4752 case Builtin::BI__builtin_wmemcmp: {
4753 // The MSVC runtime library does not provide a definition of wmemcmp, so we
4754 // need an inline implementation.
4755 if (!getTarget().getTriple().isOSMSVCRT())
4756 break;
4757
4758 llvm::Type *WCharTy = ConvertType(getContext().WCharTy);
4759
4760 Value *Dst = EmitScalarExpr(E->getArg(0));
4761 Value *Src = EmitScalarExpr(E->getArg(1));
4762 Value *Size = EmitScalarExpr(E->getArg(2));
4763
4764 BasicBlock *Entry = Builder.GetInsertBlock();
4765 BasicBlock *CmpGT = createBasicBlock("wmemcmp.gt");
4766 BasicBlock *CmpLT = createBasicBlock("wmemcmp.lt");
4767 BasicBlock *Next = createBasicBlock("wmemcmp.next");
4768 BasicBlock *Exit = createBasicBlock("wmemcmp.exit");
4769 Value *SizeEq0 = Builder.CreateICmpEQ(Size, ConstantInt::get(SizeTy, 0));
4770 Builder.CreateCondBr(SizeEq0, Exit, CmpGT);
4771
4772 EmitBlock(CmpGT);
4773 PHINode *DstPhi = Builder.CreatePHI(Dst->getType(), 2);
4774 DstPhi->addIncoming(Dst, Entry);
4775 PHINode *SrcPhi = Builder.CreatePHI(Src->getType(), 2);
4776 SrcPhi->addIncoming(Src, Entry);
4777 PHINode *SizePhi = Builder.CreatePHI(SizeTy, 2);
4778 SizePhi->addIncoming(Size, Entry);
4779 CharUnits WCharAlign =
4781 Value *DstCh = Builder.CreateAlignedLoad(WCharTy, DstPhi, WCharAlign);
4782 Value *SrcCh = Builder.CreateAlignedLoad(WCharTy, SrcPhi, WCharAlign);
4783 Value *DstGtSrc = Builder.CreateICmpUGT(DstCh, SrcCh);
4784 Builder.CreateCondBr(DstGtSrc, Exit, CmpLT);
4785
4786 EmitBlock(CmpLT);
4787 Value *DstLtSrc = Builder.CreateICmpULT(DstCh, SrcCh);
4788 Builder.CreateCondBr(DstLtSrc, Exit, Next);
4789
4790 EmitBlock(Next);
4791 Value *NextDst = Builder.CreateConstInBoundsGEP1_32(WCharTy, DstPhi, 1);
4792 Value *NextSrc = Builder.CreateConstInBoundsGEP1_32(WCharTy, SrcPhi, 1);
4793 Value *NextSize = Builder.CreateSub(SizePhi, ConstantInt::get(SizeTy, 1));
4794 Value *NextSizeEq0 =
4795 Builder.CreateICmpEQ(NextSize, ConstantInt::get(SizeTy, 0));
4796 Builder.CreateCondBr(NextSizeEq0, Exit, CmpGT);
4797 DstPhi->addIncoming(NextDst, Next);
4798 SrcPhi->addIncoming(NextSrc, Next);
4799 SizePhi->addIncoming(NextSize, Next);
4800
4801 EmitBlock(Exit);
4802 PHINode *Ret = Builder.CreatePHI(IntTy, 4);
4803 Ret->addIncoming(ConstantInt::get(IntTy, 0), Entry);
4804 Ret->addIncoming(ConstantInt::get(IntTy, 1), CmpGT);
4805 Ret->addIncoming(ConstantInt::getAllOnesValue(IntTy), CmpLT);
4806 Ret->addIncoming(ConstantInt::get(IntTy, 0), Next);
4807 return RValue::get(Ret);
4808 }
4809 case Builtin::BI__builtin_dwarf_cfa: {
4810 // The offset in bytes from the first argument to the CFA.
4811 //
4812 // Why on earth is this in the frontend? Is there any reason at
4813 // all that the backend can't reasonably determine this while
4814 // lowering llvm.eh.dwarf.cfa()?
4815 //
4816 // TODO: If there's a satisfactory reason, add a target hook for
4817 // this instead of hard-coding 0, which is correct for most targets.
4818 int32_t Offset = 0;
4819
4820 Function *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
4821 return RValue::get(Builder.CreateCall(F,
4822 llvm::ConstantInt::get(Int32Ty, Offset)));
4823 }
4824 case Builtin::BI__builtin_return_address: {
4825 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4826 getContext().UnsignedIntTy);
4827 Function *F =
4828 CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramPtrTy});
4829 return RValue::get(Builder.CreateCall(F, Depth));
4830 }
4831 case Builtin::BI_ReturnAddress: {
4832 Function *F =
4833 CGM.getIntrinsic(Intrinsic::returnaddress, {CGM.ProgramPtrTy});
4834 return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
4835 }
4836 case Builtin::BI__builtin_frame_address: {
4837 Value *Depth = ConstantEmitter(*this).emitAbstract(E->getArg(0),
4838 getContext().UnsignedIntTy);
4839 Function *F = CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy);
4840 return RValue::get(Builder.CreateCall(F, Depth));
4841 }
4842 case Builtin::BI__builtin_stack_address: {
4843 return RValue::get(Builder.CreateCall(
4844 CGM.getIntrinsic(Intrinsic::stackaddress, AllocaInt8PtrTy)));
4845 }
4846 case Builtin::BI__builtin_extract_return_addr: {
4849 return RValue::get(Result);
4850 }
4851 case Builtin::BI__builtin_frob_return_addr: {
4854 return RValue::get(Result);
4855 }
4856 case Builtin::BI__builtin_dwarf_sp_column: {
4857 llvm::IntegerType *Ty
4860 if (Column == -1) {
4861 CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
4862 return RValue::get(llvm::UndefValue::get(Ty));
4863 }
4864 return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
4865 }
4866 case Builtin::BI__builtin_init_dwarf_reg_size_table: {
4868 if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
4869 CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
4870 return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
4871 }
4872 case Builtin::BI__builtin_eh_return: {
4873 Value *Int = EmitScalarExpr(E->getArg(0));
4874 Value *Ptr = EmitScalarExpr(E->getArg(1));
4875
4876 llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
4877 assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
4878 "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
4879 Function *F =
4880 CGM.getIntrinsic(IntTy->getBitWidth() == 32 ? Intrinsic::eh_return_i32
4881 : Intrinsic::eh_return_i64);
4882 Builder.CreateCall(F, {Int, Ptr});
4883 Builder.CreateUnreachable();
4884
4885 // We do need to preserve an insertion point.
4886 EmitBlock(createBasicBlock("builtin_eh_return.cont"));
4887
4888 return RValue::get(nullptr);
4889 }
4890 case Builtin::BI__builtin_unwind_init: {
4891 Function *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
4892 Builder.CreateCall(F);
4893 return RValue::get(nullptr);
4894 }
4895 case Builtin::BI__builtin_extend_pointer: {
4896 // Extends a pointer to the size of an _Unwind_Word, which is
4897 // uint64_t on all platforms. Generally this gets poked into a
4898 // register and eventually used as an address, so if the
4899 // addressing registers are wider than pointers and the platform
4900 // doesn't implicitly ignore high-order bits when doing
4901 // addressing, we need to make sure we zext / sext based on
4902 // the platform's expectations.
4903 //
4904 // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
4905
4906 // Cast the pointer to intptr_t.
4907 Value *Ptr = EmitScalarExpr(E->getArg(0));
4908 Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
4909
4910 // If that's 64 bits, we're done.
4911 if (IntPtrTy->getBitWidth() == 64)
4912 return RValue::get(Result);
4913
4914 // Otherwise, ask the codegen data what to do.
4915 if (getTargetHooks().extendPointerWithSExt())
4916 return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
4917 else
4918 return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
4919 }
4920 case Builtin::BI__builtin_setjmp: {
4921 // Buffer is a void**.
4923
4924 if (getTarget().getTriple().getArch() == llvm::Triple::systemz) {
4925 // On this target, the back end fills in the context buffer completely.
4926 // It doesn't really matter if the frontend stores to the buffer before
4927 // calling setjmp, the back-end is going to overwrite them anyway.
4928 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4929 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4930 }
4931
4932 // Store the frame pointer to the setjmp buffer.
4933 Value *FrameAddr = Builder.CreateCall(
4934 CGM.getIntrinsic(Intrinsic::frameaddress, AllocaInt8PtrTy),
4935 ConstantInt::get(Int32Ty, 0));
4936 Builder.CreateStore(FrameAddr, Buf);
4937
4938 // Store the stack pointer to the setjmp buffer.
4939 Value *StackAddr = Builder.CreateStackSave();
4940 assert(Buf.emitRawPointer(*this)->getType() == StackAddr->getType());
4941
4942 Address StackSaveSlot = Builder.CreateConstInBoundsGEP(Buf, 2);
4943 Builder.CreateStore(StackAddr, StackSaveSlot);
4944
4945 // Call LLVM's EH setjmp, which is lightweight.
4946 Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
4947 return RValue::get(Builder.CreateCall(F, Buf.emitRawPointer(*this)));
4948 }
4949 case Builtin::BI__builtin_longjmp: {
4950 Value *Buf = EmitScalarExpr(E->getArg(0));
4951
4952 // Call LLVM's EH longjmp, which is lightweight.
4953 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
4954
4955 // longjmp doesn't return; mark this as unreachable.
4956 Builder.CreateUnreachable();
4957
4958 // We do need to preserve an insertion point.
4959 EmitBlock(createBasicBlock("longjmp.cont"));
4960
4961 return RValue::get(nullptr);
4962 }
4963 case Builtin::BI__builtin_launder: {
4964 const Expr *Arg = E->getArg(0);
4965 QualType ArgTy = Arg->getType()->getPointeeType();
4966 Value *Ptr = EmitScalarExpr(Arg);
4967 if (TypeRequiresBuiltinLaunder(CGM, ArgTy))
4968 Ptr = Builder.CreateLaunderInvariantGroup(Ptr);
4969
4970 return RValue::get(Ptr);
4971 }
4972 case Builtin::BI__sync_fetch_and_add:
4973 case Builtin::BI__sync_fetch_and_sub:
4974 case Builtin::BI__sync_fetch_and_or:
4975 case Builtin::BI__sync_fetch_and_and:
4976 case Builtin::BI__sync_fetch_and_xor:
4977 case Builtin::BI__sync_fetch_and_nand:
4978 case Builtin::BI__sync_add_and_fetch:
4979 case Builtin::BI__sync_sub_and_fetch:
4980 case Builtin::BI__sync_and_and_fetch:
4981 case Builtin::BI__sync_or_and_fetch:
4982 case Builtin::BI__sync_xor_and_fetch:
4983 case Builtin::BI__sync_nand_and_fetch:
4984 case Builtin::BI__sync_val_compare_and_swap:
4985 case Builtin::BI__sync_bool_compare_and_swap:
4986 case Builtin::BI__sync_lock_test_and_set:
4987 case Builtin::BI__sync_lock_release:
4988 case Builtin::BI__sync_swap:
4989 llvm_unreachable("Shouldn't make it through sema");
4990 case Builtin::BI__sync_fetch_and_add_1:
4991 case Builtin::BI__sync_fetch_and_add_2:
4992 case Builtin::BI__sync_fetch_and_add_4:
4993 case Builtin::BI__sync_fetch_and_add_8:
4994 case Builtin::BI__sync_fetch_and_add_16:
4995 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
4996 case Builtin::BI__sync_fetch_and_sub_1:
4997 case Builtin::BI__sync_fetch_and_sub_2:
4998 case Builtin::BI__sync_fetch_and_sub_4:
4999 case Builtin::BI__sync_fetch_and_sub_8:
5000 case Builtin::BI__sync_fetch_and_sub_16:
5001 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
5002 case Builtin::BI__sync_fetch_and_or_1:
5003 case Builtin::BI__sync_fetch_and_or_2:
5004 case Builtin::BI__sync_fetch_and_or_4:
5005 case Builtin::BI__sync_fetch_and_or_8:
5006 case Builtin::BI__sync_fetch_and_or_16:
5007 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
5008 case Builtin::BI__sync_fetch_and_and_1:
5009 case Builtin::BI__sync_fetch_and_and_2:
5010 case Builtin::BI__sync_fetch_and_and_4:
5011 case Builtin::BI__sync_fetch_and_and_8:
5012 case Builtin::BI__sync_fetch_and_and_16:
5013 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
5014 case Builtin::BI__sync_fetch_and_xor_1:
5015 case Builtin::BI__sync_fetch_and_xor_2:
5016 case Builtin::BI__sync_fetch_and_xor_4:
5017 case Builtin::BI__sync_fetch_and_xor_8:
5018 case Builtin::BI__sync_fetch_and_xor_16:
5019 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
5020 case Builtin::BI__sync_fetch_and_nand_1:
5021 case Builtin::BI__sync_fetch_and_nand_2:
5022 case Builtin::BI__sync_fetch_and_nand_4:
5023 case Builtin::BI__sync_fetch_and_nand_8:
5024 case Builtin::BI__sync_fetch_and_nand_16:
5025 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
5026
5027 // Clang extensions: not overloaded yet.
5028 case Builtin::BI__sync_fetch_and_min:
5029 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
5030 case Builtin::BI__sync_fetch_and_max:
5031 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
5032 case Builtin::BI__sync_fetch_and_umin:
5033 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
5034 case Builtin::BI__sync_fetch_and_umax:
5035 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
5036
5037 case Builtin::BI__sync_add_and_fetch_1:
5038 case Builtin::BI__sync_add_and_fetch_2:
5039 case Builtin::BI__sync_add_and_fetch_4:
5040 case Builtin::BI__sync_add_and_fetch_8:
5041 case Builtin::BI__sync_add_and_fetch_16:
5042 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
5043 llvm::Instruction::Add);
5044 case Builtin::BI__sync_sub_and_fetch_1:
5045 case Builtin::BI__sync_sub_and_fetch_2:
5046 case Builtin::BI__sync_sub_and_fetch_4:
5047 case Builtin::BI__sync_sub_and_fetch_8:
5048 case Builtin::BI__sync_sub_and_fetch_16:
5049 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
5050 llvm::Instruction::Sub);
5051 case Builtin::BI__sync_and_and_fetch_1:
5052 case Builtin::BI__sync_and_and_fetch_2:
5053 case Builtin::BI__sync_and_and_fetch_4:
5054 case Builtin::BI__sync_and_and_fetch_8:
5055 case Builtin::BI__sync_and_and_fetch_16:
5056 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
5057 llvm::Instruction::And);
5058 case Builtin::BI__sync_or_and_fetch_1:
5059 case Builtin::BI__sync_or_and_fetch_2:
5060 case Builtin::BI__sync_or_and_fetch_4:
5061 case Builtin::BI__sync_or_and_fetch_8:
5062 case Builtin::BI__sync_or_and_fetch_16:
5063 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
5064 llvm::Instruction::Or);
5065 case Builtin::BI__sync_xor_and_fetch_1:
5066 case Builtin::BI__sync_xor_and_fetch_2:
5067 case Builtin::BI__sync_xor_and_fetch_4:
5068 case Builtin::BI__sync_xor_and_fetch_8:
5069 case Builtin::BI__sync_xor_and_fetch_16:
5070 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
5071 llvm::Instruction::Xor);
5072 case Builtin::BI__sync_nand_and_fetch_1:
5073 case Builtin::BI__sync_nand_and_fetch_2:
5074 case Builtin::BI__sync_nand_and_fetch_4:
5075 case Builtin::BI__sync_nand_and_fetch_8:
5076 case Builtin::BI__sync_nand_and_fetch_16:
5077 return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
5078 llvm::Instruction::And, true);
5079
5080 case Builtin::BI__sync_val_compare_and_swap_1:
5081 case Builtin::BI__sync_val_compare_and_swap_2:
5082 case Builtin::BI__sync_val_compare_and_swap_4:
5083 case Builtin::BI__sync_val_compare_and_swap_8:
5084 case Builtin::BI__sync_val_compare_and_swap_16:
5086 *this, E, false, AtomicOrdering::SequentiallyConsistent,
5087 AtomicOrdering::SequentiallyConsistent));
5088
5089 case Builtin::BI__sync_bool_compare_and_swap_1:
5090 case Builtin::BI__sync_bool_compare_and_swap_2:
5091 case Builtin::BI__sync_bool_compare_and_swap_4:
5092 case Builtin::BI__sync_bool_compare_and_swap_8:
5093 case Builtin::BI__sync_bool_compare_and_swap_16:
5095 *this, E, true, AtomicOrdering::SequentiallyConsistent,
5096 AtomicOrdering::SequentiallyConsistent));
5097
5098 case Builtin::BI__sync_swap_1:
5099 case Builtin::BI__sync_swap_2:
5100 case Builtin::BI__sync_swap_4:
5101 case Builtin::BI__sync_swap_8:
5102 case Builtin::BI__sync_swap_16:
5103 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5104
5105 case Builtin::BI__sync_lock_test_and_set_1:
5106 case Builtin::BI__sync_lock_test_and_set_2:
5107 case Builtin::BI__sync_lock_test_and_set_4:
5108 case Builtin::BI__sync_lock_test_and_set_8:
5109 case Builtin::BI__sync_lock_test_and_set_16:
5110 return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
5111
5112 case Builtin::BI__sync_lock_release_1:
5113 case Builtin::BI__sync_lock_release_2:
5114 case Builtin::BI__sync_lock_release_4:
5115 case Builtin::BI__sync_lock_release_8:
5116 case Builtin::BI__sync_lock_release_16: {
5117 Address Ptr = CheckAtomicAlignment(*this, E);
5118 QualType ElTy = E->getArg(0)->getType()->getPointeeType();
5119
5120 llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
5121 getContext().getTypeSize(ElTy));
5122 llvm::StoreInst *Store =
5123 Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
5124 Store->setAtomic(llvm::AtomicOrdering::Release);
5125 return RValue::get(nullptr);
5126 }
5127
5128 case Builtin::BI__sync_synchronize: {
5129 // We assume this is supposed to correspond to a C++0x-style
5130 // sequentially-consistent fence (i.e. this is only usable for
5131 // synchronization, not device I/O or anything like that). This intrinsic
5132 // is really badly designed in the sense that in theory, there isn't
5133 // any way to safely use it... but in practice, it mostly works
5134 // to use it with non-atomic loads and stores to get acquire/release
5135 // semantics.
5136 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
5137 return RValue::get(nullptr);
5138 }
5139
5140 case Builtin::BI__builtin_nontemporal_load:
5141 return RValue::get(EmitNontemporalLoad(*this, E));
5142 case Builtin::BI__builtin_nontemporal_store:
5143 return RValue::get(EmitNontemporalStore(*this, E));
5144 case Builtin::BI__c11_atomic_is_lock_free:
5145 case Builtin::BI__atomic_is_lock_free: {
5146 // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
5147 // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
5148 // _Atomic(T) is always properly-aligned.
5149 const char *LibCallName = "__atomic_is_lock_free";
5150 CallArgList Args;
5151 Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
5152 getContext().getSizeType());
5153 if (BuiltinID == Builtin::BI__atomic_is_lock_free)
5154 Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
5156 else
5157 Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
5159 const CGFunctionInfo &FuncInfo =
5160 CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
5161 llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
5162 llvm::FunctionCallee Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
5163 return EmitCall(FuncInfo, CGCallee::forDirect(Func),
5164 ReturnValueSlot(), Args);
5165 }
5166
5167 case Builtin::BI__atomic_thread_fence:
5168 case Builtin::BI__atomic_signal_fence:
5169 case Builtin::BI__c11_atomic_thread_fence:
5170 case Builtin::BI__c11_atomic_signal_fence: {
5171 llvm::SyncScope::ID SSID;
5172 if (BuiltinID == Builtin::BI__atomic_signal_fence ||
5173 BuiltinID == Builtin::BI__c11_atomic_signal_fence)
5174 SSID = llvm::SyncScope::SingleThread;
5175 else
5176 SSID = llvm::SyncScope::System;
5177 Value *Order = EmitScalarExpr(E->getArg(0));
5178 if (isa<llvm::ConstantInt>(Order)) {
5179 int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
5180 switch (ord) {
5181 case 0: // memory_order_relaxed
5182 default: // invalid order
5183 break;
5184 case 1: // memory_order_consume
5185 case 2: // memory_order_acquire
5186 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5187 break;
5188 case 3: // memory_order_release
5189 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5190 break;
5191 case 4: // memory_order_acq_rel
5192 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5193 break;
5194 case 5: // memory_order_seq_cst
5195 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5196 break;
5197 }
5198 return RValue::get(nullptr);
5199 }
5200
5201 llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
5202 AcquireBB = createBasicBlock("acquire", CurFn);
5203 ReleaseBB = createBasicBlock("release", CurFn);
5204 AcqRelBB = createBasicBlock("acqrel", CurFn);
5205 SeqCstBB = createBasicBlock("seqcst", CurFn);
5206 llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
5207
5208 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5209 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5210
5211 Builder.SetInsertPoint(AcquireBB);
5212 Builder.CreateFence(llvm::AtomicOrdering::Acquire, SSID);
5213 Builder.CreateBr(ContBB);
5214 SI->addCase(Builder.getInt32(1), AcquireBB);
5215 SI->addCase(Builder.getInt32(2), AcquireBB);
5216
5217 Builder.SetInsertPoint(ReleaseBB);
5218 Builder.CreateFence(llvm::AtomicOrdering::Release, SSID);
5219 Builder.CreateBr(ContBB);
5220 SI->addCase(Builder.getInt32(3), ReleaseBB);
5221
5222 Builder.SetInsertPoint(AcqRelBB);
5223 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease, SSID);
5224 Builder.CreateBr(ContBB);
5225 SI->addCase(Builder.getInt32(4), AcqRelBB);
5226
5227 Builder.SetInsertPoint(SeqCstBB);
5228 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent, SSID);
5229 Builder.CreateBr(ContBB);
5230 SI->addCase(Builder.getInt32(5), SeqCstBB);
5231
5232 Builder.SetInsertPoint(ContBB);
5233 return RValue::get(nullptr);
5234 }
5235 case Builtin::BI__scoped_atomic_thread_fence: {
5237
5238 Value *Order = EmitScalarExpr(E->getArg(0));
5239 Value *Scope = EmitScalarExpr(E->getArg(1));
5240 auto Ord = dyn_cast<llvm::ConstantInt>(Order);
5241 auto Scp = dyn_cast<llvm::ConstantInt>(Scope);
5242 if (Ord && Scp) {
5243 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5244 ? ScopeModel->map(Scp->getZExtValue())
5245 : ScopeModel->map(ScopeModel->getFallBackValue());
5246 switch (Ord->getZExtValue()) {
5247 case 0: // memory_order_relaxed
5248 default: // invalid order
5249 break;
5250 case 1: // memory_order_consume
5251 case 2: // memory_order_acquire
5252 Builder.CreateFence(
5253 llvm::AtomicOrdering::Acquire,
5254 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5255 llvm::AtomicOrdering::Acquire,
5256 getLLVMContext()));
5257 break;
5258 case 3: // memory_order_release
5259 Builder.CreateFence(
5260 llvm::AtomicOrdering::Release,
5261 getTargetHooks().getLLVMSyncScopeID(getLangOpts(), SS,
5262 llvm::AtomicOrdering::Release,
5263 getLLVMContext()));
5264 break;
5265 case 4: // memory_order_acq_rel
5266 Builder.CreateFence(llvm::AtomicOrdering::AcquireRelease,
5267 getTargetHooks().getLLVMSyncScopeID(
5268 getLangOpts(), SS,
5269 llvm::AtomicOrdering::AcquireRelease,
5270 getLLVMContext()));
5271 break;
5272 case 5: // memory_order_seq_cst
5273 Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
5274 getTargetHooks().getLLVMSyncScopeID(
5275 getLangOpts(), SS,
5276 llvm::AtomicOrdering::SequentiallyConsistent,
5277 getLLVMContext()));
5278 break;
5279 }
5280 return RValue::get(nullptr);
5281 }
5282
5283 llvm::BasicBlock *ContBB = createBasicBlock("atomic.scope.continue", CurFn);
5284
5286 OrderBBs;
5287 if (Ord) {
5288 switch (Ord->getZExtValue()) {
5289 case 0: // memory_order_relaxed
5290 default: // invalid order
5291 ContBB->eraseFromParent();
5292 return RValue::get(nullptr);
5293 case 1: // memory_order_consume
5294 case 2: // memory_order_acquire
5295 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5296 llvm::AtomicOrdering::Acquire);
5297 break;
5298 case 3: // memory_order_release
5299 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5300 llvm::AtomicOrdering::Release);
5301 break;
5302 case 4: // memory_order_acq_rel
5303 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5304 llvm::AtomicOrdering::AcquireRelease);
5305 break;
5306 case 5: // memory_order_seq_cst
5307 OrderBBs.emplace_back(Builder.GetInsertBlock(),
5308 llvm::AtomicOrdering::SequentiallyConsistent);
5309 break;
5310 }
5311 } else {
5312 llvm::BasicBlock *AcquireBB = createBasicBlock("acquire", CurFn);
5313 llvm::BasicBlock *ReleaseBB = createBasicBlock("release", CurFn);
5314 llvm::BasicBlock *AcqRelBB = createBasicBlock("acqrel", CurFn);
5315 llvm::BasicBlock *SeqCstBB = createBasicBlock("seqcst", CurFn);
5316
5317 Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
5318 llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
5319 SI->addCase(Builder.getInt32(1), AcquireBB);
5320 SI->addCase(Builder.getInt32(2), AcquireBB);
5321 SI->addCase(Builder.getInt32(3), ReleaseBB);
5322 SI->addCase(Builder.getInt32(4), AcqRelBB);
5323 SI->addCase(Builder.getInt32(5), SeqCstBB);
5324
5325 OrderBBs.emplace_back(AcquireBB, llvm::AtomicOrdering::Acquire);
5326 OrderBBs.emplace_back(ReleaseBB, llvm::AtomicOrdering::Release);
5327 OrderBBs.emplace_back(AcqRelBB, llvm::AtomicOrdering::AcquireRelease);
5328 OrderBBs.emplace_back(SeqCstBB,
5329 llvm::AtomicOrdering::SequentiallyConsistent);
5330 }
5331
5332 for (auto &[OrderBB, Ordering] : OrderBBs) {
5333 Builder.SetInsertPoint(OrderBB);
5334 if (Scp) {
5335 SyncScope SS = ScopeModel->isValid(Scp->getZExtValue())
5336 ? ScopeModel->map(Scp->getZExtValue())
5337 : ScopeModel->map(ScopeModel->getFallBackValue());
5338 Builder.CreateFence(Ordering,
5339 getTargetHooks().getLLVMSyncScopeID(
5340 getLangOpts(), SS, Ordering, getLLVMContext()));
5341 Builder.CreateBr(ContBB);
5342 } else {
5343 llvm::DenseMap<unsigned, llvm::BasicBlock *> BBs;
5344 for (unsigned Scp : ScopeModel->getRuntimeValues())
5345 BBs[Scp] = createBasicBlock(getAsString(ScopeModel->map(Scp)), CurFn);
5346
5347 auto *SC = Builder.CreateIntCast(Scope, Builder.getInt32Ty(), false);
5348 llvm::SwitchInst *SI = Builder.CreateSwitch(SC, ContBB);
5349 for (unsigned Scp : ScopeModel->getRuntimeValues()) {
5350 auto *B = BBs[Scp];
5351 SI->addCase(Builder.getInt32(Scp), B);
5352
5353 Builder.SetInsertPoint(B);
5354 Builder.CreateFence(Ordering, getTargetHooks().getLLVMSyncScopeID(
5355 getLangOpts(), ScopeModel->map(Scp),
5356 Ordering, getLLVMContext()));
5357 Builder.CreateBr(ContBB);
5358 }
5359 }
5360 }
5361
5362 Builder.SetInsertPoint(ContBB);
5363 return RValue::get(nullptr);
5364 }
5365
5366 case Builtin::BI__builtin_signbit:
5367 case Builtin::BI__builtin_signbitf:
5368 case Builtin::BI__builtin_signbitl: {
5369 return RValue::get(
5370 Builder.CreateZExt(EmitSignBit(*this, EmitScalarExpr(E->getArg(0))),
5371 ConvertType(E->getType())));
5372 }
5373 case Builtin::BI__warn_memset_zero_len:
5374 return RValue::getIgnored();
5375 case Builtin::BI__annotation: {
5376 // Re-encode each wide string to UTF8 and make an MDString.
5378 for (const Expr *Arg : E->arguments()) {
5379 const auto *Str = cast<StringLiteral>(Arg->IgnoreParenCasts());
5380 assert(Str->getCharByteWidth() == 2);
5381 StringRef WideBytes = Str->getBytes();
5382 std::string StrUtf8;
5383 if (!convertUTF16ToUTF8String(
5384 ArrayRef(WideBytes.data(), WideBytes.size()), StrUtf8)) {
5385 CGM.ErrorUnsupported(E, "non-UTF16 __annotation argument");
5386 continue;
5387 }
5388 Strings.push_back(llvm::MDString::get(getLLVMContext(), StrUtf8));
5389 }
5390
5391 // Build and MDTuple of MDStrings and emit the intrinsic call.
5392 llvm::Function *F = CGM.getIntrinsic(Intrinsic::codeview_annotation, {});
5393 MDTuple *StrTuple = MDTuple::get(getLLVMContext(), Strings);
5394 Builder.CreateCall(F, MetadataAsValue::get(getLLVMContext(), StrTuple));
5395 return RValue::getIgnored();
5396 }
5397 case Builtin::BI__builtin_annotation: {
5398 llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
5399 llvm::Function *F = CGM.getIntrinsic(
5400 Intrinsic::annotation, {AnnVal->getType(), CGM.ConstGlobalsPtrTy});
5401
5402 // Get the annotation string, go through casts. Sema requires this to be a
5403 // non-wide string literal, potentially casted, so the cast<> is safe.
5404 const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
5405 StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
5406 return RValue::get(
5407 EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc(), nullptr));
5408 }
5409 case Builtin::BI__builtin_addcb:
5410 case Builtin::BI__builtin_addcs:
5411 case Builtin::BI__builtin_addc:
5412 case Builtin::BI__builtin_addcl:
5413 case Builtin::BI__builtin_addcll:
5414 case Builtin::BI__builtin_subcb:
5415 case Builtin::BI__builtin_subcs:
5416 case Builtin::BI__builtin_subc:
5417 case Builtin::BI__builtin_subcl:
5418 case Builtin::BI__builtin_subcll: {
5419
5420 // We translate all of these builtins from expressions of the form:
5421 // int x = ..., y = ..., carryin = ..., carryout, result;
5422 // result = __builtin_addc(x, y, carryin, &carryout);
5423 //
5424 // to LLVM IR of the form:
5425 //
5426 // %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
5427 // %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
5428 // %carry1 = extractvalue {i32, i1} %tmp1, 1
5429 // %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
5430 // i32 %carryin)
5431 // %result = extractvalue {i32, i1} %tmp2, 0
5432 // %carry2 = extractvalue {i32, i1} %tmp2, 1
5433 // %tmp3 = or i1 %carry1, %carry2
5434 // %tmp4 = zext i1 %tmp3 to i32
5435 // store i32 %tmp4, i32* %carryout
5436
5437 // Scalarize our inputs.
5438 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5439 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5440 llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
5441 Address CarryOutPtr = EmitPointerWithAlignment(E->getArg(3));
5442
5443 // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
5444 Intrinsic::ID IntrinsicId;
5445 switch (BuiltinID) {
5446 default: llvm_unreachable("Unknown multiprecision builtin id.");
5447 case Builtin::BI__builtin_addcb:
5448 case Builtin::BI__builtin_addcs:
5449 case Builtin::BI__builtin_addc:
5450 case Builtin::BI__builtin_addcl:
5451 case Builtin::BI__builtin_addcll:
5452 IntrinsicId = Intrinsic::uadd_with_overflow;
5453 break;
5454 case Builtin::BI__builtin_subcb:
5455 case Builtin::BI__builtin_subcs:
5456 case Builtin::BI__builtin_subc:
5457 case Builtin::BI__builtin_subcl:
5458 case Builtin::BI__builtin_subcll:
5459 IntrinsicId = Intrinsic::usub_with_overflow;
5460 break;
5461 }
5462
5463 // Construct our resulting LLVM IR expression.
5464 llvm::Value *Carry1;
5465 llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
5466 X, Y, Carry1);
5467 llvm::Value *Carry2;
5468 llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
5469 Sum1, Carryin, Carry2);
5470 llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
5471 X->getType());
5472 Builder.CreateStore(CarryOut, CarryOutPtr);
5473 return RValue::get(Sum2);
5474 }
5475
5476 case Builtin::BI__builtin_add_overflow:
5477 case Builtin::BI__builtin_sub_overflow:
5478 case Builtin::BI__builtin_mul_overflow: {
5479 const clang::Expr *LeftArg = E->getArg(0);
5480 const clang::Expr *RightArg = E->getArg(1);
5481 const clang::Expr *ResultArg = E->getArg(2);
5482
5483 clang::QualType ResultQTy =
5484 ResultArg->getType()->castAs<PointerType>()->getPointeeType();
5485
5486 WidthAndSignedness LeftInfo =
5487 getIntegerWidthAndSignedness(CGM.getContext(), LeftArg->getType());
5488 WidthAndSignedness RightInfo =
5489 getIntegerWidthAndSignedness(CGM.getContext(), RightArg->getType());
5490 WidthAndSignedness ResultInfo =
5491 getIntegerWidthAndSignedness(CGM.getContext(), ResultQTy);
5492
5493 // Handle mixed-sign multiplication as a special case, because adding
5494 // runtime or backend support for our generic irgen would be too expensive.
5495 if (isSpecialMixedSignMultiply(BuiltinID, LeftInfo, RightInfo, ResultInfo))
5496 return EmitCheckedMixedSignMultiply(*this, LeftArg, LeftInfo, RightArg,
5497 RightInfo, ResultArg, ResultQTy,
5498 ResultInfo);
5499
5500 if (isSpecialUnsignedMultiplySignedResult(BuiltinID, LeftInfo, RightInfo,
5501 ResultInfo))
5503 *this, LeftArg, LeftInfo, RightArg, RightInfo, ResultArg, ResultQTy,
5504 ResultInfo);
5505
5506 WidthAndSignedness EncompassingInfo =
5507 EncompassingIntegerType({LeftInfo, RightInfo, ResultInfo});
5508
5509 llvm::Type *EncompassingLLVMTy =
5510 llvm::IntegerType::get(CGM.getLLVMContext(), EncompassingInfo.Width);
5511
5512 llvm::Type *ResultLLVMTy = CGM.getTypes().ConvertType(ResultQTy);
5513
5514 Intrinsic::ID IntrinsicId;
5515 switch (BuiltinID) {
5516 default:
5517 llvm_unreachable("Unknown overflow builtin id.");
5518 case Builtin::BI__builtin_add_overflow:
5519 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::sadd_with_overflow
5520 : Intrinsic::uadd_with_overflow;
5521 break;
5522 case Builtin::BI__builtin_sub_overflow:
5523 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::ssub_with_overflow
5524 : Intrinsic::usub_with_overflow;
5525 break;
5526 case Builtin::BI__builtin_mul_overflow:
5527 IntrinsicId = EncompassingInfo.Signed ? Intrinsic::smul_with_overflow
5528 : Intrinsic::umul_with_overflow;
5529 break;
5530 }
5531
5532 llvm::Value *Left = EmitScalarExpr(LeftArg);
5533 llvm::Value *Right = EmitScalarExpr(RightArg);
5534 Address ResultPtr = EmitPointerWithAlignment(ResultArg);
5535
5536 // Extend each operand to the encompassing type.
5537 Left = Builder.CreateIntCast(Left, EncompassingLLVMTy, LeftInfo.Signed);
5538 Right = Builder.CreateIntCast(Right, EncompassingLLVMTy, RightInfo.Signed);
5539
5540 // Perform the operation on the extended values.
5541 llvm::Value *Overflow, *Result;
5542 Result = EmitOverflowIntrinsic(*this, IntrinsicId, Left, Right, Overflow);
5543
5544 if (EncompassingInfo.Width > ResultInfo.Width) {
5545 // The encompassing type is wider than the result type, so we need to
5546 // truncate it.
5547 llvm::Value *ResultTrunc = Builder.CreateTrunc(Result, ResultLLVMTy);
5548
5549 // To see if the truncation caused an overflow, we will extend
5550 // the result and then compare it to the original result.
5551 llvm::Value *ResultTruncExt = Builder.CreateIntCast(
5552 ResultTrunc, EncompassingLLVMTy, ResultInfo.Signed);
5553 llvm::Value *TruncationOverflow =
5554 Builder.CreateICmpNE(Result, ResultTruncExt);
5555
5556 Overflow = Builder.CreateOr(Overflow, TruncationOverflow);
5557 Result = ResultTrunc;
5558 }
5559
5560 // Finally, store the result using the pointer.
5561 bool isVolatile =
5562 ResultArg->getType()->getPointeeType().isVolatileQualified();
5563 Builder.CreateStore(EmitToMemory(Result, ResultQTy), ResultPtr, isVolatile);
5564
5565 return RValue::get(Overflow);
5566 }
5567
5568 case Builtin::BI__builtin_uadd_overflow:
5569 case Builtin::BI__builtin_uaddl_overflow:
5570 case Builtin::BI__builtin_uaddll_overflow:
5571 case Builtin::BI__builtin_usub_overflow:
5572 case Builtin::BI__builtin_usubl_overflow:
5573 case Builtin::BI__builtin_usubll_overflow:
5574 case Builtin::BI__builtin_umul_overflow:
5575 case Builtin::BI__builtin_umull_overflow:
5576 case Builtin::BI__builtin_umulll_overflow:
5577 case Builtin::BI__builtin_sadd_overflow:
5578 case Builtin::BI__builtin_saddl_overflow:
5579 case Builtin::BI__builtin_saddll_overflow:
5580 case Builtin::BI__builtin_ssub_overflow:
5581 case Builtin::BI__builtin_ssubl_overflow:
5582 case Builtin::BI__builtin_ssubll_overflow:
5583 case Builtin::BI__builtin_smul_overflow:
5584 case Builtin::BI__builtin_smull_overflow:
5585 case Builtin::BI__builtin_smulll_overflow: {
5586
5587 // We translate all of these builtins directly to the relevant llvm IR node.
5588
5589 // Scalarize our inputs.
5590 llvm::Value *X = EmitScalarExpr(E->getArg(0));
5591 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
5592 Address SumOutPtr = EmitPointerWithAlignment(E->getArg(2));
5593
5594 // Decide which of the overflow intrinsics we are lowering to:
5595 Intrinsic::ID IntrinsicId;
5596 switch (BuiltinID) {
5597 default: llvm_unreachable("Unknown overflow builtin id.");
5598 case Builtin::BI__builtin_uadd_overflow:
5599 case Builtin::BI__builtin_uaddl_overflow:
5600 case Builtin::BI__builtin_uaddll_overflow:
5601 IntrinsicId = Intrinsic::uadd_with_overflow;
5602 break;
5603 case Builtin::BI__builtin_usub_overflow:
5604 case Builtin::BI__builtin_usubl_overflow:
5605 case Builtin::BI__builtin_usubll_overflow:
5606 IntrinsicId = Intrinsic::usub_with_overflow;
5607 break;
5608 case Builtin::BI__builtin_umul_overflow:
5609 case Builtin::BI__builtin_umull_overflow:
5610 case Builtin::BI__builtin_umulll_overflow:
5611 IntrinsicId = Intrinsic::umul_with_overflow;
5612 break;
5613 case Builtin::BI__builtin_sadd_overflow:
5614 case Builtin::BI__builtin_saddl_overflow:
5615 case Builtin::BI__builtin_saddll_overflow:
5616 IntrinsicId = Intrinsic::sadd_with_overflow;
5617 break;
5618 case Builtin::BI__builtin_ssub_overflow:
5619 case Builtin::BI__builtin_ssubl_overflow:
5620 case Builtin::BI__builtin_ssubll_overflow:
5621 IntrinsicId = Intrinsic::ssub_with_overflow;
5622 break;
5623 case Builtin::BI__builtin_smul_overflow:
5624 case Builtin::BI__builtin_smull_overflow:
5625 case Builtin::BI__builtin_smulll_overflow:
5626 IntrinsicId = Intrinsic::smul_with_overflow;
5627 break;
5628 }
5629
5630
5631 llvm::Value *Carry;
5632 llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
5633 Builder.CreateStore(Sum, SumOutPtr);
5634
5635 return RValue::get(Carry);
5636 }
5637 case Builtin::BIaddressof:
5638 case Builtin::BI__addressof:
5639 case Builtin::BI__builtin_addressof:
5640 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5641 case Builtin::BI__builtin_function_start:
5642 return RValue::get(CGM.GetFunctionStart(
5643 E->getArg(0)->getAsBuiltinConstantDeclRef(CGM.getContext())));
5644 case Builtin::BI__builtin_operator_new:
5646 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, false);
5647 case Builtin::BI__builtin_operator_delete:
5649 E->getCallee()->getType()->castAs<FunctionProtoType>(), E, true);
5650 return RValue::get(nullptr);
5651
5652 case Builtin::BI__builtin_is_aligned:
5653 return EmitBuiltinIsAligned(E);
5654 case Builtin::BI__builtin_align_up:
5655 return EmitBuiltinAlignTo(E, true);
5656 case Builtin::BI__builtin_align_down:
5657 return EmitBuiltinAlignTo(E, false);
5658
5659 case Builtin::BI__noop:
5660 // __noop always evaluates to an integer literal zero.
5661 return RValue::get(ConstantInt::get(IntTy, 0));
5662 case Builtin::BI__builtin_call_with_static_chain: {
5663 const CallExpr *Call = cast<CallExpr>(E->getArg(0));
5664 const Expr *Chain = E->getArg(1);
5665 return EmitCall(Call->getCallee()->getType(),
5666 EmitCallee(Call->getCallee()), Call, ReturnValue,
5667 EmitScalarExpr(Chain));
5668 }
5669 case Builtin::BI_InterlockedExchange8:
5670 case Builtin::BI_InterlockedExchange16:
5671 case Builtin::BI_InterlockedExchange:
5672 case Builtin::BI_InterlockedExchangePointer:
5673 return RValue::get(
5675 case Builtin::BI_InterlockedCompareExchangePointer:
5676 return RValue::get(
5678 case Builtin::BI_InterlockedCompareExchangePointer_nf:
5679 return RValue::get(
5681 case Builtin::BI_InterlockedCompareExchange8:
5682 case Builtin::BI_InterlockedCompareExchange16:
5683 case Builtin::BI_InterlockedCompareExchange:
5684 case Builtin::BI_InterlockedCompareExchange64:
5685 return RValue::get(EmitAtomicCmpXchgForMSIntrin(*this, E));
5686 case Builtin::BI_InterlockedIncrement16:
5687 case Builtin::BI_InterlockedIncrement:
5688 return RValue::get(
5690 case Builtin::BI_InterlockedDecrement16:
5691 case Builtin::BI_InterlockedDecrement:
5692 return RValue::get(
5694 case Builtin::BI_InterlockedAnd8:
5695 case Builtin::BI_InterlockedAnd16:
5696 case Builtin::BI_InterlockedAnd:
5698 case Builtin::BI_InterlockedExchangeAdd8:
5699 case Builtin::BI_InterlockedExchangeAdd16:
5700 case Builtin::BI_InterlockedExchangeAdd:
5701 return RValue::get(
5703 case Builtin::BI_InterlockedExchangeSub8:
5704 case Builtin::BI_InterlockedExchangeSub16:
5705 case Builtin::BI_InterlockedExchangeSub:
5706 return RValue::get(
5708 case Builtin::BI_InterlockedOr8:
5709 case Builtin::BI_InterlockedOr16:
5710 case Builtin::BI_InterlockedOr:
5712 case Builtin::BI_InterlockedXor8:
5713 case Builtin::BI_InterlockedXor16:
5714 case Builtin::BI_InterlockedXor:
5716
5717 case Builtin::BI_bittest64:
5718 case Builtin::BI_bittest:
5719 case Builtin::BI_bittestandcomplement64:
5720 case Builtin::BI_bittestandcomplement:
5721 case Builtin::BI_bittestandreset64:
5722 case Builtin::BI_bittestandreset:
5723 case Builtin::BI_bittestandset64:
5724 case Builtin::BI_bittestandset:
5725 case Builtin::BI_interlockedbittestandreset:
5726 case Builtin::BI_interlockedbittestandreset64:
5727 case Builtin::BI_interlockedbittestandreset64_acq:
5728 case Builtin::BI_interlockedbittestandreset64_rel:
5729 case Builtin::BI_interlockedbittestandreset64_nf:
5730 case Builtin::BI_interlockedbittestandset64:
5731 case Builtin::BI_interlockedbittestandset64_acq:
5732 case Builtin::BI_interlockedbittestandset64_rel:
5733 case Builtin::BI_interlockedbittestandset64_nf:
5734 case Builtin::BI_interlockedbittestandset:
5735 case Builtin::BI_interlockedbittestandset_acq:
5736 case Builtin::BI_interlockedbittestandset_rel:
5737 case Builtin::BI_interlockedbittestandset_nf:
5738 case Builtin::BI_interlockedbittestandreset_acq:
5739 case Builtin::BI_interlockedbittestandreset_rel:
5740 case Builtin::BI_interlockedbittestandreset_nf:
5741 return RValue::get(EmitBitTestIntrinsic(*this, BuiltinID, E));
5742
5743 // These builtins exist to emit regular volatile loads and stores not
5744 // affected by the -fms-volatile setting.
5745 case Builtin::BI__iso_volatile_load8:
5746 case Builtin::BI__iso_volatile_load16:
5747 case Builtin::BI__iso_volatile_load32:
5748 case Builtin::BI__iso_volatile_load64:
5749 return RValue::get(EmitISOVolatileLoad(*this, E));
5750 case Builtin::BI__iso_volatile_store8:
5751 case Builtin::BI__iso_volatile_store16:
5752 case Builtin::BI__iso_volatile_store32:
5753 case Builtin::BI__iso_volatile_store64:
5754 return RValue::get(EmitISOVolatileStore(*this, E));
5755
5756 case Builtin::BI__builtin_ptrauth_sign_constant:
5757 return RValue::get(ConstantEmitter(*this).emitAbstract(E, E->getType()));
5758
5759 case Builtin::BI__builtin_ptrauth_auth:
5760 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5761 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5762 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5763 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5764 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5765 case Builtin::BI__builtin_ptrauth_strip: {
5766 // Emit the arguments.
5768 for (auto argExpr : E->arguments())
5769 Args.push_back(EmitScalarExpr(argExpr));
5770
5771 // Cast the value to intptr_t, saving its original type.
5772 llvm::Type *OrigValueType = Args[0]->getType();
5773 if (OrigValueType->isPointerTy())
5774 Args[0] = Builder.CreatePtrToInt(Args[0], IntPtrTy);
5775
5776 switch (BuiltinID) {
5777 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5778 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5779 if (Args[4]->getType()->isPointerTy())
5780 Args[4] = Builder.CreatePtrToInt(Args[4], IntPtrTy);
5781 [[fallthrough]];
5782
5783 case Builtin::BI__builtin_ptrauth_auth:
5784 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5785 if (Args[2]->getType()->isPointerTy())
5786 Args[2] = Builder.CreatePtrToInt(Args[2], IntPtrTy);
5787 break;
5788
5789 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5790 if (Args[1]->getType()->isPointerTy())
5791 Args[1] = Builder.CreatePtrToInt(Args[1], IntPtrTy);
5792 break;
5793
5794 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5795 case Builtin::BI__builtin_ptrauth_strip:
5796 break;
5797 }
5798
5799 // Call the intrinsic.
5800 auto IntrinsicID = [&]() -> unsigned {
5801 switch (BuiltinID) {
5802 case Builtin::BI__builtin_ptrauth_auth:
5803 return Intrinsic::ptrauth_auth;
5804 case Builtin::BI__builtin_ptrauth_auth_and_resign:
5805 return Intrinsic::ptrauth_resign;
5806 case Builtin::BI__builtin_ptrauth_auth_load_relative_and_sign:
5807 return Intrinsic::ptrauth_resign_load_relative;
5808 case Builtin::BI__builtin_ptrauth_blend_discriminator:
5809 return Intrinsic::ptrauth_blend;
5810 case Builtin::BI__builtin_ptrauth_sign_generic_data:
5811 return Intrinsic::ptrauth_sign_generic;
5812 case Builtin::BI__builtin_ptrauth_sign_unauthenticated:
5813 return Intrinsic::ptrauth_sign;
5814 case Builtin::BI__builtin_ptrauth_strip:
5815 return Intrinsic::ptrauth_strip;
5816 }
5817 llvm_unreachable("bad ptrauth intrinsic");
5818 }();
5819 auto Intrinsic = CGM.getIntrinsic(IntrinsicID);
5820 llvm::Value *Result = EmitRuntimeCall(Intrinsic, Args);
5821
5822 if (BuiltinID != Builtin::BI__builtin_ptrauth_sign_generic_data &&
5823 BuiltinID != Builtin::BI__builtin_ptrauth_blend_discriminator &&
5824 OrigValueType->isPointerTy()) {
5825 Result = Builder.CreateIntToPtr(Result, OrigValueType);
5826 }
5827 return RValue::get(Result);
5828 }
5829
5830 case Builtin::BI__builtin_get_vtable_pointer: {
5831 const Expr *Target = E->getArg(0);
5832 QualType TargetType = Target->getType();
5833 const CXXRecordDecl *Decl = TargetType->getPointeeCXXRecordDecl();
5834 assert(Decl);
5835 auto ThisAddress = EmitPointerWithAlignment(Target);
5836 assert(ThisAddress.isValid());
5837 llvm::Value *VTablePointer =
5839 return RValue::get(VTablePointer);
5840 }
5841
5842 case Builtin::BI__exception_code:
5843 case Builtin::BI_exception_code:
5845 case Builtin::BI__exception_info:
5846 case Builtin::BI_exception_info:
5848 case Builtin::BI__abnormal_termination:
5849 case Builtin::BI_abnormal_termination:
5851 case Builtin::BI_setjmpex:
5852 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5853 E->getArg(0)->getType()->isPointerType())
5854 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5855 break;
5856 case Builtin::BI_setjmp:
5857 if (getTarget().getTriple().isOSMSVCRT() && E->getNumArgs() == 1 &&
5858 E->getArg(0)->getType()->isPointerType()) {
5859 if (getTarget().getTriple().getArch() == llvm::Triple::x86)
5860 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp3, E);
5861 else if (getTarget().getTriple().getArch() == llvm::Triple::aarch64)
5862 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmpex, E);
5863 return EmitMSVCRTSetJmp(*this, MSVCSetJmpKind::_setjmp, E);
5864 }
5865 break;
5866
5867 // C++ std:: builtins.
5868 case Builtin::BImove:
5869 case Builtin::BImove_if_noexcept:
5870 case Builtin::BIforward:
5871 case Builtin::BIforward_like:
5872 case Builtin::BIas_const:
5873 return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
5874 case Builtin::BI__GetExceptionInfo: {
5875 if (llvm::GlobalVariable *GV =
5876 CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
5877 return RValue::get(GV);
5878 break;
5879 }
5880
5881 case Builtin::BI__fastfail:
5883
5884 case Builtin::BI__builtin_coro_id:
5885 return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
5886 case Builtin::BI__builtin_coro_promise:
5887 return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
5888 case Builtin::BI__builtin_coro_resume:
5889 EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
5890 return RValue::get(nullptr);
5891 case Builtin::BI__builtin_coro_frame:
5892 return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
5893 case Builtin::BI__builtin_coro_noop:
5894 return EmitCoroutineIntrinsic(E, Intrinsic::coro_noop);
5895 case Builtin::BI__builtin_coro_free:
5896 return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
5897 case Builtin::BI__builtin_coro_destroy:
5898 EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
5899 return RValue::get(nullptr);
5900 case Builtin::BI__builtin_coro_done:
5901 return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
5902 case Builtin::BI__builtin_coro_alloc:
5903 return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
5904 case Builtin::BI__builtin_coro_begin:
5905 return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
5906 case Builtin::BI__builtin_coro_end:
5907 return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
5908 case Builtin::BI__builtin_coro_suspend:
5909 return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
5910 case Builtin::BI__builtin_coro_size:
5911 return EmitCoroutineIntrinsic(E, Intrinsic::coro_size);
5912 case Builtin::BI__builtin_coro_align:
5913 return EmitCoroutineIntrinsic(E, Intrinsic::coro_align);
5914
5915 // OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
5916 case Builtin::BIread_pipe:
5917 case Builtin::BIwrite_pipe: {
5918 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5919 *Arg1 = EmitScalarExpr(E->getArg(1));
5920 CGOpenCLRuntime OpenCLRT(CGM);
5921 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5922 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5923
5924 // Type of the generic packet parameter.
5925 unsigned GenericAS =
5927 llvm::Type *I8PTy = llvm::PointerType::get(getLLVMContext(), GenericAS);
5928
5929 // Testing which overloaded version we should generate the call for.
5930 if (2U == E->getNumArgs()) {
5931 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_2"
5932 : "__write_pipe_2";
5933 // Creating a generic function type to be able to call with any builtin or
5934 // user defined type.
5935 llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
5936 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5937 Value *ACast = Builder.CreateAddrSpaceCast(Arg1, I8PTy);
5938 return RValue::get(
5939 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5940 {Arg0, ACast, PacketSize, PacketAlign}));
5941 } else {
5942 assert(4 == E->getNumArgs() &&
5943 "Illegal number of parameters to pipe function");
5944 const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
5945 : "__write_pipe_4";
5946
5947 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
5948 Int32Ty, Int32Ty};
5949 Value *Arg2 = EmitScalarExpr(E->getArg(2)),
5950 *Arg3 = EmitScalarExpr(E->getArg(3));
5951 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
5952 Value *ACast = Builder.CreateAddrSpaceCast(Arg3, I8PTy);
5953 // We know the third argument is an integer type, but we may need to cast
5954 // it to i32.
5955 if (Arg2->getType() != Int32Ty)
5956 Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
5957 return RValue::get(
5958 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
5959 {Arg0, Arg1, Arg2, ACast, PacketSize, PacketAlign}));
5960 }
5961 }
5962 // OpenCL v2.0 s6.13.16 ,s9.17.3.5 - Built-in pipe reserve read and write
5963 // functions
5964 case Builtin::BIreserve_read_pipe:
5965 case Builtin::BIreserve_write_pipe:
5966 case Builtin::BIwork_group_reserve_read_pipe:
5967 case Builtin::BIwork_group_reserve_write_pipe:
5968 case Builtin::BIsub_group_reserve_read_pipe:
5969 case Builtin::BIsub_group_reserve_write_pipe: {
5970 // Composing the mangled name for the function.
5971 const char *Name;
5972 if (BuiltinID == Builtin::BIreserve_read_pipe)
5973 Name = "__reserve_read_pipe";
5974 else if (BuiltinID == Builtin::BIreserve_write_pipe)
5975 Name = "__reserve_write_pipe";
5976 else if (BuiltinID == Builtin::BIwork_group_reserve_read_pipe)
5977 Name = "__work_group_reserve_read_pipe";
5978 else if (BuiltinID == Builtin::BIwork_group_reserve_write_pipe)
5979 Name = "__work_group_reserve_write_pipe";
5980 else if (BuiltinID == Builtin::BIsub_group_reserve_read_pipe)
5981 Name = "__sub_group_reserve_read_pipe";
5982 else
5983 Name = "__sub_group_reserve_write_pipe";
5984
5985 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
5986 *Arg1 = EmitScalarExpr(E->getArg(1));
5987 llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
5988 CGOpenCLRuntime OpenCLRT(CGM);
5989 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
5990 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
5991
5992 // Building the generic function prototype.
5993 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
5994 llvm::FunctionType *FTy =
5995 llvm::FunctionType::get(ReservedIDTy, ArgTys, false);
5996 // We know the second argument is an integer type, but we may need to cast
5997 // it to i32.
5998 if (Arg1->getType() != Int32Ty)
5999 Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
6000 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6001 {Arg0, Arg1, PacketSize, PacketAlign}));
6002 }
6003 // OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
6004 // functions
6005 case Builtin::BIcommit_read_pipe:
6006 case Builtin::BIcommit_write_pipe:
6007 case Builtin::BIwork_group_commit_read_pipe:
6008 case Builtin::BIwork_group_commit_write_pipe:
6009 case Builtin::BIsub_group_commit_read_pipe:
6010 case Builtin::BIsub_group_commit_write_pipe: {
6011 const char *Name;
6012 if (BuiltinID == Builtin::BIcommit_read_pipe)
6013 Name = "__commit_read_pipe";
6014 else if (BuiltinID == Builtin::BIcommit_write_pipe)
6015 Name = "__commit_write_pipe";
6016 else if (BuiltinID == Builtin::BIwork_group_commit_read_pipe)
6017 Name = "__work_group_commit_read_pipe";
6018 else if (BuiltinID == Builtin::BIwork_group_commit_write_pipe)
6019 Name = "__work_group_commit_write_pipe";
6020 else if (BuiltinID == Builtin::BIsub_group_commit_read_pipe)
6021 Name = "__sub_group_commit_read_pipe";
6022 else
6023 Name = "__sub_group_commit_write_pipe";
6024
6025 Value *Arg0 = EmitScalarExpr(E->getArg(0)),
6026 *Arg1 = EmitScalarExpr(E->getArg(1));
6027 CGOpenCLRuntime OpenCLRT(CGM);
6028 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6029 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6030
6031 // Building the generic function prototype.
6032 llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
6033 llvm::FunctionType *FTy = llvm::FunctionType::get(
6034 llvm::Type::getVoidTy(getLLVMContext()), ArgTys, false);
6035
6036 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6037 {Arg0, Arg1, PacketSize, PacketAlign}));
6038 }
6039 // OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
6040 case Builtin::BIget_pipe_num_packets:
6041 case Builtin::BIget_pipe_max_packets: {
6042 const char *BaseName;
6043 const auto *PipeTy = E->getArg(0)->getType()->castAs<PipeType>();
6044 if (BuiltinID == Builtin::BIget_pipe_num_packets)
6045 BaseName = "__get_pipe_num_packets";
6046 else
6047 BaseName = "__get_pipe_max_packets";
6048 std::string Name = std::string(BaseName) +
6049 std::string(PipeTy->isReadOnly() ? "_ro" : "_wo");
6050
6051 // Building the generic function prototype.
6052 Value *Arg0 = EmitScalarExpr(E->getArg(0));
6053 CGOpenCLRuntime OpenCLRT(CGM);
6054 Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
6055 Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
6056 llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
6057 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6058
6059 return RValue::get(EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6060 {Arg0, PacketSize, PacketAlign}));
6061 }
6062
6063 // OpenCL v2.0 s6.13.9 - Address space qualifier functions.
6064 case Builtin::BIto_global:
6065 case Builtin::BIto_local:
6066 case Builtin::BIto_private: {
6067 auto Arg0 = EmitScalarExpr(E->getArg(0));
6068 auto NewArgT = llvm::PointerType::get(
6070 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6071 auto NewRetT = llvm::PointerType::get(
6073 CGM.getContext().getTargetAddressSpace(
6075 auto FTy = llvm::FunctionType::get(NewRetT, {NewArgT}, false);
6076 llvm::Value *NewArg;
6077 if (Arg0->getType()->getPointerAddressSpace() !=
6078 NewArgT->getPointerAddressSpace())
6079 NewArg = Builder.CreateAddrSpaceCast(Arg0, NewArgT);
6080 else
6081 NewArg = Builder.CreateBitOrPointerCast(Arg0, NewArgT);
6082 auto NewName = std::string("__") + E->getDirectCallee()->getName().str();
6083 auto NewCall =
6084 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, NewName), {NewArg});
6085 return RValue::get(Builder.CreateBitOrPointerCast(NewCall,
6086 ConvertType(E->getType())));
6087 }
6088
6089 // OpenCL v2.0, s6.13.17 - Enqueue kernel function.
6090 // Table 6.13.17.1 specifies four overload forms of enqueue_kernel.
6091 // The code below expands the builtin call to a call to one of the following
6092 // functions that an OpenCL runtime library will have to provide:
6093 // __enqueue_kernel_basic
6094 // __enqueue_kernel_varargs
6095 // __enqueue_kernel_basic_events
6096 // __enqueue_kernel_events_varargs
6097 case Builtin::BIenqueue_kernel: {
6098 StringRef Name; // Generated function call name
6099 unsigned NumArgs = E->getNumArgs();
6100
6101 llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
6102 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6103 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6104
6105 llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
6106 llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
6107 LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
6108 llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
6109
6110 // FIXME: Look through the addrspacecast which may exist to the stack
6111 // temporary as a hack.
6112 //
6113 // This is hardcoding the assumed ABI of the target function. This assumes
6114 // direct passing for every argument except NDRange, which is assumed to be
6115 // byval or byref indirect passed.
6116 //
6117 // This should be fixed to query a signature from CGOpenCLRuntime, and go
6118 // through EmitCallArgs to get the correct target ABI.
6119 Range = Range->stripPointerCasts();
6120
6121 llvm::Type *RangePtrTy = Range->getType();
6122
6123 if (NumArgs == 4) {
6124 // The most basic form of the call with parameters:
6125 // queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
6126 Name = "__enqueue_kernel_basic";
6127 llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangePtrTy, GenericVoidPtrTy,
6128 GenericVoidPtrTy};
6129 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6130
6131 auto Info =
6132 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6133 llvm::Value *Kernel =
6134 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6135 llvm::Value *Block =
6136 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6137
6138 auto RTCall = EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name),
6139 {Queue, Flags, Range, Kernel, Block});
6140 return RValue::get(RTCall);
6141 }
6142 assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
6143
6144 // Create a temporary array to hold the sizes of local pointer arguments
6145 // for the block. \p First is the position of the first size argument.
6146 auto CreateArrayForSizeVar =
6147 [=](unsigned First) -> std::pair<llvm::Value *, llvm::Value *> {
6148 llvm::APInt ArraySize(32, NumArgs - First);
6150 getContext().getSizeType(), ArraySize, nullptr,
6152 /*IndexTypeQuals=*/0);
6153 auto Tmp = CreateMemTemp(SizeArrayTy, "block_sizes");
6154 llvm::Value *TmpPtr = Tmp.getPointer();
6155 // The EmitLifetime* pair expect a naked Alloca as their last argument,
6156 // however for cases where the default AS is not the Alloca AS, Tmp is
6157 // actually the Alloca ascasted to the default AS, hence the
6158 // stripPointerCasts()
6159 llvm::Value *Alloca = TmpPtr->stripPointerCasts();
6160 llvm::Value *ElemPtr;
6161 EmitLifetimeStart(Alloca);
6162 // Each of the following arguments specifies the size of the corresponding
6163 // argument passed to the enqueued block.
6164 auto *Zero = llvm::ConstantInt::get(IntTy, 0);
6165 for (unsigned I = First; I < NumArgs; ++I) {
6166 auto *Index = llvm::ConstantInt::get(IntTy, I - First);
6167 auto *GEP =
6168 Builder.CreateGEP(Tmp.getElementType(), Alloca, {Zero, Index});
6169 if (I == First)
6170 ElemPtr = GEP;
6171 auto *V =
6172 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy);
6173 Builder.CreateAlignedStore(
6174 V, GEP, CGM.getDataLayout().getPrefTypeAlign(SizeTy));
6175 }
6176 // Return the Alloca itself rather than a potential ascast as this is only
6177 // used by the paired EmitLifetimeEnd.
6178 return {ElemPtr, Alloca};
6179 };
6180
6181 // Could have events and/or varargs.
6182 if (E->getArg(3)->getType()->isBlockPointerType()) {
6183 // No events passed, but has variadic arguments.
6184 Name = "__enqueue_kernel_varargs";
6185 auto Info =
6186 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(3));
6187 llvm::Value *Kernel =
6188 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6189 auto *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6190 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(4);
6191
6192 // Create a vector of the arguments, as well as a constant value to
6193 // express to the runtime the number of variadic arguments.
6194 llvm::Value *const Args[] = {Queue, Flags,
6195 Range, Kernel,
6196 Block, ConstantInt::get(IntTy, NumArgs - 4),
6197 ElemPtr};
6198 llvm::Type *const ArgTys[] = {
6199 QueueTy, IntTy, RangePtrTy, GenericVoidPtrTy,
6200 GenericVoidPtrTy, IntTy, ElemPtr->getType()};
6201
6202 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6203 auto Call = RValue::get(
6204 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6205 EmitLifetimeEnd(TmpPtr);
6206 return Call;
6207 }
6208 // Any calls now have event arguments passed.
6209 if (NumArgs >= 7) {
6210 llvm::PointerType *PtrTy = llvm::PointerType::get(
6211 CGM.getLLVMContext(),
6212 CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
6213
6214 llvm::Value *NumEvents =
6215 Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
6216
6217 // Since SemaOpenCLBuiltinEnqueueKernel allows fifth and sixth arguments
6218 // to be a null pointer constant (including `0` literal), we can take it
6219 // into account and emit null pointer directly.
6220 llvm::Value *EventWaitList = nullptr;
6221 if (E->getArg(4)->isNullPointerConstant(
6223 EventWaitList = llvm::ConstantPointerNull::get(PtrTy);
6224 } else {
6225 EventWaitList =
6226 E->getArg(4)->getType()->isArrayType()
6228 : EmitScalarExpr(E->getArg(4));
6229 // Convert to generic address space.
6230 EventWaitList = Builder.CreatePointerCast(EventWaitList, PtrTy);
6231 }
6232 llvm::Value *EventRet = nullptr;
6233 if (E->getArg(5)->isNullPointerConstant(
6235 EventRet = llvm::ConstantPointerNull::get(PtrTy);
6236 } else {
6237 EventRet =
6238 Builder.CreatePointerCast(EmitScalarExpr(E->getArg(5)), PtrTy);
6239 }
6240
6241 auto Info =
6242 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(6));
6243 llvm::Value *Kernel =
6244 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6245 llvm::Value *Block =
6246 Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6247
6248 std::vector<llvm::Type *> ArgTys = {
6249 QueueTy, Int32Ty, RangePtrTy, Int32Ty,
6250 PtrTy, PtrTy, GenericVoidPtrTy, GenericVoidPtrTy};
6251
6252 std::vector<llvm::Value *> Args = {Queue, Flags, Range,
6253 NumEvents, EventWaitList, EventRet,
6254 Kernel, Block};
6255
6256 if (NumArgs == 7) {
6257 // Has events but no variadics.
6258 Name = "__enqueue_kernel_basic_events";
6259 llvm::FunctionType *FTy =
6260 llvm::FunctionType::get(Int32Ty, ArgTys, false);
6261 return RValue::get(
6262 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6263 }
6264 // Has event info and variadics
6265 // Pass the number of variadics to the runtime function too.
6266 Args.push_back(ConstantInt::get(Int32Ty, NumArgs - 7));
6267 ArgTys.push_back(Int32Ty);
6268 Name = "__enqueue_kernel_events_varargs";
6269
6270 auto [ElemPtr, TmpPtr] = CreateArrayForSizeVar(7);
6271 Args.push_back(ElemPtr);
6272 ArgTys.push_back(ElemPtr->getType());
6273
6274 llvm::FunctionType *FTy = llvm::FunctionType::get(Int32Ty, ArgTys, false);
6275 auto Call = RValue::get(
6276 EmitRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Args));
6277 EmitLifetimeEnd(TmpPtr);
6278 return Call;
6279 }
6280 llvm_unreachable("Unexpected enqueue_kernel signature");
6281 }
6282 // OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
6283 // parameter.
6284 case Builtin::BIget_kernel_work_group_size: {
6285 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6286 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6287 auto Info =
6288 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6289 Value *Kernel =
6290 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6291 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6293 CGM.CreateRuntimeFunction(
6294 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6295 false),
6296 "__get_kernel_work_group_size_impl"),
6297 {Kernel, Arg}));
6298 }
6299 case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
6300 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6301 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6302 auto Info =
6303 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(0));
6304 Value *Kernel =
6305 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6306 Value *Arg = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6308 CGM.CreateRuntimeFunction(
6309 llvm::FunctionType::get(IntTy, {GenericVoidPtrTy, GenericVoidPtrTy},
6310 false),
6311 "__get_kernel_preferred_work_group_size_multiple_impl"),
6312 {Kernel, Arg}));
6313 }
6314 case Builtin::BIget_kernel_max_sub_group_size_for_ndrange:
6315 case Builtin::BIget_kernel_sub_group_count_for_ndrange: {
6316 llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
6317 getContext().getTargetAddressSpace(LangAS::opencl_generic));
6318 LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
6319 llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
6320 auto Info =
6321 CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
6322 Value *Kernel =
6323 Builder.CreatePointerCast(Info.KernelHandle, GenericVoidPtrTy);
6324 Value *Block = Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
6325 const char *Name =
6326 BuiltinID == Builtin::BIget_kernel_max_sub_group_size_for_ndrange
6327 ? "__get_kernel_max_sub_group_size_for_ndrange_impl"
6328 : "__get_kernel_sub_group_count_for_ndrange_impl";
6330 CGM.CreateRuntimeFunction(
6331 llvm::FunctionType::get(
6332 IntTy, {NDRange->getType(), GenericVoidPtrTy, GenericVoidPtrTy},
6333 false),
6334 Name),
6335 {NDRange, Kernel, Block}));
6336 }
6337 case Builtin::BI__builtin_store_half:
6338 case Builtin::BI__builtin_store_halff: {
6339 CodeGenFunction::CGFPOptionsRAII FPOptsRAII(*this, E);
6340 Value *Val = EmitScalarExpr(E->getArg(0));
6342 Value *HalfVal = Builder.CreateFPTrunc(Val, Builder.getHalfTy());
6343 Builder.CreateStore(HalfVal, Address);
6344 return RValue::get(nullptr);
6345 }
6346 case Builtin::BI__builtin_load_half: {
6348 Value *HalfVal = Builder.CreateLoad(Address);
6349 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getDoubleTy()));
6350 }
6351 case Builtin::BI__builtin_load_halff: {
6353 Value *HalfVal = Builder.CreateLoad(Address);
6354 return RValue::get(Builder.CreateFPExt(HalfVal, Builder.getFloatTy()));
6355 }
6356 case Builtin::BI__builtin_printf:
6357 case Builtin::BIprintf:
6358 if (getTarget().getTriple().isNVPTX() ||
6359 getTarget().getTriple().isAMDGCN() ||
6360 (getTarget().getTriple().isSPIRV() &&
6361 getTarget().getTriple().getVendor() == Triple::VendorType::AMD)) {
6362 if (getTarget().getTriple().isNVPTX())
6364 if ((getTarget().getTriple().isAMDGCN() ||
6365 getTarget().getTriple().isSPIRV()) &&
6366 getLangOpts().HIP)
6368 }
6369
6370 break;
6371 case Builtin::BI__builtin_canonicalize:
6372 case Builtin::BI__builtin_canonicalizef:
6373 case Builtin::BI__builtin_canonicalizef16:
6374 case Builtin::BI__builtin_canonicalizel:
6375 return RValue::get(
6376 emitBuiltinWithOneOverloadedType<1>(*this, E, Intrinsic::canonicalize));
6377
6378 case Builtin::BI__builtin_thread_pointer: {
6379 if (!getContext().getTargetInfo().isTLSSupported())
6380 CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
6381
6382 return RValue::get(Builder.CreateIntrinsic(llvm::Intrinsic::thread_pointer,
6383 {GlobalsInt8PtrTy}, {}));
6384 }
6385 case Builtin::BI__builtin_os_log_format:
6386 return emitBuiltinOSLogFormat(*E);
6387
6388 case Builtin::BI__xray_customevent: {
6390 return RValue::getIgnored();
6391
6392 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6394 return RValue::getIgnored();
6395
6396 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6397 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayCustomEvents())
6398 return RValue::getIgnored();
6399
6400 Function *F = CGM.getIntrinsic(Intrinsic::xray_customevent);
6401 auto FTy = F->getFunctionType();
6402 auto Arg0 = E->getArg(0);
6403 auto Arg0Val = EmitScalarExpr(Arg0);
6404 auto Arg0Ty = Arg0->getType();
6405 auto PTy0 = FTy->getParamType(0);
6406 if (PTy0 != Arg0Val->getType()) {
6407 if (Arg0Ty->isArrayType())
6408 Arg0Val = EmitArrayToPointerDecay(Arg0).emitRawPointer(*this);
6409 else
6410 Arg0Val = Builder.CreatePointerCast(Arg0Val, PTy0);
6411 }
6412 auto Arg1 = EmitScalarExpr(E->getArg(1));
6413 auto PTy1 = FTy->getParamType(1);
6414 if (PTy1 != Arg1->getType())
6415 Arg1 = Builder.CreateTruncOrBitCast(Arg1, PTy1);
6416 return RValue::get(Builder.CreateCall(F, {Arg0Val, Arg1}));
6417 }
6418
6419 case Builtin::BI__xray_typedevent: {
6420 // TODO: There should be a way to always emit events even if the current
6421 // function is not instrumented. Losing events in a stream can cripple
6422 // a trace.
6424 return RValue::getIgnored();
6425
6426 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
6428 return RValue::getIgnored();
6429
6430 if (const auto *XRayAttr = CurFuncDecl->getAttr<XRayInstrumentAttr>())
6431 if (XRayAttr->neverXRayInstrument() && !AlwaysEmitXRayTypedEvents())
6432 return RValue::getIgnored();
6433
6434 Function *F = CGM.getIntrinsic(Intrinsic::xray_typedevent);
6435 auto FTy = F->getFunctionType();
6436 auto Arg0 = EmitScalarExpr(E->getArg(0));
6437 auto PTy0 = FTy->getParamType(0);
6438 if (PTy0 != Arg0->getType())
6439 Arg0 = Builder.CreateTruncOrBitCast(Arg0, PTy0);
6440 auto Arg1 = E->getArg(1);
6441 auto Arg1Val = EmitScalarExpr(Arg1);
6442 auto Arg1Ty = Arg1->getType();
6443 auto PTy1 = FTy->getParamType(1);
6444 if (PTy1 != Arg1Val->getType()) {
6445 if (Arg1Ty->isArrayType())
6446 Arg1Val = EmitArrayToPointerDecay(Arg1).emitRawPointer(*this);
6447 else
6448 Arg1Val = Builder.CreatePointerCast(Arg1Val, PTy1);
6449 }
6450 auto Arg2 = EmitScalarExpr(E->getArg(2));
6451 auto PTy2 = FTy->getParamType(2);
6452 if (PTy2 != Arg2->getType())
6453 Arg2 = Builder.CreateTruncOrBitCast(Arg2, PTy2);
6454 return RValue::get(Builder.CreateCall(F, {Arg0, Arg1Val, Arg2}));
6455 }
6456
6457 case Builtin::BI__builtin_ms_va_start:
6458 case Builtin::BI__builtin_ms_va_end:
6459 return RValue::get(
6461 BuiltinID == Builtin::BI__builtin_ms_va_start));
6462
6463 case Builtin::BI__builtin_ms_va_copy: {
6464 // Lower this manually. We can't reliably determine whether or not any
6465 // given va_copy() is for a Win64 va_list from the calling convention
6466 // alone, because it's legal to do this from a System V ABI function.
6467 // With opaque pointer types, we won't have enough information in LLVM
6468 // IR to determine this from the argument types, either. Best to do it
6469 // now, while we have enough information.
6470 Address DestAddr = EmitMSVAListRef(E->getArg(0));
6471 Address SrcAddr = EmitMSVAListRef(E->getArg(1));
6472
6473 DestAddr = DestAddr.withElementType(Int8PtrTy);
6474 SrcAddr = SrcAddr.withElementType(Int8PtrTy);
6475
6476 Value *ArgPtr = Builder.CreateLoad(SrcAddr, "ap.val");
6477 return RValue::get(Builder.CreateStore(ArgPtr, DestAddr));
6478 }
6479
6480 case Builtin::BI__builtin_get_device_side_mangled_name: {
6481 auto Name = CGM.getCUDARuntime().getDeviceSideName(
6482 cast<DeclRefExpr>(E->getArg(0)->IgnoreImpCasts())->getDecl());
6483 auto Str = CGM.GetAddrOfConstantCString(Name, "");
6484 return RValue::get(Str.getPointer());
6485 }
6486 }
6487
6488 // If this is an alias for a lib function (e.g. __builtin_sin), emit
6489 // the call using the normal call path, but using the unmangled
6490 // version of the function name.
6491 const auto &BI = getContext().BuiltinInfo;
6492 if (!shouldEmitBuiltinAsIR(BuiltinID, BI, *this) &&
6493 BI.isLibFunction(BuiltinID))
6494 return emitLibraryCall(*this, FD, E,
6495 CGM.getBuiltinLibFunction(FD, BuiltinID));
6496
6497 // If this is a predefined lib function (e.g. malloc), emit the call
6498 // using exactly the normal call path.
6499 if (BI.isPredefinedLibFunction(BuiltinID))
6500 return emitLibraryCall(*this, FD, E, CGM.getRawFunctionPointer(FD));
6501
6502 // Check that a call to a target specific builtin has the correct target
6503 // features.
6504 // This is down here to avoid non-target specific builtins, however, if
6505 // generic builtins start to require generic target features then we
6506 // can move this up to the beginning of the function.
6507 checkTargetFeatures(E, FD);
6508
6509 if (unsigned VectorWidth = getContext().BuiltinInfo.getRequiredVectorWidth(BuiltinID))
6510 LargestVectorWidth = std::max(LargestVectorWidth, VectorWidth);
6511
6512 // See if we have a target specific intrinsic.
6513 std::string Name = getContext().BuiltinInfo.getName(BuiltinID);
6514 Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
6515 StringRef Prefix =
6516 llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
6517 if (!Prefix.empty()) {
6518 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
6519 if (IntrinsicID == Intrinsic::not_intrinsic && Prefix == "spv" &&
6520 getTarget().getTriple().getOS() == llvm::Triple::OSType::AMDHSA)
6521 IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin("amdgcn", Name);
6522 // NOTE we don't need to perform a compatibility flag check here since the
6523 // intrinsics are declared in Builtins*.def via LANGBUILTIN which filter the
6524 // MS builtins via ALL_MS_LANGUAGES and are filtered earlier.
6525 if (IntrinsicID == Intrinsic::not_intrinsic)
6526 IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
6527 }
6528
6529 if (IntrinsicID != Intrinsic::not_intrinsic) {
6531
6532 // Find out if any arguments are required to be integer constant
6533 // expressions.
6534 unsigned ICEArguments = 0;
6536 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
6537 assert(Error == ASTContext::GE_None && "Should not codegen an error");
6538
6539 Function *F = CGM.getIntrinsic(IntrinsicID);
6540 llvm::FunctionType *FTy = F->getFunctionType();
6541
6542 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
6543 Value *ArgValue = EmitScalarOrConstFoldImmArg(ICEArguments, i, E);
6544 // If the intrinsic arg type is different from the builtin arg type
6545 // we need to do a bit cast.
6546 llvm::Type *PTy = FTy->getParamType(i);
6547 if (PTy != ArgValue->getType()) {
6548 // XXX - vector of pointers?
6549 if (auto *PtrTy = dyn_cast<llvm::PointerType>(PTy)) {
6550 if (PtrTy->getAddressSpace() !=
6551 ArgValue->getType()->getPointerAddressSpace()) {
6552 ArgValue = Builder.CreateAddrSpaceCast(
6553 ArgValue, llvm::PointerType::get(getLLVMContext(),
6554 PtrTy->getAddressSpace()));
6555 }
6556 }
6557
6558 // Cast vector type (e.g., v256i32) to x86_amx, this only happen
6559 // in amx intrinsics.
6560 if (PTy->isX86_AMXTy())
6561 ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
6562 {ArgValue->getType()}, {ArgValue});
6563 else
6564 ArgValue = Builder.CreateBitCast(ArgValue, PTy);
6565 }
6566
6567 Args.push_back(ArgValue);
6568 }
6569
6570 Value *V = Builder.CreateCall(F, Args);
6571 QualType BuiltinRetType = E->getType();
6572
6573 llvm::Type *RetTy = VoidTy;
6574 if (!BuiltinRetType->isVoidType())
6575 RetTy = ConvertType(BuiltinRetType);
6576
6577 if (RetTy != V->getType()) {
6578 // XXX - vector of pointers?
6579 if (auto *PtrTy = dyn_cast<llvm::PointerType>(RetTy)) {
6580 if (PtrTy->getAddressSpace() != V->getType()->getPointerAddressSpace()) {
6581 V = Builder.CreateAddrSpaceCast(
6582 V, llvm::PointerType::get(getLLVMContext(),
6583 PtrTy->getAddressSpace()));
6584 }
6585 }
6586
6587 // Cast x86_amx to vector type (e.g., v256i32), this only happen
6588 // in amx intrinsics.
6589 if (V->getType()->isX86_AMXTy())
6590 V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
6591 {V});
6592 else
6593 V = Builder.CreateBitCast(V, RetTy);
6594 }
6595
6596 if (RetTy->isVoidTy())
6597 return RValue::get(nullptr);
6598
6599 return RValue::get(V);
6600 }
6601
6602 // Some target-specific builtins can have aggregate return values, e.g.
6603 // __builtin_arm_mve_vld2q_u32. So if the result is an aggregate, force
6604 // ReturnValue to be non-null, so that the target-specific emission code can
6605 // always just emit into it.
6607 if (EvalKind == TEK_Aggregate && ReturnValue.isNull()) {
6608 Address DestPtr = CreateMemTemp(E->getType(), "agg.tmp");
6609 ReturnValue = ReturnValueSlot(DestPtr, false);
6610 }
6611
6612 // Now see if we can emit a target-specific builtin.
6613 if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E, ReturnValue)) {
6614 switch (EvalKind) {
6615 case TEK_Scalar:
6616 if (V->getType()->isVoidTy())
6617 return RValue::get(nullptr);
6618 return RValue::get(V);
6619 case TEK_Aggregate:
6620 return RValue::getAggregate(ReturnValue.getAddress(),
6621 ReturnValue.isVolatile());
6622 case TEK_Complex:
6623 llvm_unreachable("No current target builtin returns complex");
6624 }
6625 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6626 }
6627
6628 // EmitHLSLBuiltinExpr will check getLangOpts().HLSL
6629 if (Value *V = EmitHLSLBuiltinExpr(BuiltinID, E, ReturnValue)) {
6630 switch (EvalKind) {
6631 case TEK_Scalar:
6632 if (V->getType()->isVoidTy())
6633 return RValue::get(nullptr);
6634 return RValue::get(V);
6635 case TEK_Aggregate:
6636 return RValue::getAggregate(ReturnValue.getAddress(),
6637 ReturnValue.isVolatile());
6638 case TEK_Complex:
6639 llvm_unreachable("No current hlsl builtin returns complex");
6640 }
6641 llvm_unreachable("Bad evaluation kind in EmitBuiltinExpr");
6642 }
6643
6644 if (getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice)
6645 return EmitHipStdParUnsupportedBuiltin(this, FD);
6646
6647 ErrorUnsupported(E, "builtin function");
6648
6649 // Unknown builtin, for now just dump it out and return undef.
6650 return GetUndefRValue(E->getType());
6651}
6652
6653namespace {
6654struct BuiltinAlignArgs {
6655 llvm::Value *Src = nullptr;
6656 llvm::Type *SrcType = nullptr;
6657 llvm::Value *Alignment = nullptr;
6658 llvm::Value *Mask = nullptr;
6659 llvm::IntegerType *IntType = nullptr;
6660
6661 BuiltinAlignArgs(const CallExpr *E, CodeGenFunction &CGF) {
6662 QualType AstType = E->getArg(0)->getType();
6663 if (AstType->isArrayType())
6664 Src = CGF.EmitArrayToPointerDecay(E->getArg(0)).emitRawPointer(CGF);
6665 else
6666 Src = CGF.EmitScalarExpr(E->getArg(0));
6667 SrcType = Src->getType();
6668 if (SrcType->isPointerTy()) {
6669 IntType = IntegerType::get(
6670 CGF.getLLVMContext(),
6671 CGF.CGM.getDataLayout().getIndexTypeSizeInBits(SrcType));
6672 } else {
6673 assert(SrcType->isIntegerTy());
6674 IntType = cast<llvm::IntegerType>(SrcType);
6675 }
6676 Alignment = CGF.EmitScalarExpr(E->getArg(1));
6677 Alignment = CGF.Builder.CreateZExtOrTrunc(Alignment, IntType, "alignment");
6678 auto *One = llvm::ConstantInt::get(IntType, 1);
6679 Mask = CGF.Builder.CreateSub(Alignment, One, "mask");
6680 }
6681};
6682} // namespace
6683
6684/// Generate (x & (y-1)) == 0.
6686 BuiltinAlignArgs Args(E, *this);
6687 llvm::Value *SrcAddress = Args.Src;
6688 if (Args.SrcType->isPointerTy())
6689 SrcAddress =
6690 Builder.CreateBitOrPointerCast(Args.Src, Args.IntType, "src_addr");
6691 return RValue::get(Builder.CreateICmpEQ(
6692 Builder.CreateAnd(SrcAddress, Args.Mask, "set_bits"),
6693 llvm::Constant::getNullValue(Args.IntType), "is_aligned"));
6694}
6695
6696/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
6697/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
6698/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
6700 BuiltinAlignArgs Args(E, *this);
6701 llvm::Value *SrcForMask = Args.Src;
6702 if (AlignUp) {
6703 // When aligning up we have to first add the mask to ensure we go over the
6704 // next alignment value and then align down to the next valid multiple.
6705 // By adding the mask, we ensure that align_up on an already aligned
6706 // value will not change the value.
6707 if (Args.Src->getType()->isPointerTy()) {
6708 if (getLangOpts().PointerOverflowDefined)
6709 SrcForMask =
6710 Builder.CreateGEP(Int8Ty, SrcForMask, Args.Mask, "over_boundary");
6711 else
6712 SrcForMask = EmitCheckedInBoundsGEP(Int8Ty, SrcForMask, Args.Mask,
6713 /*SignedIndices=*/true,
6714 /*isSubtraction=*/false,
6715 E->getExprLoc(), "over_boundary");
6716 } else {
6717 SrcForMask = Builder.CreateAdd(SrcForMask, Args.Mask, "over_boundary");
6718 }
6719 }
6720 // Invert the mask to only clear the lower bits.
6721 llvm::Value *InvertedMask = Builder.CreateNot(Args.Mask, "inverted_mask");
6722 llvm::Value *Result = nullptr;
6723 if (Args.Src->getType()->isPointerTy()) {
6724 Result = Builder.CreateIntrinsic(
6725 Intrinsic::ptrmask, {Args.SrcType, Args.IntType},
6726 {SrcForMask, InvertedMask}, nullptr, "aligned_result");
6727 } else {
6728 Result = Builder.CreateAnd(SrcForMask, InvertedMask, "aligned_result");
6729 }
6730 assert(Result->getType() == Args.SrcType);
6731 return RValue::get(Result);
6732}
#define V(N, I)
static char bitActionToX86BTCode(BitTest::ActionKind A)
static Value * EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering)
static void emitSincosBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static CanQualType getOSLogArgType(ASTContext &C, int Size)
Get the argument type for arguments to os_log_helper.
static Value * EmitOverflowCheckedAbs(CodeGenFunction &CGF, const CallExpr *E, bool SanitizeOverflow)
static llvm::Value * EmitBitCountExpr(CodeGenFunction &CGF, const Expr *E)
static Value * tryUseTestFPKind(CodeGenFunction &CGF, unsigned BuiltinID, Value *V)
static bool areBOSTypesCompatible(int From, int To)
Checks if using the result of __builtin_object_size(p, From) in place of __builtin_object_size(p,...
static std::pair< llvm::Value *, llvm::Value * > GetCountFieldAndIndex(CodeGenFunction &CGF, const MemberExpr *ME, const FieldDecl *ArrayFD, const FieldDecl *CountFD, const Expr *Idx, llvm::IntegerType *ResType, bool IsSigned)
Value * EmitFromInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::Type *ResultType)
static bool TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty, llvm::SmallPtrSetImpl< const Decl * > &Seen)
Value * MakeAtomicCmpXchgValue(CodeGenFunction &CGF, const CallExpr *E, bool ReturnBool, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering)
Utility to insert an atomic cmpxchg instruction.
static Value * EmitAtomicIncrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
static RValue EmitMSVCRTSetJmp(CodeGenFunction &CGF, MSVCSetJmpKind SJKind, const CallExpr *E)
MSVC handles setjmp a bit differently on different platforms.
#define MUTATE_LDBL(func)
static Value * emitMaybeConstrainedFPToIntRoundBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static bool TypeRequiresBuiltinLaunder(CodeGenModule &CGM, QualType Ty)
Determine if the specified type requires laundering by checking if it is a dynamic class type or cont...
static Value * EmitISOVolatileLoad(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitTargetArchBuiltinExpr(CodeGenFunction *CGF, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition CGBuiltin.cpp:72
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, Instruction::BinaryOps Op, bool Invert=false)
Utility to insert an atomic instruction based Intrinsic::ID and the expression node,...
static bool HasNoIndirectArgumentsOrResults(CGFunctionInfo const &FnInfo)
Checks no arguments or results are passed indirectly in the ABI (i.e.
Value * EmitToInt(CodeGenFunction &CGF, llvm::Value *V, QualType T, llvm::IntegerType *IntType)
Emit the conversions required to turn the given value into an integer of the given size.
static Value * emitBinaryExpMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID, Intrinsic::ID ConstrainedIntrinsicID)
static llvm::Value * EmitBitTestIntrinsic(CodeGenFunction &CGF, unsigned BuiltinID, const CallExpr *E)
Emit a _bittest* intrinsic.
static Value * EmitSignBit(CodeGenFunction &CGF, Value *V)
Emit the computation of the sign bit for a floating point value.
static Value * EmitFAbs(CodeGenFunction &CGF, Value *V)
EmitFAbs - Emit a call to @llvm.fabs().
static llvm::Value * EmitPositiveResultOrZero(CodeGenFunction &CGF, llvm::Value *Res, llvm::Value *Index, llvm::IntegerType *ResType, bool IsSigned)
static bool shouldEmitBuiltinAsIR(unsigned BuiltinID, const Builtin::Context &BI, const CodeGenFunction &CGF)
Some builtins do not have library implementation on some targets and are instead emitted as LLVM IRs ...
Definition CGBuiltin.cpp:48
static bool isSpecialUnsignedMultiplySignedResult(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
static llvm::Value * getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType)
static Value * emitTernaryMaybeConstrainedFPBuiltin(CodeGenFunction &CGF, const CallExpr *E, unsigned IntrinsicID, unsigned ConstrainedIntrinsicID)
static RValue EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Emit a checked mixed-sign multiply.
static unsigned mutateLongDoubleBuiltin(unsigned BuiltinID)
static RValue EmitBinaryAtomic(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E)
static void initializeAlloca(CodeGenFunction &CGF, AllocaInst *AI, Value *Size, Align AlignmentInBytes)
static Value * EmitAtomicCmpXchgForMSIntrin(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering SuccessOrdering=AtomicOrdering::SequentiallyConsistent)
This function should be invoked to emit atomic cmpxchg for Microsoft's _InterlockedCompareExchange* i...
static bool isSpecialMixedSignMultiply(unsigned BuiltinID, WidthAndSignedness Op1Info, WidthAndSignedness Op2Info, WidthAndSignedness ResultInfo)
Determine if a binop is a checked mixed-sign multiply we can specialize.
static Value * emitFrexpBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static llvm::Value * emitModfBuiltin(CodeGenFunction &CGF, const CallExpr *E, Intrinsic::ID IntrinsicID)
static Value * EmitNontemporalStore(CodeGenFunction &CGF, const CallExpr *E)
static const FieldDecl * FindFlexibleArrayMemberField(CodeGenFunction &CGF, ASTContext &Ctx, const RecordDecl *RD)
Find a struct's flexible array member.
static Value * EmitISOVolatileStore(CodeGenFunction &CGF, const CallExpr *E)
static RValue EmitHipStdParUnsupportedBuiltin(CodeGenFunction *CGF, const FunctionDecl *FD)
static llvm::Value * EmitX86BitTestIntrinsic(CodeGenFunction &CGF, BitTest BT, const CallExpr *E, Value *BitBase, Value *BitPos)
static RValue EmitCheckedUnsignedMultiplySignedResult(CodeGenFunction &CGF, const clang::Expr *Op1, WidthAndSignedness Op1Info, const clang::Expr *Op2, WidthAndSignedness Op2Info, const clang::Expr *ResultArg, QualType ResultQTy, WidthAndSignedness ResultInfo)
Address CheckAtomicAlignment(CodeGenFunction &CGF, const CallExpr *E)
static Value * EmitNontemporalLoad(CodeGenFunction &CGF, const CallExpr *E)
static llvm::AtomicOrdering getBitTestAtomicOrdering(BitTest::InterlockingKind I)
static bool GetFieldOffset(ASTContext &Ctx, const RecordDecl *RD, const FieldDecl *FD, int64_t &Offset)
Calculate the offset of a struct field.
Value * MakeBinaryAtomicValue(CodeGenFunction &CGF, llvm::AtomicRMWInst::BinOp Kind, const CallExpr *E, AtomicOrdering Ordering)
Utility to insert an atomic instruction based on Intrinsic::ID and the expression node.
llvm::Value * EmitOverflowIntrinsic(CodeGenFunction &CGF, const Intrinsic::ID IntrinsicID, llvm::Value *X, llvm::Value *Y, llvm::Value *&Carry)
Emit a call to llvm.
static Value * EmitAbs(CodeGenFunction &CGF, Value *ArgValue, bool HasNSW)
static Value * EmitAtomicDecrementValue(CodeGenFunction &CGF, const CallExpr *E, AtomicOrdering Ordering=AtomicOrdering::SequentiallyConsistent)
llvm::Value * emitBuiltinWithOneOverloadedType(clang::CodeGen::CodeGenFunction &CGF, const clang::CallExpr *E, unsigned IntrinsicID, llvm::StringRef Name="")
Definition CGBuiltin.h:63
static mlir::Value emitBinaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitUnaryMaybeConstrainedFPBuiltin(CIRGenFunction &cgf, const CallExpr &e)
static RValue emitLibraryCall(CIRGenFunction &cgf, const FunctionDecl *fd, const CallExpr *e, mlir::Operation *calleeValue)
static WidthAndSignedness getIntegerWidthAndSignedness(const clang::ASTContext &astContext, const clang::QualType type)
static struct WidthAndSignedness EncompassingIntegerType(ArrayRef< struct WidthAndSignedness > types)
TokenType getType() const
Returns the token's type, e.g.
FormatToken * Next
The next token in the unwrapped line.
#define X(type, name)
Definition Value.h:97
static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target)
llvm::MachO::Record Record
Definition MachO.h:31
static StringRef getTriple(const Command &Job)
SanitizerHandler
static QualType getPointeeType(const MemRegion *R)
__DEVICE__ float modf(float __x, float *__iptr)
__DEVICE__ double nan(const char *)
APSInt & getInt()
Definition APValue.h:508
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:226
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
CanQualType VoidPtrTy
IdentifierTable & Idents
Definition ASTContext.h:798
Builtin::Context & BuiltinInfo
Definition ASTContext.h:800
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
QualType getBaseElementType(const ArrayType *VAT) const
Return the innermost element type of an array type.
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
CanQualType VoidTy
QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error, unsigned *IntegerConstantArgs=nullptr) const
Return the type for the specified builtin.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:917
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
unsigned getTargetAddressSpace(LangAS AS) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
@ GE_None
No error.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
QualType getElementType() const
Definition TypeBase.h:3784
static std::unique_ptr< AtomicScopeModel > create(AtomicScopeModelKind K)
Create an atomic scope model by AtomicScopeModelKind.
Definition SyncScope.h:298
static bool isCommaOp(Opcode Opc)
Definition Expr.h:4144
Expr * getRHS() const
Definition Expr.h:4093
Holds information about both target-independent and target-specific builtins, allowing easy queries b...
Definition Builtins.h:235
bool shouldGenerateFPMathIntrinsic(unsigned BuiltinID, llvm::Triple Trip, std::optional< bool > ErrnoOverwritten, bool MathErrnoEnabled, bool HasOptNoneAttr, bool IsOptimizationEnabled) const
Determine whether we can generate LLVM intrinsics for the given builtin ID, based on whether it has s...
Definition Builtins.cpp:225
bool isConstWithoutErrnoAndExceptions(unsigned ID) const
Return true if this function has no side effects and doesn't read memory, except for possibly errno o...
Definition Builtins.h:412
std::string getName(unsigned ID) const
Return the identifier name for the specified builtin, e.g.
Definition Builtins.cpp:80
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2946
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3150
bool hasStoredFPFeatures() const
Definition Expr.h:3105
SourceLocation getBeginLoc() const
Definition Expr.h:3280
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3129
Expr * getCallee()
Definition Expr.h:3093
FPOptionsOverride getFPFeatures() const
Definition Expr.h:3245
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3137
arg_range arguments()
Definition Expr.h:3198
CastKind getCastKind() const
Definition Expr.h:3723
Expr * getSubExpr()
Definition Expr.h:3729
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
llvm::Align getAsAlign() const
getAsAlign - Returns Quantity as a valid llvm::Align, Beware llvm::Align assumes power of two 8-bit b...
Definition CharUnits.h:189
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
ABIArgInfo - Helper class to encapsulate information about how a specific C type should be passed to ...
Like RawAddress, an abstract representation of an aligned address, but the pointer contained in this ...
Definition Address.h:128
llvm::Value * emitRawPointer(CodeGenFunction &CGF) const
Return the pointer contained in this class after authenticating it and adding offset to it if necessa...
Definition Address.h:253
CharUnits getAlignment() const
Definition Address.h:194
llvm::Type * getElementType() const
Return the type of the values stored in this address.
Definition Address.h:209
Address withElementType(llvm::Type *ElemTy) const
Return address with different element type, but same pointer and alignment.
Definition Address.h:276
Address withAlignment(CharUnits NewAlignment) const
Return address with different alignment, but same pointer and element type.
Definition Address.h:269
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition Address.h:204
A scoped helper to set the current debug location to the specified location or preferred location of ...
static ApplyDebugLocation CreateArtificial(CodeGenFunction &CGF)
Apply TemporaryLocation if it is valid.
static ApplyDebugLocation CreateEmpty(CodeGenFunction &CGF)
Set the IRBuilder to not attach debug locations.
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition CGBuilder.h:146
llvm::StoreInst * CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr, CharUnits Align, bool IsVolatile=false)
Definition CGBuilder.h:153
llvm::AtomicRMWInst * CreateAtomicRMW(llvm::AtomicRMWInst::BinOp Op, Address Addr, llvm::Value *Val, llvm::AtomicOrdering Ordering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:190
llvm::CallInst * CreateMemSet(Address Dest, llvm::Value *Value, llvm::Value *Size, bool IsVolatile=false)
Definition CGBuilder.h:408
llvm::AtomicCmpXchgInst * CreateAtomicCmpXchg(Address Addr, llvm::Value *Cmp, llvm::Value *New, llvm::AtomicOrdering SuccessOrdering, llvm::AtomicOrdering FailureOrdering, llvm::SyncScope::ID SSID=llvm::SyncScope::System)
Definition CGBuilder.h:179
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition CGBuilder.h:118
llvm::LoadInst * CreateAlignedLoad(llvm::Type *Ty, llvm::Value *Addr, CharUnits Align, const llvm::Twine &Name="")
Definition CGBuilder.h:138
Address CreateInBoundsGEP(Address Addr, ArrayRef< llvm::Value * > IdxList, llvm::Type *ElementType, CharUnits Align, const Twine &Name="")
Definition CGBuilder.h:356
All available information about a concrete callee.
Definition CGCall.h:63
static CGCallee forDirect(llvm::Constant *functionPtr, const CGCalleeInfo &abstractInfo=CGCalleeInfo())
Definition CGCall.h:137
llvm::DILocation * CreateTrapFailureMessageFor(llvm::DebugLoc TrapLocation, StringRef Category, StringRef FailureMsg)
Create a debug location from TrapLocation that adds an artificial inline frame where the frame name i...
CGFunctionInfo - Class to encapsulate the information about a function definition.
MutableArrayRef< ArgInfo > arguments()
llvm::Value * getPipeElemAlign(const Expr *PipeArg)
llvm::Value * getPipeElemSize(const Expr *PipeArg)
llvm::StructType * getLLVMType() const
Return the "complete object" LLVM type associated with this record.
CallArgList - Type for representing both the value and type of arguments in a call.
Definition CGCall.h:274
void add(RValue rvalue, QualType type)
Definition CGCall.h:302
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
RValue EmitAMDGPUDevicePrintfCallExpr(const CallExpr *E)
llvm::Value * GetVTablePtr(Address This, llvm::Type *VTableTy, const CXXRecordDecl *VTableClass, VTableAuthMode AuthMode=VTableAuthMode::Authenticate)
GetVTablePtr - Return the Value of the vtable pointer member pointed to by This.
Definition CGClass.cpp:2812
RValue EmitNVPTXDevicePrintfCallExpr(const CallExpr *E)
RValue EmitCoroutineIntrinsic(const CallExpr *E, unsigned int IID)
llvm::Value * performAddrSpaceCast(llvm::Value *Src, llvm::Type *DestTy)
llvm::Value * EmitScalarOrConstFoldImmArg(unsigned ICEArguments, unsigned Idx, const CallExpr *E)
SanitizerSet SanOpts
Sanitizers enabled for this function.
void checkTargetFeatures(const CallExpr *E, const FunctionDecl *TargetDecl)
llvm::Value * GetCountedByFieldExprGEP(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Definition CGExpr.cpp:1193
llvm::Type * ConvertType(QualType T)
void addInstToNewSourceAtom(llvm::Instruction *KeyInstruction, llvm::Value *Backup)
Add KeyInstruction and an optional Backup instruction to a new atom group (See ApplyAtomGroup for mor...
BuiltinCheckKind
Specifies which type of sanitizer check to apply when handling a particular builtin.
llvm::Value * EmitSystemZBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SystemZ.cpp:39
llvm::CallBase * EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee, ArrayRef< llvm::Value * > args, const Twine &name="")
Emits a call or invoke instruction to the given runtime function.
Definition CGCall.cpp:5187
llvm::Value * EmitSEHAbnormalTermination()
llvm::Value * EmitARCRetain(QualType type, llvm::Value *value)
Produce the code to do a retain.
Definition CGObjC.cpp:2355
CleanupKind getARCCleanupKind()
Retrieves the default cleanup kind for an ARC cleanup.
llvm::Value * EmitVAStartEnd(llvm::Value *ArgValue, bool IsStart)
Emits a call to an LLVM variable-argument intrinsic, either llvm.va_start or llvm....
llvm::Value * EmitAMDGPUBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition AMDGPU.cpp:521
llvm::Constant * EmitCheckSourceLocation(SourceLocation Loc)
Emit a description of a source location in a format suitable for passing to a runtime sanitizer handl...
Definition CGExpr.cpp:4001
void SetSqrtFPAccuracy(llvm::Value *Val)
Set the minimum required accuracy of the given sqrt operation based on CodeGenOpts.
Definition CGExpr.cpp:7170
RValue emitBuiltinOSLogFormat(const CallExpr &E)
Emit IR for __builtin_os_log_format.
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
llvm::Function * generateBuiltinOSLogHelperFunction(const analyze_os_log::OSLogBufferLayout &Layout, CharUnits BufferAlignment)
const LangOptions & getLangOpts() const
LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Address makeNaturalAddressForPointer(llvm::Value *Ptr, QualType T, CharUnits Alignment=CharUnits::Zero(), bool ForPointeeType=false, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
Construct an address with the natural alignment of T.
TypeCheckKind
Situations in which we might emit a check for the suitability of a pointer or glvalue.
@ TCK_Store
Checking the destination of a store. Must be suitably sized and aligned.
@ TCK_Load
Checking the operand of a load. Must be suitably sized and aligned.
llvm::Value * EmitRISCVBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Definition RISCV.cpp:1073
llvm::Value * EmitCheckedArgForBuiltin(const Expr *E, BuiltinCheckKind Kind)
Emits an argument for a call to a builtin.
llvm::Constant * EmitCheckTypeDescriptor(QualType T)
Emit a description of a type in a format suitable for passing to a runtime sanitizer handler.
Definition CGExpr.cpp:3891
void EmitNonNullArgCheck(RValue RV, QualType ArgType, SourceLocation ArgLoc, AbstractCallee AC, unsigned ParmNum)
Create a check for a function parameter that may potentially be declared as non-null.
Definition CGCall.cpp:4683
const TargetInfo & getTarget() const
RValue emitRotate(const CallExpr *E, bool IsRotateRight)
llvm::Value * EmitAnnotationCall(llvm::Function *AnnotationFn, llvm::Value *AnnotatedVal, StringRef AnnotationStr, SourceLocation Location, const AnnotateAttr *Attr)
Emit an annotation call (intrinsic).
llvm::Value * EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue, llvm::Triple::ArchType Arch)
Definition ARM.cpp:2141
CGCallee EmitCallee(const Expr *E)
Definition CGExpr.cpp:6532
void pushCleanupAfterFullExpr(CleanupKind Kind, As... A)
Queue a cleanup to be pushed after finishing the current full-expression, potentially with an active ...
llvm::Value * EmitBPFBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition ARM.cpp:7244
bool AlwaysEmitXRayCustomEvents() const
AlwaysEmitXRayCustomEvents - Return true if we must unconditionally emit XRay custom event handling c...
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
LValue EmitAggExprToLValue(const Expr *E)
EmitAggExprToLValue - Emit the computation of the specified expression of aggregate type into a tempo...
llvm::Value * EvaluateExprAsBool(const Expr *E)
EvaluateExprAsBool - Perform the usual unary conversions on the specified expression and compare the ...
Definition CGExpr.cpp:232
llvm::Value * EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition PPC.cpp:205
void EmitCheck(ArrayRef< std::pair< llvm::Value *, SanitizerKind::SanitizerOrdinal > > Checked, SanitizerHandler Check, ArrayRef< llvm::Constant * > StaticArgs, ArrayRef< llvm::Value * > DynamicArgs, const TrapReason *TR=nullptr)
Create a basic block that will either trap or call a handler function in the UBSan runtime with the p...
Definition CGExpr.cpp:4149
bool AlwaysEmitXRayTypedEvents() const
AlwaysEmitXRayTypedEvents - Return true if clang must unconditionally emit XRay typed event handling ...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
bool EmitLifetimeStart(llvm::Value *Addr)
Emit a lifetime.begin marker if some criteria are satisfied.
Definition CGDecl.cpp:1356
llvm::MDNode * buildAllocToken(QualType AllocType)
Build metadata used by the AllocToken instrumentation.
Definition CGExpr.cpp:1316
llvm::Value * EmitToMemory(llvm::Value *Value, QualType Ty)
EmitToMemory - Change a scalar value from its value representation to its in-memory representation.
Definition CGExpr.cpp:2223
ComplexPairTy EmitComplexExpr(const Expr *E, bool IgnoreReal=false, bool IgnoreImag=false)
EmitComplexExpr - Emit the computation of the specified expression of complex type,...
RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **CallOrInvoke, bool IsMustTail, SourceLocation Loc, bool IsVirtualFunctionPointerThunk=false)
EmitCall - Generate a call of the given function, expecting the given result type,...
Definition CGCall.cpp:5343
const TargetCodeGenInfo & getTargetHooks() const
RValue EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp)
Emit IR for __builtin_align_up/__builtin_align_down.
void EmitLifetimeEnd(llvm::Value *Addr)
Definition CGDecl.cpp:1368
llvm::Value * EmitWebAssemblyBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
bool IsInPreservedAIRegion
True if CodeGen currently emits code inside preserved access index region.
llvm::Value * EmitDirectXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition DirectX.cpp:22
llvm::Value * EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E, llvm::Triple::ArchType Arch)
Definition ARM.cpp:4486
llvm::Value * EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID, const CallExpr *E)
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
const Decl * CurFuncDecl
CurFuncDecl - Holds the Decl for the current outermost non-closure context.
Address EmitArrayToPointerDecay(const Expr *Array, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition CGExpr.cpp:4584
void pushLifetimeExtendedDestroy(CleanupKind kind, Address addr, QualType type, Destroyer *destroyer, bool useEHCleanupForArray)
Definition CGDecl.cpp:2353
llvm::Value * EmitSPIRVBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition SPIR.cpp:22
RValue EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
Address EmitVAListRef(const Expr *E)
RValue GetUndefRValue(QualType Ty)
GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
Definition CGExpr.cpp:1608
RValue EmitBuiltinIsAligned(const CallExpr *E)
Emit IR for __builtin_is_aligned.
RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type, const CallExpr *TheCallExpr, bool IsDelete)
llvm::CallInst * EmitRuntimeCall(llvm::FunctionCallee callee, const Twine &name="")
llvm::Value * EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition Hexagon.cpp:77
CodeGenTypes & getTypes() const
llvm::Value * EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition X86.cpp:793
static TypeEvaluationKind getEvaluationKind(QualType T)
getEvaluationKind - Return the TypeEvaluationKind of QualType T.
void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, LValue LV, QualType Type, SanitizerSet SkippedChecks=SanitizerSet(), llvm::Value *ArraySize=nullptr)
Address EmitPointerWithAlignment(const Expr *Addr, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitPointerWithAlignment - Given an expression with a pointer type, emit the value and compute our be...
Definition CGExpr.cpp:1591
RawAddress CreateMemTemp(QualType T, const Twine &Name="tmp", RawAddress *Alloca=nullptr)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment, and cast it to the default address space.
Definition CGExpr.cpp:189
llvm::Value * EmitCheckedInBoundsGEP(llvm::Type *ElemTy, llvm::Value *Ptr, ArrayRef< llvm::Value * > IdxList, bool SignedIndices, bool IsSubtraction, SourceLocation Loc, const Twine &Name="")
Same as IRBuilder::CreateInBoundsGEP, but additionally emits a check to detect undefined behavior whe...
Address EmitMSVAListRef(const Expr *E)
Emit a "reference" to a __builtin_ms_va_list; this is always the value of the expression,...
llvm::Value * EmitScalarExpr(const Expr *E, bool IgnoreResultAssign=false)
EmitScalarExpr - Emit the computation of the specified expression of LLVM scalar type,...
llvm::CallInst * EmitTrapCall(llvm::Intrinsic::ID IntrID)
Emit a call to trap or debugtrap and attach function attribute "trap-func-name" if specified.
Definition CGExpr.cpp:4569
LValue MakeAddrLValue(Address Addr, QualType T, AlignmentSource Source=AlignmentSource::Type)
void EmitTrapCheck(llvm::Value *Checked, SanitizerHandler CheckHandlerID, bool NoMerge=false, const TrapReason *TR=nullptr)
Create a basic block that will call the trap intrinsic, and emit a conditional branch to it,...
Definition CGExpr.cpp:4491
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
llvm::Value * EmitFromMemory(llvm::Value *Value, QualType Ty)
EmitFromMemory - Change a scalar value from its memory representation to its value representation.
Definition CGExpr.cpp:2257
llvm::Value * EmitCheckedArgForAssume(const Expr *E)
Emits an argument for a call to a __builtin_assume.
llvm::Value * EmitLoadOfCountedByField(const Expr *Base, const FieldDecl *FD, const FieldDecl *CountDecl)
Build an expression accessing the "counted_by" field.
Definition CGExpr.cpp:1247
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
llvm::Value * EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E)
Definition NVPTX.cpp:428
void EmitUnreachable(SourceLocation Loc)
Emit a reached-unreachable diagnostic if Loc is valid and runtime checking is enabled.
Definition CGExpr.cpp:4479
void ErrorUnsupported(const Stmt *S, const char *Type)
ErrorUnsupported - Print out an error that codegen doesn't support the specified stmt yet.
std::pair< llvm::Value *, llvm::Value * > ComplexPairTy
Address ReturnValue
ReturnValue - The temporary alloca to hold the return value.
LValue EmitLValue(const Expr *E, KnownNonNull_t IsKnownNonNull=NotKnownNonNull)
EmitLValue - Emit code to compute a designator that specifies the location of the expression.
Definition CGExpr.cpp:1707
bool ShouldXRayInstrumentFunction() const
ShouldXRayInstrument - Return true if the current function should be instrumented with XRay nop sleds...
llvm::LLVMContext & getLLVMContext()
llvm::Value * EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
EmitTargetBuiltinExpr - Emit the given builtin call.
void emitAlignmentAssumption(llvm::Value *PtrValue, QualType Ty, SourceLocation Loc, SourceLocation AssumptionLoc, llvm::Value *Alignment, llvm::Value *OffsetValue=nullptr)
llvm::Value * EmitHLSLBuiltinExpr(unsigned BuiltinID, const CallExpr *E, ReturnValueSlot ReturnValue)
void EmitARCIntrinsicUse(ArrayRef< llvm::Value * > values)
Given a number of pointers, inform the optimizer that they're being intrinsically used up until this ...
Definition CGObjC.cpp:2194
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition CGStmt.cpp:640
This class organizes the cross-function state that is used while generating LLVM code.
llvm::Module & getModule() const
llvm::FunctionCallee CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false, bool AssumeConvergent=false)
Create or return a runtime function declaration with the specified type and name.
llvm::Constant * getBuiltinLibFunction(const FunctionDecl *FD, unsigned BuiltinID)
Given a builtin id for a function like "__builtin_fabsf", return a Function* for "fabsf".
DiagnosticsEngine & getDiags() const
const LangOptions & getLangOpts() const
const TargetInfo & getTarget() const
const llvm::DataLayout & getDataLayout() const
const llvm::Triple & getTriple() const
void DecorateInstructionWithTBAA(llvm::Instruction *Inst, TBAAAccessInfo TBAAInfo)
DecorateInstructionWithTBAA - Decorate the instruction with a TBAA tag.
TBAAAccessInfo getTBAAAccessInfo(QualType AccessType)
getTBAAAccessInfo - Get TBAA information that describes an access to an object of the given type.
ASTContext & getContext() const
const TargetCodeGenInfo & getTargetCodeGenInfo()
const CodeGenOptions & getCodeGenOpts() const
StringRef getMangledName(GlobalDecl GD)
llvm::LLVMContext & getLLVMContext()
llvm::Function * getIntrinsic(unsigned IID, ArrayRef< llvm::Type * > Tys={})
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for.
Definition CGCall.cpp:1801
const CGRecordLayout & getCGRecordLayout(const RecordDecl *)
getCGRecordLayout - Return record layout info for the given record decl.
llvm::Constant * emitAbstract(const Expr *E, QualType T)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
FunctionArgList - Type for representing both the decl and type of parameters to a function.
Definition CGCall.h:375
LValue - This represents an lvalue references.
Definition CGValue.h:183
llvm::Value * getPointer(CodeGenFunction &CGF) const
Address getAddress() const
Definition CGValue.h:373
RValue - This trivial value class is used to represent the result of an expression that is evaluated.
Definition CGValue.h:42
static RValue getIgnored()
Definition CGValue.h:94
static RValue get(llvm::Value *V)
Definition CGValue.h:99
static RValue getAggregate(Address addr, bool isVolatile=false)
Convert an Address to an RValue.
Definition CGValue.h:126
static RValue getComplex(llvm::Value *V1, llvm::Value *V2)
Definition CGValue.h:109
An abstract representation of an aligned address.
Definition Address.h:42
static RawAddress invalid()
Definition Address.h:61
ReturnValueSlot - Contains the address where the return value of a function can be stored,...
Definition CGCall.h:379
virtual bool supportsLibCall() const
supportsLibCall - Query to whether or not target supports all lib calls.
Definition TargetInfo.h:78
virtual llvm::Value * encodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert the address of an instruction into a return address ...
Definition TargetInfo.h:176
virtual llvm::Value * decodeReturnAddress(CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const
Performs the code-generation required to convert a return address as stored by the system into the ac...
Definition TargetInfo.h:166
virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const
Determines the DWARF register number for the stack pointer, for exception-handling purposes.
Definition TargetInfo.h:148
virtual llvm::Value * testFPKind(llvm::Value *V, unsigned BuiltinID, CGBuilderTy &Builder, CodeGenModule &CGM) const
Performs a target specific test of a floating point value for things like IsNaN, Infinity,...
Definition TargetInfo.h:185
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3325
Represents a concrete matrix type with constant number of rows and columns.
Definition TypeBase.h:4437
Represents a sugar type with __counted_by or __sized_by annotations, including their _or_null variant...
Definition TypeBase.h:3486
DynamicCountPointerKind getKind() const
Definition TypeBase.h:3516
static bool isFlexibleArrayMemberLike(const ASTContext &Context, const Decl *D, QualType Ty, LangOptions::StrictFlexArraysLevelKind StrictFlexArraysLevel, bool IgnoreTemplateOrMacroSubstitution)
Whether it resembles a flexible array member.
Definition DeclBase.cpp:460
bool isImplicit() const
isImplicit - Indicates whether the declaration was implicitly generated by the implementation.
Definition DeclBase.h:601
FunctionDecl * getAsFunction() LLVM_READONLY
Returns the function itself, or the templated function if this is a function template.
Definition DeclBase.cpp:273
bool hasAttr() const
Definition DeclBase.h:585
Concrete class used by the front-end to report problems and issues.
Definition Diagnostic.h:232
DiagnosticBuilder Report(SourceLocation Loc, unsigned DiagID)
Issue the message to the client.
This represents one expression.
Definition Expr.h:112
bool EvaluateAsInt(EvalResult &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsInt - Return true if this is a constant which we can fold and convert to an integer,...
Expr * IgnoreParenNoopCasts(const ASTContext &Ctx) LLVM_READONLY
Skip past any parentheses and casts which do not change the value (including ptr->int casts of the sa...
Definition Expr.cpp:3117
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3095
llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const
EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded integer.
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3090
bool EvaluateAsFloat(llvm::APFloat &Result, const ASTContext &Ctx, SideEffectsKind AllowSideEffects=SE_NoSideEffects, bool InConstantContext=false) const
EvaluateAsFloat - Return true if this is a constant which we can fold and convert to a floating point...
bool isPRValue() const
Definition Expr.h:285
@ NPC_ValueDependentIsNotNull
Specifies that a value-dependent expression should be considered to never be a null pointer constant.
Definition Expr.h:838
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
bool HasSideEffects(const ASTContext &Ctx, bool IncludePossibleEffects=true) const
HasSideEffects - This routine returns true for all those expressions which have any effect other than...
Definition Expr.cpp:3688
std::optional< std::string > tryEvaluateString(ASTContext &Ctx) const
If the current Expr can be evaluated to a pointer to a null-terminated constant string,...
Expr * IgnoreImpCasts() LLVM_READONLY
Skip past any implicit casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
NullPointerConstantKind isNullPointerConstant(ASTContext &Ctx, NullPointerConstantValueDependence NPC) const
isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to a Null pointer constant.
Definition Expr.cpp:4068
std::optional< uint64_t > tryEvaluateObjectSize(const ASTContext &Ctx, unsigned Type) const
If the current Expr is a pointer, this will try to statically determine the number of bytes available...
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:277
QualType getType() const
Definition Expr.h:144
const ValueDecl * getAsBuiltinConstantDeclRef(const ASTContext &Context) const
If this expression is an unambiguous reference to a single declaration, in the style of __builtin_fun...
Definition Expr.cpp:226
Represents difference between two FPOptions values.
LangOptions::FPExceptionModeKind getExceptionMode() const
Represents a member of a struct/union/class.
Definition Decl.h:3175
const FieldDecl * findCountedByField() const
Find the FieldDecl specified in a FAM's "counted_by" attribute.
Definition Decl.cpp:4857
Represents a function declaration or definition.
Definition Decl.h:2015
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2812
unsigned getBuiltinID(bool ConsiderWrapperFunctions=false) const
Returns a value indicating whether this function corresponds to a builtin function.
Definition Decl.cpp:3764
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5357
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
IdentifierInfo & get(StringRef Name)
Return the identifier token info for the specified named identifier.
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition Decl.cpp:5604
@ FPE_Ignore
Assume that floating-point exceptions are masked.
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3367
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3450
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
const Expr * getSubExpr() const
Definition Expr.h:2202
PipeType - OpenCL20.
Definition TypeBase.h:8249
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3378
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isVolatileQualified() const
Determine whether this type is volatile-qualified.
Definition TypeBase.h:8515
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8557
Represents a struct/union/class.
Definition Decl.h:4342
field_range fields() const
Definition Decl.h:4545
Scope - A scope is a transient data structure that is used while parsing the program.
Definition Scope.h:41
Encodes a location in the source.
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.cpp:355
bool isUnion() const
Definition Decl.h:3943
Exposes information about the current target.
Definition TargetInfo.h:227
const llvm::Triple & getTriple() const
Returns the target triple of the primary target.
bool isBigEndian() const
virtual bool checkArithmeticFenceSupported() const
Controls if __arithmetic_fence is supported in the targeted backend.
unsigned getSuitableAlign() const
Return the alignment that is the largest alignment ever used for any scalar/SIMD data type on the tar...
Definition TargetInfo.h:748
virtual std::string_view getClobbers() const =0
Returns a string of target-specific clobbers, in LLVM format.
The base class of the type hierarchy.
Definition TypeBase.h:1866
bool isBlockPointerType() const
Definition TypeBase.h:8688
bool isVoidType() const
Definition TypeBase.h:9034
bool isSignedIntegerType() const
Return true if this is an integer type that is signed, according to C99 6.2.5p4 [char,...
Definition Type.cpp:2231
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8767
bool isCountAttributedType() const
Definition Type.cpp:743
bool isPointerType() const
Definition TypeBase.h:8668
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:9078
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9328
const CXXRecordDecl * getPointeeCXXRecordDecl() const
If this is a pointer or reference to a RecordType, return the CXXRecordDecl that the type refers to.
Definition Type.cpp:1923
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:754
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9261
Expr * getSubExpr() const
Definition Expr.h:2288
QualType getType() const
Definition Decl.h:723
QualType getType() const
Definition Value.cpp:237
Represents a GCC generic vector type.
Definition TypeBase.h:4225
QualType getElementType() const
Definition TypeBase.h:4239
SmallVector< OSLogBufferItem, 4 > Items
Definition OSLog.h:113
unsigned char getNumArgsByte() const
Definition OSLog.h:148
unsigned char getSummaryByte() const
Definition OSLog.h:139
Defines the clang::TargetInfo interface.
@ Type
The l-value was considered opaque, so the alignment was determined from a type.
Definition CGValue.h:155
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
Definition CGValue.h:146
llvm::Constant * initializationPatternFor(CodeGenModule &, llvm::Type *)
TypeEvaluationKind
The kind of evaluation to perform on values of a particular type.
@ EHCleanup
Denotes a cleanup that should run when a scope is exited using exceptional control flow (a throw stat...
constexpr XRayInstrMask Typed
Definition XRayInstr.h:42
constexpr XRayInstrMask Custom
Definition XRayInstr.h:41
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
bool Mul(InterpState &S, CodePtr OpPC)
Definition Interp.h:381
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Success
Annotation was successful.
Definition Parser.h:65
@ Vector
'vector' clause, allowed on 'loop', Combined, and 'routine' directives.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
@ Asm
Assembly: we accept this only so that we can preprocess it.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
SyncScope
Defines sync scope values used internally by clang.
Definition SyncScope.h:42
llvm::StringRef getAsString(SyncScope S)
Definition SyncScope.h:62
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1761
long int64_t
Diagnostic wrappers for TextAPI types for error reporting.
Definition Dominators.h:30
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:648
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:650
void clear(SanitizerMask K=SanitizerKind::All)
Disable the sanitizers specified in K.
Definition Sanitizers.h:195
void set(SanitizerMask K, bool Value)
Enable or disable a certain (single) sanitizer.
Definition Sanitizers.h:187
#define sinh(__x)
Definition tgmath.h:373
#define asin(__x)
Definition tgmath.h:112
#define scalbln(__x, __y)
Definition tgmath.h:1182
#define sqrt(__x)
Definition tgmath.h:520
#define acos(__x)
Definition tgmath.h:83
#define fmin(__x, __y)
Definition tgmath.h:780
#define exp(__x)
Definition tgmath.h:431
#define ilogb(__x)
Definition tgmath.h:851
#define copysign(__x, __y)
Definition tgmath.h:618
#define erf(__x)
Definition tgmath.h:636
#define atanh(__x)
Definition tgmath.h:228
#define remquo(__x, __y, __z)
Definition tgmath.h:1111
#define nextafter(__x, __y)
Definition tgmath.h:1055
#define frexp(__x, __y)
Definition tgmath.h:816
#define asinh(__x)
Definition tgmath.h:199
#define erfc(__x)
Definition tgmath.h:653
#define atan2(__x, __y)
Definition tgmath.h:566
#define nexttoward(__x, __y)
Definition tgmath.h:1073
#define hypot(__x, __y)
Definition tgmath.h:833
#define exp2(__x)
Definition tgmath.h:670
#define sin(__x)
Definition tgmath.h:286
#define cbrt(__x)
Definition tgmath.h:584
#define log2(__x)
Definition tgmath.h:970
#define llround(__x)
Definition tgmath.h:919
#define cosh(__x)
Definition tgmath.h:344
#define trunc(__x)
Definition tgmath.h:1216
#define fmax(__x, __y)
Definition tgmath.h:762
#define ldexp(__x, __y)
Definition tgmath.h:868
#define acosh(__x)
Definition tgmath.h:170
#define tgamma(__x)
Definition tgmath.h:1199
#define scalbn(__x, __y)
Definition tgmath.h:1165
#define round(__x)
Definition tgmath.h:1148
#define fmod(__x, __y)
Definition tgmath.h:798
#define llrint(__x)
Definition tgmath.h:902
#define tan(__x)
Definition tgmath.h:315
#define cos(__x)
Definition tgmath.h:257
#define log10(__x)
Definition tgmath.h:936
#define fabs(__x)
Definition tgmath.h:549
#define pow(__x, __y)
Definition tgmath.h:490
#define log1p(__x)
Definition tgmath.h:953
#define rint(__x)
Definition tgmath.h:1131
#define expm1(__x)
Definition tgmath.h:687
#define remainder(__x, __y)
Definition tgmath.h:1090
#define fdim(__x, __y)
Definition tgmath.h:704
#define lgamma(__x)
Definition tgmath.h:885
#define tanh(__x)
Definition tgmath.h:402
#define lrint(__x)
Definition tgmath.h:1004
#define atan(__x)
Definition tgmath.h:141
#define floor(__x)
Definition tgmath.h:722
#define ceil(__x)
Definition tgmath.h:601
#define log(__x)
Definition tgmath.h:460
#define logb(__x)
Definition tgmath.h:987
#define nearbyint(__x)
Definition tgmath.h:1038
#define lround(__x)
Definition tgmath.h:1021
#define fma(__x, __y, __z)
Definition tgmath.h:742