NVPTX.cpp
1//===-------- NVPTX.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "clang/Basic/TargetBuiltins.h"
15#include "llvm/IR/IntrinsicsNVPTX.h"
16
17using namespace clang;
18using namespace CodeGen;
19using namespace llvm;
20
21namespace {
22// Helper classes for mapping MMA builtins to a particular LLVM intrinsic variant.
23struct NVPTXMmaLdstInfo {
24 unsigned NumResults; // Number of elements to load/store
25 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
26 unsigned IID_col;
27 unsigned IID_row;
28};
29
30#define MMA_INTR(geom_op_type, layout) \
31 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
32#define MMA_LDST(n, geom_op_type) \
33 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
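// For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
//   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride}.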
34
35static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
36 switch (BuiltinID) {
37 // FP MMA loads
38 case NVPTX::BI__hmma_m16n16k16_ld_a:
39 return MMA_LDST(8, m16n16k16_load_a_f16);
40 case NVPTX::BI__hmma_m16n16k16_ld_b:
41 return MMA_LDST(8, m16n16k16_load_b_f16);
42 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
43 return MMA_LDST(4, m16n16k16_load_c_f16);
44 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
45 return MMA_LDST(8, m16n16k16_load_c_f32);
46 case NVPTX::BI__hmma_m32n8k16_ld_a:
47 return MMA_LDST(8, m32n8k16_load_a_f16);
48 case NVPTX::BI__hmma_m32n8k16_ld_b:
49 return MMA_LDST(8, m32n8k16_load_b_f16);
50 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
51 return MMA_LDST(4, m32n8k16_load_c_f16);
52 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
53 return MMA_LDST(8, m32n8k16_load_c_f32);
54 case NVPTX::BI__hmma_m8n32k16_ld_a:
55 return MMA_LDST(8, m8n32k16_load_a_f16);
56 case NVPTX::BI__hmma_m8n32k16_ld_b:
57 return MMA_LDST(8, m8n32k16_load_b_f16);
58 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
59 return MMA_LDST(4, m8n32k16_load_c_f16);
60 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
61 return MMA_LDST(8, m8n32k16_load_c_f32);
62
63 // Integer MMA loads
64 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
65 return MMA_LDST(2, m16n16k16_load_a_s8);
66 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
67 return MMA_LDST(2, m16n16k16_load_a_u8);
68 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
69 return MMA_LDST(2, m16n16k16_load_b_s8);
70 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
71 return MMA_LDST(2, m16n16k16_load_b_u8);
72 case NVPTX::BI__imma_m16n16k16_ld_c:
73 return MMA_LDST(8, m16n16k16_load_c_s32);
74 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
75 return MMA_LDST(4, m32n8k16_load_a_s8);
76 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
77 return MMA_LDST(4, m32n8k16_load_a_u8);
78 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
79 return MMA_LDST(1, m32n8k16_load_b_s8);
80 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
81 return MMA_LDST(1, m32n8k16_load_b_u8);
82 case NVPTX::BI__imma_m32n8k16_ld_c:
83 return MMA_LDST(8, m32n8k16_load_c_s32);
84 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
85 return MMA_LDST(1, m8n32k16_load_a_s8);
86 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
87 return MMA_LDST(1, m8n32k16_load_a_u8);
88 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
89 return MMA_LDST(4, m8n32k16_load_b_s8);
90 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
91 return MMA_LDST(4, m8n32k16_load_b_u8);
92 case NVPTX::BI__imma_m8n32k16_ld_c:
93 return MMA_LDST(8, m8n32k16_load_c_s32);
94
95 // Sub-integer MMA loads.
96 // Only row/col layout is supported by A/B fragments.
97 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
98 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
99 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
100 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
101 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
102 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
103 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
104 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
105 case NVPTX::BI__imma_m8n8k32_ld_c:
106 return MMA_LDST(2, m8n8k32_load_c_s32);
107 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
108 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
109 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
110 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
111 case NVPTX::BI__bmma_m8n8k128_ld_c:
112 return MMA_LDST(2, m8n8k128_load_c_s32);
113
114 // Double MMA loads
115 case NVPTX::BI__dmma_m8n8k4_ld_a:
116 return MMA_LDST(1, m8n8k4_load_a_f64);
117 case NVPTX::BI__dmma_m8n8k4_ld_b:
118 return MMA_LDST(1, m8n8k4_load_b_f64);
119 case NVPTX::BI__dmma_m8n8k4_ld_c:
120 return MMA_LDST(2, m8n8k4_load_c_f64);
121
122 // Alternate float MMA loads
123 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
124 return MMA_LDST(4, m16n16k16_load_a_bf16);
125 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
126 return MMA_LDST(4, m16n16k16_load_b_bf16);
127 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
128 return MMA_LDST(2, m8n32k16_load_a_bf16);
129 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
130 return MMA_LDST(8, m8n32k16_load_b_bf16);
131 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
132 return MMA_LDST(8, m32n8k16_load_a_bf16);
133 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
134 return MMA_LDST(2, m32n8k16_load_b_bf16);
135 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
136 return MMA_LDST(4, m16n16k8_load_a_tf32);
137 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
138 return MMA_LDST(4, m16n16k8_load_b_tf32);
139 case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
140 return MMA_LDST(8, m16n16k8_load_c_f32);
141
142 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
143 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
144 // use fragment C for both loads and stores.
145 // FP MMA stores.
146 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
147 return MMA_LDST(4, m16n16k16_store_d_f16);
148 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
149 return MMA_LDST(8, m16n16k16_store_d_f32);
150 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
151 return MMA_LDST(4, m32n8k16_store_d_f16);
152 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
153 return MMA_LDST(8, m32n8k16_store_d_f32);
154 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
155 return MMA_LDST(4, m8n32k16_store_d_f16);
156 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
157 return MMA_LDST(8, m8n32k16_store_d_f32);
158
159 // Integer and sub-integer MMA stores.
160 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
161 // name, integer loads/stores use LLVM's i32.
162 case NVPTX::BI__imma_m16n16k16_st_c_i32:
163 return MMA_LDST(8, m16n16k16_store_d_s32);
164 case NVPTX::BI__imma_m32n8k16_st_c_i32:
165 return MMA_LDST(8, m32n8k16_store_d_s32);
166 case NVPTX::BI__imma_m8n32k16_st_c_i32:
167 return MMA_LDST(8, m8n32k16_store_d_s32);
168 case NVPTX::BI__imma_m8n8k32_st_c_i32:
169 return MMA_LDST(2, m8n8k32_store_d_s32);
170 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
171 return MMA_LDST(2, m8n8k128_store_d_s32);
172
173 // Double MMA store
174 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
175 return MMA_LDST(2, m8n8k4_store_d_f64);
176
177 // Alternate float MMA store
178 case NVPTX::BI__mma_m16n16k8_st_c_f32:
179 return MMA_LDST(8, m16n16k8_store_d_f32);
180
181 default:
182 llvm_unreachable("Unknown MMA builtin");
183 }
184}
185#undef MMA_LDST
186#undef MMA_INTR
187
188
189struct NVPTXMmaInfo {
190 unsigned NumEltsA;
191 unsigned NumEltsB;
192 unsigned NumEltsC;
193 unsigned NumEltsD;
194
195 // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
196 // over 'col' for layout. The index of non-satf variants is expected to match
197 // the undocumented layout constants used by CUDA's mma.hpp.
198 std::array<unsigned, 8> Variants;
199
200 unsigned getMMAIntrinsic(int Layout, bool Satf) {
201 unsigned Index = Layout + 4 * Satf;
202 if (Index >= Variants.size())
203 return 0;
204 return Variants[Index];
205 }
206};
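// Example: getMMAIntrinsic(/*Layout=*/1, /*Satf=*/true) returns Variants[1 + 4*1]
// == Variants[5], i.e. the second satfinite entry in the MMA_SATF_VARIANTS
// ordering below (row-major A, column-major B).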
207
208 // Returns an intrinsic that matches Layout and Satf for valid combinations of
209 // Layout and Satf, 0 otherwise.
210static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
211 // clang-format off
212#define MMA_VARIANTS(geom, type) \
213 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
214 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
215 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
216 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
217#define MMA_SATF_VARIANTS(geom, type) \
218 MMA_VARIANTS(geom, type), \
219 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
220 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
221 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
222 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
223// Sub-integer MMA only supports row.col layout.
224#define MMA_VARIANTS_I4(geom, type) \
225 0, \
226 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
227 0, \
228 0, \
229 0, \
230 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
231 0, \
232 0
233// b1 MMA does not support .satfinite.
234#define MMA_VARIANTS_B1_XOR(geom, type) \
235 0, \
236 Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
237 0, \
238 0, \
239 0, \
240 0, \
241 0, \
242 0
243#define MMA_VARIANTS_B1_AND(geom, type) \
244 0, \
245 Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
246 0, \
247 0, \
248 0, \
249 0, \
250 0, \
251 0
252 // clang-format on
253 switch (BuiltinID) {
254 // FP MMA
255 // Note that 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
256 // NumEltsN of return value are ordered as A,B,C,D.
257 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
258 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
259 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
260 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
261 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
262 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
263 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
264 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
265 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
266 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
267 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
268 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
269 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
270 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
271 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
272 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
273 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
274 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
275 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
276 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
277 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
278 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
279 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
280 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
281
282 // Integer MMA
283 case NVPTX::BI__imma_m16n16k16_mma_s8:
284 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
285 case NVPTX::BI__imma_m16n16k16_mma_u8:
286 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
287 case NVPTX::BI__imma_m32n8k16_mma_s8:
288 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
289 case NVPTX::BI__imma_m32n8k16_mma_u8:
290 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
291 case NVPTX::BI__imma_m8n32k16_mma_s8:
292 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
293 case NVPTX::BI__imma_m8n32k16_mma_u8:
294 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
295
296 // Sub-integer MMA
297 case NVPTX::BI__imma_m8n8k32_mma_s4:
298 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
299 case NVPTX::BI__imma_m8n8k32_mma_u4:
300 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
301 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
302 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
303 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
304 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
305
306 // Double MMA
307 case NVPTX::BI__dmma_m8n8k4_mma_f64:
308 return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
309
310 // Alternate FP MMA
311 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
312 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
313 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
314 return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
315 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
316 return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
317 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
318 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
319 default:
320 llvm_unreachable("Unexpected builtin ID.");
321 }
322#undef MMA_VARIANTS
323#undef MMA_SATF_VARIANTS
324#undef MMA_VARIANTS_I4
325#undef MMA_VARIANTS_B1_AND
326#undef MMA_VARIANTS_B1_XOR
327}
328
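// Helper for the __nvvm_ldu_* builtins: emits the corresponding nvvm_ldu_global_*
// intrinsic, overloaded on the loaded element type and on the pointer type, and
// passes the pointee's natural alignment as an i32 constant operand.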
329static Value *MakeLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
330 const CallExpr *E) {
331 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
332 QualType ArgType = E->getArg(0)->getType();
333 clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
334 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
335 return CGF.Builder.CreateCall(
336 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
337 {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
338}
339
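// Helper for the __nvvm_ldg_* builtins: instead of calling an intrinsic, this
// addrspacecasts the generic pointer to the global address space (addrspace(1))
// and emits an aligned load tagged with !invariant.load metadata, allowing the
// NVPTX backend to select a non-coherent (ld.global.nc) load where profitable.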
340static Value *MakeLdg(CodeGenFunction &CGF, const CallExpr *E) {
341 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
342 QualType ArgType = E->getArg(0)->getType();
343 clang::CharUnits AlignV = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
344 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
345
346 // Use addrspace(1) for NVPTX ADDRESS_SPACE_GLOBAL
347 auto *ASC = CGF.Builder.CreateAddrSpaceCast(Ptr, CGF.Builder.getPtrTy(1));
348 auto *LD = CGF.Builder.CreateAlignedLoad(ElemTy, ASC, AlignV.getAsAlign());
349 MDNode *MD = MDNode::get(CGF.Builder.getContext(), {});
350 LD->setMetadata(LLVMContext::MD_invariant_load, MD);
351
352 return LD;
353}
354
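// Helper for the scoped (cta/sys) atomic builtins: emits the given intrinsic,
// overloaded on the pointee type and the pointer type, with {pointer, value}
// operands.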
355static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
356 const CallExpr *E) {
357 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
358 llvm::Type *ElemTy =
359 CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
360 return CGF.Builder.CreateCall(
361 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
362 {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
363}
364
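// Helper for the cp.async builtins: the three-argument form of the builtin
// carries an explicit source-size operand and maps to the "_s" intrinsic variant
// (IntrinsicIDS); the two-argument form maps to the plain variant.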
365static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
366 CodeGenFunction &CGF, const CallExpr *E,
367 int SrcSize) {
368 return E->getNumArgs() == 3
369 ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
370 {CGF.EmitScalarExpr(E->getArg(0)),
371 CGF.EmitScalarExpr(E->getArg(1)),
372 CGF.EmitScalarExpr(E->getArg(2))})
373 : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
374 {CGF.EmitScalarExpr(E->getArg(0)),
375 CGF.EmitScalarExpr(E->getArg(1))});
376}
377
378static bool EnsureNativeHalfSupport(unsigned BuiltinID, const CallExpr *E,
379 CodeGenFunction &CGF) {
380 auto &C = CGF.CGM.getContext();
381 if (!C.getLangOpts().NativeHalfType &&
382 C.getTargetInfo().useFP16ConversionIntrinsics()) {
383 CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getQuotedName(BuiltinID) +
384 " requires native half type support.");
385 return false;
386 }
387 return true;
388}
389
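// Emits a call to a half-precision intrinsic after checking that the target
// supports native half types. Arguments are bitcast to the intrinsic's parameter
// types where they differ from the emitted scalar types.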
390static Value *MakeHalfType(Function *Intrinsic, unsigned BuiltinID,
391 const CallExpr *E, CodeGenFunction &CGF) {
392 if (!EnsureNativeHalfSupport(BuiltinID, E, CGF))
393 return nullptr;
394
395 SmallVector<Value *, 16> Args;
396 auto *FTy = Intrinsic->getFunctionType();
397 unsigned ICEArguments = 0;
398 ASTContext::GetBuiltinTypeError Error;
399 CGF.CGM.getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
400 assert(Error == ASTContext::GE_None && "Should not codegen an error");
401 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
402 assert((ICEArguments & (1 << i)) == 0);
403 auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
404 auto *PTy = FTy->getParamType(i);
405 if (PTy != ArgValue->getType())
406 ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
407 Args.push_back(ArgValue);
408 }
409
410 return CGF.Builder.CreateCall(Intrinsic, Args);
411}
412
413static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
414 const CallExpr *E, CodeGenFunction &CGF) {
415 return MakeHalfType(CGF.CGM.getIntrinsic(IntrinsicID), BuiltinID, E, CGF);
416}
417
418static Value *MakeFMAOOB(unsigned IntrinsicID, llvm::Type *Ty,
419 const CallExpr *E, CodeGenFunction &CGF) {
420 return CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID, {Ty}),
421 {CGF.EmitScalarExpr(E->getArg(0)),
422 CGF.EmitScalarExpr(E->getArg(1)),
423 CGF.EmitScalarExpr(E->getArg(2))});
424}
425
426} // namespace
427
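// Lowers an NVPTX target builtin either to plain LLVM IR (e.g. the generic
// atomics below become atomicrmw/cmpxchg instructions) or to an NVVM intrinsic
// call. Returning nullptr means the builtin was not handled here or had invalid
// constant arguments.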
428Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
429 const CallExpr *E) {
430 switch (BuiltinID) {
431 case NVPTX::BI__nvvm_atom_add_gen_i:
432 case NVPTX::BI__nvvm_atom_add_gen_l:
433 case NVPTX::BI__nvvm_atom_add_gen_ll:
434 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
435
436 case NVPTX::BI__nvvm_atom_sub_gen_i:
437 case NVPTX::BI__nvvm_atom_sub_gen_l:
438 case NVPTX::BI__nvvm_atom_sub_gen_ll:
439 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
440
441 case NVPTX::BI__nvvm_atom_and_gen_i:
442 case NVPTX::BI__nvvm_atom_and_gen_l:
443 case NVPTX::BI__nvvm_atom_and_gen_ll:
444 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
445
446 case NVPTX::BI__nvvm_atom_or_gen_i:
447 case NVPTX::BI__nvvm_atom_or_gen_l:
448 case NVPTX::BI__nvvm_atom_or_gen_ll:
449 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
450
451 case NVPTX::BI__nvvm_atom_xor_gen_i:
452 case NVPTX::BI__nvvm_atom_xor_gen_l:
453 case NVPTX::BI__nvvm_atom_xor_gen_ll:
454 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
455
456 case NVPTX::BI__nvvm_atom_xchg_gen_i:
457 case NVPTX::BI__nvvm_atom_xchg_gen_l:
458 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
459 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
460
461 case NVPTX::BI__nvvm_atom_max_gen_i:
462 case NVPTX::BI__nvvm_atom_max_gen_l:
463 case NVPTX::BI__nvvm_atom_max_gen_ll:
464 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
465
466 case NVPTX::BI__nvvm_atom_max_gen_ui:
467 case NVPTX::BI__nvvm_atom_max_gen_ul:
468 case NVPTX::BI__nvvm_atom_max_gen_ull:
469 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
470
471 case NVPTX::BI__nvvm_atom_min_gen_i:
472 case NVPTX::BI__nvvm_atom_min_gen_l:
473 case NVPTX::BI__nvvm_atom_min_gen_ll:
474 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
475
476 case NVPTX::BI__nvvm_atom_min_gen_ui:
477 case NVPTX::BI__nvvm_atom_min_gen_ul:
478 case NVPTX::BI__nvvm_atom_min_gen_ull:
479 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
480
481 case NVPTX::BI__nvvm_atom_cas_gen_us:
482 case NVPTX::BI__nvvm_atom_cas_gen_i:
483 case NVPTX::BI__nvvm_atom_cas_gen_l:
484 case NVPTX::BI__nvvm_atom_cas_gen_ll:
485 // __nvvm_atom_cas_gen_* should return the old value rather than the
486 // success flag.
487 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
488
489 case NVPTX::BI__nvvm_atom_add_gen_f:
490 case NVPTX::BI__nvvm_atom_add_gen_d: {
491 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
492 Value *Val = EmitScalarExpr(E->getArg(1));
493
494 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, DestAddr, Val,
495 AtomicOrdering::SequentiallyConsistent);
496 }
497
498 case NVPTX::BI__nvvm_atom_inc_gen_ui:
499 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UIncWrap, E);
500
501 case NVPTX::BI__nvvm_atom_dec_gen_ui:
502 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UDecWrap, E);
503
504 case NVPTX::BI__nvvm_ldg_c:
505 case NVPTX::BI__nvvm_ldg_sc:
506 case NVPTX::BI__nvvm_ldg_c2:
507 case NVPTX::BI__nvvm_ldg_sc2:
508 case NVPTX::BI__nvvm_ldg_c4:
509 case NVPTX::BI__nvvm_ldg_sc4:
510 case NVPTX::BI__nvvm_ldg_s:
511 case NVPTX::BI__nvvm_ldg_s2:
512 case NVPTX::BI__nvvm_ldg_s4:
513 case NVPTX::BI__nvvm_ldg_i:
514 case NVPTX::BI__nvvm_ldg_i2:
515 case NVPTX::BI__nvvm_ldg_i4:
516 case NVPTX::BI__nvvm_ldg_l:
517 case NVPTX::BI__nvvm_ldg_l2:
518 case NVPTX::BI__nvvm_ldg_ll:
519 case NVPTX::BI__nvvm_ldg_ll2:
520 case NVPTX::BI__nvvm_ldg_uc:
521 case NVPTX::BI__nvvm_ldg_uc2:
522 case NVPTX::BI__nvvm_ldg_uc4:
523 case NVPTX::BI__nvvm_ldg_us:
524 case NVPTX::BI__nvvm_ldg_us2:
525 case NVPTX::BI__nvvm_ldg_us4:
526 case NVPTX::BI__nvvm_ldg_ui:
527 case NVPTX::BI__nvvm_ldg_ui2:
528 case NVPTX::BI__nvvm_ldg_ui4:
529 case NVPTX::BI__nvvm_ldg_ul:
530 case NVPTX::BI__nvvm_ldg_ul2:
531 case NVPTX::BI__nvvm_ldg_ull:
532 case NVPTX::BI__nvvm_ldg_ull2:
533 case NVPTX::BI__nvvm_ldg_f:
534 case NVPTX::BI__nvvm_ldg_f2:
535 case NVPTX::BI__nvvm_ldg_f4:
536 case NVPTX::BI__nvvm_ldg_d:
537 case NVPTX::BI__nvvm_ldg_d2:
538 // PTX Interoperability section 2.2: "For a vector with an even number of
539 // elements, its alignment is set to number of elements times the alignment
540 // of its member: n*alignof(t)."
541 return MakeLdg(*this, E);
542
543 case NVPTX::BI__nvvm_ldu_c:
544 case NVPTX::BI__nvvm_ldu_sc:
545 case NVPTX::BI__nvvm_ldu_c2:
546 case NVPTX::BI__nvvm_ldu_sc2:
547 case NVPTX::BI__nvvm_ldu_c4:
548 case NVPTX::BI__nvvm_ldu_sc4:
549 case NVPTX::BI__nvvm_ldu_s:
550 case NVPTX::BI__nvvm_ldu_s2:
551 case NVPTX::BI__nvvm_ldu_s4:
552 case NVPTX::BI__nvvm_ldu_i:
553 case NVPTX::BI__nvvm_ldu_i2:
554 case NVPTX::BI__nvvm_ldu_i4:
555 case NVPTX::BI__nvvm_ldu_l:
556 case NVPTX::BI__nvvm_ldu_l2:
557 case NVPTX::BI__nvvm_ldu_ll:
558 case NVPTX::BI__nvvm_ldu_ll2:
559 case NVPTX::BI__nvvm_ldu_uc:
560 case NVPTX::BI__nvvm_ldu_uc2:
561 case NVPTX::BI__nvvm_ldu_uc4:
562 case NVPTX::BI__nvvm_ldu_us:
563 case NVPTX::BI__nvvm_ldu_us2:
564 case NVPTX::BI__nvvm_ldu_us4:
565 case NVPTX::BI__nvvm_ldu_ui:
566 case NVPTX::BI__nvvm_ldu_ui2:
567 case NVPTX::BI__nvvm_ldu_ui4:
568 case NVPTX::BI__nvvm_ldu_ul:
569 case NVPTX::BI__nvvm_ldu_ul2:
570 case NVPTX::BI__nvvm_ldu_ull:
571 case NVPTX::BI__nvvm_ldu_ull2:
572 return MakeLdu(Intrinsic::nvvm_ldu_global_i, *this, E);
573 case NVPTX::BI__nvvm_ldu_f:
574 case NVPTX::BI__nvvm_ldu_f2:
575 case NVPTX::BI__nvvm_ldu_f4:
576 case NVPTX::BI__nvvm_ldu_d:
577 case NVPTX::BI__nvvm_ldu_d2:
578 return MakeLdu(Intrinsic::nvvm_ldu_global_f, *this, E);
579
580 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
581 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
582 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
583 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta, *this, E);
584 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
585 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
586 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
587 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
588 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
589 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
590 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
591 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
592 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
593 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
594 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
595 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
596 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
597 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
598 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
599 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
600 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
601 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
602 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
603 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
604 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
605 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
606 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
607 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
608 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
609 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
610 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
611 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
612 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
613 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
614 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
615 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
616 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
617 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
618 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
619 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
620 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
621 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
622 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
623 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
624 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
625 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
626 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
627 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
628 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
629 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
630 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
631 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
632 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
633 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
634 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
635 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
636 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
637 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
638 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
639 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
640 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
641 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
642 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
643 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
644 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
645 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
646 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
647 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
648 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
649 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
650 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
651 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
652 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
653 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
654 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
655 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
656 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
657 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
658 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
659 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
660 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
661 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
662 case NVPTX::BI__nvvm_atom_cta_cas_gen_us:
663 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
664 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
665 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
666 Value *Ptr = EmitScalarExpr(E->getArg(0));
667 llvm::Type *ElemTy =
668 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
669 return Builder.CreateCall(
670 CGM.getIntrinsic(
671 Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
672 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
673 }
674 case NVPTX::BI__nvvm_atom_sys_cas_gen_us:
675 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
676 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
677 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
678 Value *Ptr = EmitScalarExpr(E->getArg(0));
679 llvm::Type *ElemTy =
680 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
681 return Builder.CreateCall(
682 CGM.getIntrinsic(
683 Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
684 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
685 }
686 case NVPTX::BI__nvvm_match_all_sync_i32p:
687 case NVPTX::BI__nvvm_match_all_sync_i64p: {
688 Value *Mask = EmitScalarExpr(E->getArg(0));
689 Value *Val = EmitScalarExpr(E->getArg(1));
690 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
691 Value *ResultPair = Builder.CreateCall(
692 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
693 ? Intrinsic::nvvm_match_all_sync_i32p
694 : Intrinsic::nvvm_match_all_sync_i64p),
695 {Mask, Val});
696 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
697 PredOutPtr.getElementType());
698 Builder.CreateStore(Pred, PredOutPtr);
699 return Builder.CreateExtractValue(ResultPair, 0);
700 }
701
702 // FP MMA loads
703 case NVPTX::BI__hmma_m16n16k16_ld_a:
704 case NVPTX::BI__hmma_m16n16k16_ld_b:
705 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
706 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
707 case NVPTX::BI__hmma_m32n8k16_ld_a:
708 case NVPTX::BI__hmma_m32n8k16_ld_b:
709 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
710 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
711 case NVPTX::BI__hmma_m8n32k16_ld_a:
712 case NVPTX::BI__hmma_m8n32k16_ld_b:
713 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
714 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
715 // Integer MMA loads.
716 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
717 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
718 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
719 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
720 case NVPTX::BI__imma_m16n16k16_ld_c:
721 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
722 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
723 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
724 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
725 case NVPTX::BI__imma_m32n8k16_ld_c:
726 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
727 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
728 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
729 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
730 case NVPTX::BI__imma_m8n32k16_ld_c:
731 // Sub-integer MMA loads.
732 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
733 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
734 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
735 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
736 case NVPTX::BI__imma_m8n8k32_ld_c:
737 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
738 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
739 case NVPTX::BI__bmma_m8n8k128_ld_c:
740 // Double MMA loads.
741 case NVPTX::BI__dmma_m8n8k4_ld_a:
742 case NVPTX::BI__dmma_m8n8k4_ld_b:
743 case NVPTX::BI__dmma_m8n8k4_ld_c:
744 // Alternate float MMA loads.
745 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
746 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
747 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
748 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
749 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
750 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
751 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
752 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
753 case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
754 Address Dst = EmitPointerWithAlignment(E->getArg(0));
755 Value *Src = EmitScalarExpr(E->getArg(1));
756 Value *Ldm = EmitScalarExpr(E->getArg(2));
757 std::optional<llvm::APSInt> isColMajorArg =
758 E->getArg(3)->getIntegerConstantExpr(getContext());
759 if (!isColMajorArg)
760 return nullptr;
761 bool isColMajor = isColMajorArg->getSExtValue();
762 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
763 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
764 if (IID == 0)
765 return nullptr;
766
767 Value *Result =
768 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
769
770 // Save returned values.
771 assert(II.NumResults);
772 if (II.NumResults == 1) {
773 Builder.CreateAlignedStore(Result, Dst.emitRawPointer(*this),
774 CharUnits::fromQuantity(4));
775 } else {
776 for (unsigned i = 0; i < II.NumResults; ++i) {
777 Builder.CreateAlignedStore(
778 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
779 Dst.getElementType()),
780 Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
781 llvm::ConstantInt::get(IntTy, i)),
782 CharUnits::fromQuantity(4));
783 }
784 }
785 return Result;
786 }
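// For example, __hmma_m16n16k16_ld_a(dst, src, ldm, /*isColMajor=*/0) selects
// Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride (NumResults == 8), calls
// it with {src, ldm}, and stores the eight returned fragment elements to dst.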
787
788 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
789 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
790 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
791 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
792 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
793 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
794 case NVPTX::BI__imma_m16n16k16_st_c_i32:
795 case NVPTX::BI__imma_m32n8k16_st_c_i32:
796 case NVPTX::BI__imma_m8n32k16_st_c_i32:
797 case NVPTX::BI__imma_m8n8k32_st_c_i32:
798 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
799 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
800 case NVPTX::BI__mma_m16n16k8_st_c_f32: {
801 Value *Dst = EmitScalarExpr(E->getArg(0));
802 Address Src = EmitPointerWithAlignment(E->getArg(1));
803 Value *Ldm = EmitScalarExpr(E->getArg(2));
804 std::optional<llvm::APSInt> isColMajorArg =
805 E->getArg(3)->getIntegerConstantExpr(getContext());
806 if (!isColMajorArg)
807 return nullptr;
808 bool isColMajor = isColMajorArg->getSExtValue();
809 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
810 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
811 if (IID == 0)
812 return nullptr;
813 Function *Intrinsic =
814 CGM.getIntrinsic(IID, Dst->getType());
815 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
816 SmallVector<Value *, 10> Values = {Dst};
817 for (unsigned i = 0; i < II.NumResults; ++i) {
818 Value *V = Builder.CreateAlignedLoad(
819 Src.getElementType(),
820 Builder.CreateGEP(Src.getElementType(), Src.emitRawPointer(*this),
821 llvm::ConstantInt::get(IntTy, i)),
822 CharUnits::fromQuantity(4));
823 Values.push_back(Builder.CreateBitCast(V, ParamType));
824 }
825 Values.push_back(Ldm);
826 Value *Result = Builder.CreateCall(Intrinsic, Values);
827 return Result;
828 }
829
830 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
831 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
832 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
833 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
834 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
835 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
836 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
837 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
838 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
839 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
840 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
841 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
842 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
843 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
844 case NVPTX::BI__imma_m16n16k16_mma_s8:
845 case NVPTX::BI__imma_m16n16k16_mma_u8:
846 case NVPTX::BI__imma_m32n8k16_mma_s8:
847 case NVPTX::BI__imma_m32n8k16_mma_u8:
848 case NVPTX::BI__imma_m8n32k16_mma_s8:
849 case NVPTX::BI__imma_m8n32k16_mma_u8:
850 case NVPTX::BI__imma_m8n8k32_mma_s4:
851 case NVPTX::BI__imma_m8n8k32_mma_u4:
852 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
853 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
854 case NVPTX::BI__dmma_m8n8k4_mma_f64:
855 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
856 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
857 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
858 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
859 Address Dst = EmitPointerWithAlignment(E->getArg(0));
860 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
861 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
862 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
863 std::optional<llvm::APSInt> LayoutArg =
864 E->getArg(4)->getIntegerConstantExpr(getContext());
865 if (!LayoutArg)
866 return nullptr;
867 int Layout = LayoutArg->getSExtValue();
868 if (Layout < 0 || Layout > 3)
869 return nullptr;
870 llvm::APSInt SatfArg;
871 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
872 BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
873 SatfArg = 0; // .b1 does not have satf argument.
874 else if (std::optional<llvm::APSInt> OptSatfArg =
875 E->getArg(5)->getIntegerConstantExpr(getContext()))
876 SatfArg = *OptSatfArg;
877 else
878 return nullptr;
879 bool Satf = SatfArg.getSExtValue();
880 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
881 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
882 if (IID == 0) // Unsupported combination of Layout/Satf.
883 return nullptr;
884
885 SmallVector<Value *, 24> Values;
886 Function *Intrinsic = CGM.getIntrinsic(IID);
887 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
888 // Load A
889 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
890 Value *V = Builder.CreateAlignedLoad(
891 SrcA.getElementType(),
892 Builder.CreateGEP(SrcA.getElementType(), SrcA.emitRawPointer(*this),
893 llvm::ConstantInt::get(IntTy, i)),
894 CharUnits::fromQuantity(4));
895 Values.push_back(Builder.CreateBitCast(V, AType));
896 }
897 // Load B
898 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
899 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
900 Value *V = Builder.CreateAlignedLoad(
901 SrcB.getElementType(),
902 Builder.CreateGEP(SrcB.getElementType(), SrcB.emitRawPointer(*this),
903 llvm::ConstantInt::get(IntTy, i)),
904 CharUnits::fromQuantity(4));
905 Values.push_back(Builder.CreateBitCast(V, BType));
906 }
907 // Load C
908 llvm::Type *CType =
909 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
910 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
911 Value *V = Builder.CreateAlignedLoad(
912 SrcC.getElementType(),
913 Builder.CreateGEP(SrcC.getElementType(), SrcC.emitRawPointer(*this),
914 llvm::ConstantInt::get(IntTy, i)),
915 CharUnits::fromQuantity(4));
916 Values.push_back(Builder.CreateBitCast(V, CType));
917 }
918 Value *Result = Builder.CreateCall(Intrinsic, Values);
919 llvm::Type *DType = Dst.getElementType();
920 for (unsigned i = 0; i < MI.NumEltsD; ++i)
921 Builder.CreateAlignedStore(
922 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
923 Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
924 llvm::ConstantInt::get(IntTy, i)),
925 CharUnits::fromQuantity(4));
926 return Result;
927 }
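// In summary: the A/B/C fragment elements are loaded one by one from the source
// pointers, bitcast to the intrinsic's parameter types, passed to the selected
// wmma.mma intrinsic, and the NumEltsD results are stored back to Dst.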
928 // The following builtins require half type support
929 case NVPTX::BI__nvvm_ex2_approx_f16:
930 return MakeHalfType(
931 CGM.getIntrinsic(Intrinsic::nvvm_ex2_approx, Builder.getHalfTy()),
932 BuiltinID, E, *this);
933 case NVPTX::BI__nvvm_ex2_approx_f16x2:
934 return MakeHalfType(
935 CGM.getIntrinsic(Intrinsic::nvvm_ex2_approx,
936 FixedVectorType::get(Builder.getHalfTy(), 2)),
937 BuiltinID, E, *this);
938 case NVPTX::BI__nvvm_ff2f16x2_rn:
939 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
940 case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
941 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
942 case NVPTX::BI__nvvm_ff2f16x2_rz:
943 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
944 case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
945 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
946 case NVPTX::BI__nvvm_fma_rn_f16:
947 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
948 case NVPTX::BI__nvvm_fma_rn_f16x2:
949 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
950 case NVPTX::BI__nvvm_fma_rn_ftz_f16:
951 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
952 case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
953 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
954 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
955 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
956 *this);
957 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
958 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
959 *this);
960 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
961 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
962 *this);
963 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
964 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
965 *this);
966 case NVPTX::BI__nvvm_fma_rn_relu_f16:
967 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
968 case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
969 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
970 case NVPTX::BI__nvvm_fma_rn_sat_f16:
971 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
972 case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
973 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
974 case NVPTX::BI__nvvm_fma_rn_oob_f16:
975 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob, Builder.getHalfTy(), E,
976 *this);
977 case NVPTX::BI__nvvm_fma_rn_oob_f16x2:
978 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob,
979 llvm::FixedVectorType::get(Builder.getHalfTy(), 2), E,
980 *this);
981 case NVPTX::BI__nvvm_fma_rn_oob_bf16:
982 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob, Builder.getBFloatTy(), E,
983 *this);
984 case NVPTX::BI__nvvm_fma_rn_oob_bf16x2:
985 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob,
986 llvm::FixedVectorType::get(Builder.getBFloatTy(), 2), E,
987 *this);
988 case NVPTX::BI__nvvm_fma_rn_oob_relu_f16:
989 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob_relu, Builder.getHalfTy(), E,
990 *this);
991 case NVPTX::BI__nvvm_fma_rn_oob_relu_f16x2:
992 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob_relu,
993 llvm::FixedVectorType::get(Builder.getHalfTy(), 2), E,
994 *this);
995 case NVPTX::BI__nvvm_fma_rn_oob_relu_bf16:
996 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob_relu, Builder.getBFloatTy(), E,
997 *this);
998 case NVPTX::BI__nvvm_fma_rn_oob_relu_bf16x2:
999 return MakeFMAOOB(Intrinsic::nvvm_fma_rn_oob_relu,
1000 llvm::FixedVectorType::get(Builder.getBFloatTy(), 2), E,
1001 *this);
1002 case NVPTX::BI__nvvm_fmax_f16:
1003 return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
1004 case NVPTX::BI__nvvm_fmax_f16x2:
1005 return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
1006 case NVPTX::BI__nvvm_fmax_ftz_f16:
1007 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
1008 case NVPTX::BI__nvvm_fmax_ftz_f16x2:
1009 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
1010 case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
1011 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
1012 case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
1013 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
1014 *this);
1015 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
1016 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
1017 E, *this);
1018 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
1019 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
1020 BuiltinID, E, *this);
1021 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
1022 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
1023 *this);
1024 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
1025 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
1026 E, *this);
1027 case NVPTX::BI__nvvm_fmax_nan_f16:
1028 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
1029 case NVPTX::BI__nvvm_fmax_nan_f16x2:
1030 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
1031 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
1032 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
1033 *this);
1034 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
1035 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
1036 E, *this);
1037 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
1038 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
1039 *this);
1040 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
1041 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
1042 *this);
1043 case NVPTX::BI__nvvm_fmin_f16:
1044 return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
1045 case NVPTX::BI__nvvm_fmin_f16x2:
1046 return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
1047 case NVPTX::BI__nvvm_fmin_ftz_f16:
1048 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
1049 case NVPTX::BI__nvvm_fmin_ftz_f16x2:
1050 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
1051 case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
1052 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
1053 case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
1054 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
1055 *this);
1056 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
1057 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
1058 E, *this);
1059 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
1060 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
1061 BuiltinID, E, *this);
1062 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
1063 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
1064 *this);
1065 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
1066 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
1067 E, *this);
1068 case NVPTX::BI__nvvm_fmin_nan_f16:
1069 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
1070 case NVPTX::BI__nvvm_fmin_nan_f16x2:
1071 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
1072 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
1073 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
1074 *this);
1075 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
1076 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
1077 E, *this);
1078 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
1079 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
1080 *this);
1081 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
1082 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
1083 *this);
1084 case NVPTX::BI__nvvm_fabs_f:
1085 case NVPTX::BI__nvvm_abs_bf16:
1086 case NVPTX::BI__nvvm_abs_bf16x2:
1087 case NVPTX::BI__nvvm_fabs_f16:
1088 case NVPTX::BI__nvvm_fabs_f16x2:
1089 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_fabs,
1090 EmitScalarExpr(E->getArg(0)));
1091 case NVPTX::BI__nvvm_fabs_ftz_f:
1092 case NVPTX::BI__nvvm_fabs_ftz_f16:
1093 case NVPTX::BI__nvvm_fabs_ftz_f16x2:
1094 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_fabs_ftz,
1095 EmitScalarExpr(E->getArg(0)));
1096 case NVPTX::BI__nvvm_fabs_d:
1097 return Builder.CreateUnaryIntrinsic(Intrinsic::fabs,
1098 EmitScalarExpr(E->getArg(0)));
1099 case NVPTX::BI__nvvm_ex2_approx_d:
1100 case NVPTX::BI__nvvm_ex2_approx_f:
1101 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_ex2_approx,
1102 EmitScalarExpr(E->getArg(0)));
1103 case NVPTX::BI__nvvm_ex2_approx_ftz_f:
1104 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_ex2_approx_ftz,
1105 EmitScalarExpr(E->getArg(0)));
1106 case NVPTX::BI__nvvm_ldg_h:
1107 case NVPTX::BI__nvvm_ldg_h2:
1108 return EnsureNativeHalfSupport(BuiltinID, E, *this) ? MakeLdg(*this, E)
1109 : nullptr;
1110 case NVPTX::BI__nvvm_ldu_h:
1111 case NVPTX::BI__nvvm_ldu_h2:
1112 return EnsureNativeHalfSupport(BuiltinID, E, *this)
1113 ? MakeLdu(Intrinsic::nvvm_ldu_global_f, *this, E)
1114 : nullptr;
1115 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
1116 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
1117 Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E,
1118 4);
1119 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
1120 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
1121 Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E,
1122 8);
1123 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
1124 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
1125 Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
1126 16);
1127 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
1128 return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
1129 Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
1130 16);
1131 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
1132 return Builder.CreateCall(
1133 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
1134 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
1135 return Builder.CreateCall(
1136 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
1137 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
1138 return Builder.CreateCall(
1139 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
1140 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
1141 return Builder.CreateCall(
1142 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
1143 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
1144 return Builder.CreateCall(
1145 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
1146 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
1147 return Builder.CreateCall(
1148 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
1149 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
1150 return Builder.CreateCall(
1151 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
1152 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
1153 return Builder.CreateCall(
1154 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
1155 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
1156 return Builder.CreateCall(
1157 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
1158 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
1159 return Builder.CreateCall(
1160 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
1161 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
1162 return Builder.CreateCall(
1163 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
1164 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
1165 return Builder.CreateCall(
1166 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
1167 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
1168 return Builder.CreateCall(
1169 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
1170 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
1171 return Builder.CreateCall(
1172 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
1173 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
1174 return Builder.CreateCall(
1175 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
1176 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
1177 return Builder.CreateCall(
1178 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
1179 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
1180 return Builder.CreateCall(
1181 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
1182 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
1183 return Builder.CreateCall(
1184 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
1185 case NVPTX::BI__nvvm_is_explicit_cluster:
1186 return Builder.CreateCall(
1187 CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
1188 case NVPTX::BI__nvvm_isspacep_shared_cluster:
1189 return Builder.CreateCall(
1190 CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
1191 EmitScalarExpr(E->getArg(0)));
1192 case NVPTX::BI__nvvm_mapa:
1193 return Builder.CreateCall(
1194 CGM.getIntrinsic(Intrinsic::nvvm_mapa),
1195 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
1196 case NVPTX::BI__nvvm_mapa_shared_cluster:
1197 return Builder.CreateCall(
1198 CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
1199 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
1200 case NVPTX::BI__nvvm_getctarank:
1201 return Builder.CreateCall(
1202 CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
1203 EmitScalarExpr(E->getArg(0)));
1204 case NVPTX::BI__nvvm_getctarank_shared_cluster:
1205 return Builder.CreateCall(
1206 CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
1207 EmitScalarExpr(E->getArg(0)));
1208 case NVPTX::BI__nvvm_barrier_cluster_arrive:
1209 return Builder.CreateCall(
1210 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
1211 case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
1212 return Builder.CreateCall(
1213 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
1214 case NVPTX::BI__nvvm_barrier_cluster_wait:
1215 return Builder.CreateCall(
1216 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
1217 case NVPTX::BI__nvvm_fence_sc_cluster:
1218 return Builder.CreateCall(
1219 CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
1220 case NVPTX::BI__nvvm_bar_sync:
1221 return Builder.CreateCall(
1222 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_aligned_all),
1223 EmitScalarExpr(E->getArg(0)));
1224 case NVPTX::BI__syncthreads:
1225 return Builder.CreateCall(
1226 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_aligned_all),
1227 Builder.getInt32(0));
1228 case NVPTX::BI__nvvm_barrier_sync:
1229 return Builder.CreateCall(
1230 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_all),
1231 EmitScalarExpr(E->getArg(0)));
1232 case NVPTX::BI__nvvm_barrier_sync_cnt:
1233 return Builder.CreateCall(
1234 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_count),
1235 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
1236 case NVPTX::BI__nvvm_bar0_and:
1237 return Builder.CreateZExt(
1238 Builder.CreateIntrinsic(
1239 Intrinsic::nvvm_barrier_cta_red_and_aligned_all, {},
1240 {Builder.getInt32(0),
1241 Builder.CreateICmpNE(EmitScalarExpr(E->getArg(0)),
1242 Builder.getInt32(0))}),
1243 Builder.getInt32Ty());
1244 case NVPTX::BI__nvvm_bar0_or:
1245 return Builder.CreateZExt(
1246 Builder.CreateIntrinsic(
1247 Intrinsic::nvvm_barrier_cta_red_or_aligned_all, {},
1248 {Builder.getInt32(0),
1249 Builder.CreateICmpNE(EmitScalarExpr(E->getArg(0)),
1250 Builder.getInt32(0))}),
1251 Builder.getInt32Ty());
1252 case NVPTX::BI__nvvm_bar0_popc:
1253 return Builder.CreateIntrinsic(
1254 Intrinsic::nvvm_barrier_cta_red_popc_aligned_all, {},
1255 {Builder.getInt32(0), Builder.CreateICmpNE(EmitScalarExpr(E->getArg(0)),
1256 Builder.getInt32(0))});
1257 default:
1258 return nullptr;
1259 }
1260}