NVPTX.cpp
1//===-------- NVPTX.cpp - Emit LLVM Code for builtins ---------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Builtin calls as LLVM code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "CGBuiltin.h"
14#include "clang/Basic/TargetBuiltins.h"
15#include "llvm/IR/IntrinsicsNVPTX.h"
16
17using namespace clang;
18using namespace CodeGen;
19using namespace llvm;
20
21namespace {
22// Helper classes for mapping MMA builtins to particular LLVM intrinsic variant.
23struct NVPTXMmaLdstInfo {
24 unsigned NumResults; // Number of elements to load/store
25 // Intrinsic IDs for row/col variants. 0 if particular layout is unsupported.
26 unsigned IID_col;
27 unsigned IID_row;
28};
29
30#define MMA_INTR(geom_op_type, layout) \
31 Intrinsic::nvvm_wmma_##geom_op_type##_##layout##_stride
32#define MMA_LDST(n, geom_op_type) \
33 { n, MMA_INTR(geom_op_type, col), MMA_INTR(geom_op_type, row) }
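// For example, MMA_LDST(8, m16n16k16_load_a_f16) expands to
//   {8, Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_col_stride,
//       Intrinsic::nvvm_wmma_m16n16k16_load_a_f16_row_stride},
// i.e. an 8-element fragment with one intrinsic per supported layout.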
34
35static NVPTXMmaLdstInfo getNVPTXMmaLdstInfo(unsigned BuiltinID) {
36 switch (BuiltinID) {
37 // FP MMA loads
38 case NVPTX::BI__hmma_m16n16k16_ld_a:
39 return MMA_LDST(8, m16n16k16_load_a_f16);
40 case NVPTX::BI__hmma_m16n16k16_ld_b:
41 return MMA_LDST(8, m16n16k16_load_b_f16);
42 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
43 return MMA_LDST(4, m16n16k16_load_c_f16);
44 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
45 return MMA_LDST(8, m16n16k16_load_c_f32);
46 case NVPTX::BI__hmma_m32n8k16_ld_a:
47 return MMA_LDST(8, m32n8k16_load_a_f16);
48 case NVPTX::BI__hmma_m32n8k16_ld_b:
49 return MMA_LDST(8, m32n8k16_load_b_f16);
50 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
51 return MMA_LDST(4, m32n8k16_load_c_f16);
52 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
53 return MMA_LDST(8, m32n8k16_load_c_f32);
54 case NVPTX::BI__hmma_m8n32k16_ld_a:
55 return MMA_LDST(8, m8n32k16_load_a_f16);
56 case NVPTX::BI__hmma_m8n32k16_ld_b:
57 return MMA_LDST(8, m8n32k16_load_b_f16);
58 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
59 return MMA_LDST(4, m8n32k16_load_c_f16);
60 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
61 return MMA_LDST(8, m8n32k16_load_c_f32);
62
63 // Integer MMA loads
64 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
65 return MMA_LDST(2, m16n16k16_load_a_s8);
66 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
67 return MMA_LDST(2, m16n16k16_load_a_u8);
68 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
69 return MMA_LDST(2, m16n16k16_load_b_s8);
70 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
71 return MMA_LDST(2, m16n16k16_load_b_u8);
72 case NVPTX::BI__imma_m16n16k16_ld_c:
73 return MMA_LDST(8, m16n16k16_load_c_s32);
74 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
75 return MMA_LDST(4, m32n8k16_load_a_s8);
76 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
77 return MMA_LDST(4, m32n8k16_load_a_u8);
78 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
79 return MMA_LDST(1, m32n8k16_load_b_s8);
80 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
81 return MMA_LDST(1, m32n8k16_load_b_u8);
82 case NVPTX::BI__imma_m32n8k16_ld_c:
83 return MMA_LDST(8, m32n8k16_load_c_s32);
84 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
85 return MMA_LDST(1, m8n32k16_load_a_s8);
86 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
87 return MMA_LDST(1, m8n32k16_load_a_u8);
88 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
89 return MMA_LDST(4, m8n32k16_load_b_s8);
90 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
91 return MMA_LDST(4, m8n32k16_load_b_u8);
92 case NVPTX::BI__imma_m8n32k16_ld_c:
93 return MMA_LDST(8, m8n32k16_load_c_s32);
94
95 // Sub-integer MMA loads.
96 // Only row/col layout is supported by A/B fragments.
97 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
98 return {1, 0, MMA_INTR(m8n8k32_load_a_s4, row)};
99 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
100 return {1, 0, MMA_INTR(m8n8k32_load_a_u4, row)};
101 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
102 return {1, MMA_INTR(m8n8k32_load_b_s4, col), 0};
103 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
104 return {1, MMA_INTR(m8n8k32_load_b_u4, col), 0};
105 case NVPTX::BI__imma_m8n8k32_ld_c:
106 return MMA_LDST(2, m8n8k32_load_c_s32);
107 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
108 return {1, 0, MMA_INTR(m8n8k128_load_a_b1, row)};
109 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
110 return {1, MMA_INTR(m8n8k128_load_b_b1, col), 0};
111 case NVPTX::BI__bmma_m8n8k128_ld_c:
112 return MMA_LDST(2, m8n8k128_load_c_s32);
113
114 // Double MMA loads
115 case NVPTX::BI__dmma_m8n8k4_ld_a:
116 return MMA_LDST(1, m8n8k4_load_a_f64);
117 case NVPTX::BI__dmma_m8n8k4_ld_b:
118 return MMA_LDST(1, m8n8k4_load_b_f64);
119 case NVPTX::BI__dmma_m8n8k4_ld_c:
120 return MMA_LDST(2, m8n8k4_load_c_f64);
121
122 // Alternate float MMA loads
123 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
124 return MMA_LDST(4, m16n16k16_load_a_bf16);
125 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
126 return MMA_LDST(4, m16n16k16_load_b_bf16);
127 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
128 return MMA_LDST(2, m8n32k16_load_a_bf16);
129 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
130 return MMA_LDST(8, m8n32k16_load_b_bf16);
131 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
132 return MMA_LDST(8, m32n8k16_load_a_bf16);
133 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
134 return MMA_LDST(2, m32n8k16_load_b_bf16);
135 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
136 return MMA_LDST(4, m16n16k8_load_a_tf32);
137 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
138 return MMA_LDST(4, m16n16k8_load_b_tf32);
139 case NVPTX::BI__mma_tf32_m16n16k8_ld_c:
140 return MMA_LDST(8, m16n16k8_load_c_f32);
141
 142 // NOTE: We need to follow the inconsistent naming scheme used by NVCC. Unlike
143 // PTX and LLVM IR where stores always use fragment D, NVCC builtins always
144 // use fragment C for both loads and stores.
145 // FP MMA stores.
146 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
147 return MMA_LDST(4, m16n16k16_store_d_f16);
148 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
149 return MMA_LDST(8, m16n16k16_store_d_f32);
150 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
151 return MMA_LDST(4, m32n8k16_store_d_f16);
152 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
153 return MMA_LDST(8, m32n8k16_store_d_f32);
154 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
155 return MMA_LDST(4, m8n32k16_store_d_f16);
156 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
157 return MMA_LDST(8, m8n32k16_store_d_f32);
158
159 // Integer and sub-integer MMA stores.
160 // Another naming quirk. Unlike other MMA builtins that use PTX types in the
161 // name, integer loads/stores use LLVM's i32.
162 case NVPTX::BI__imma_m16n16k16_st_c_i32:
163 return MMA_LDST(8, m16n16k16_store_d_s32);
164 case NVPTX::BI__imma_m32n8k16_st_c_i32:
165 return MMA_LDST(8, m32n8k16_store_d_s32);
166 case NVPTX::BI__imma_m8n32k16_st_c_i32:
167 return MMA_LDST(8, m8n32k16_store_d_s32);
168 case NVPTX::BI__imma_m8n8k32_st_c_i32:
169 return MMA_LDST(2, m8n8k32_store_d_s32);
170 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
171 return MMA_LDST(2, m8n8k128_store_d_s32);
172
173 // Double MMA store
174 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
175 return MMA_LDST(2, m8n8k4_store_d_f64);
176
177 // Alternate float MMA store
178 case NVPTX::BI__mma_m16n16k8_st_c_f32:
179 return MMA_LDST(8, m16n16k8_store_d_f32);
180
181 default:
182 llvm_unreachable("Unknown MMA builtin");
183 }
184}
185#undef MMA_LDST
186#undef MMA_INTR
187
188
189struct NVPTXMmaInfo {
190 unsigned NumEltsA;
191 unsigned NumEltsB;
192 unsigned NumEltsC;
193 unsigned NumEltsD;
194
195 // Variants are ordered by layout-A/layout-B/satf, where 'row' has priority
196 // over 'col' for layout. The index of non-satf variants is expected to match
197 // the undocumented layout constants used by CUDA's mma.hpp.
198 std::array<unsigned, 8> Variants;
199
200 unsigned getMMAIntrinsic(int Layout, bool Satf) {
201 unsigned Index = Layout + 4 * Satf;
202 if (Index >= Variants.size())
203 return 0;
204 return Variants[Index];
205 }
206};
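// For example, __hmma_m16n16k16_mma_f32f32 called with Layout == 1 (row-major
// A, column-major B) and Satf == true selects Variants[1 + 4 * 1], i.e.
// Intrinsic::nvvm_wmma_m16n16k16_mma_row_col_f32_f32_satfinite.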
207
208 // Returns an intrinsic that matches Layout and Satf for valid combinations of
209 // Layout and Satf, 0 otherwise.
210static NVPTXMmaInfo getNVPTXMmaInfo(unsigned BuiltinID) {
211 // clang-format off
212#define MMA_VARIANTS(geom, type) \
213 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type, \
214 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
215 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type, \
216 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type
217#define MMA_SATF_VARIANTS(geom, type) \
218 MMA_VARIANTS(geom, type), \
219 Intrinsic::nvvm_wmma_##geom##_mma_row_row_##type##_satfinite, \
220 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
221 Intrinsic::nvvm_wmma_##geom##_mma_col_row_##type##_satfinite, \
222 Intrinsic::nvvm_wmma_##geom##_mma_col_col_##type##_satfinite
223// Sub-integer MMA only supports row.col layout.
224#define MMA_VARIANTS_I4(geom, type) \
225 0, \
226 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type, \
227 0, \
228 0, \
229 0, \
230 Intrinsic::nvvm_wmma_##geom##_mma_row_col_##type##_satfinite, \
231 0, \
232 0
233// b1 MMA does not support .satfinite.
234#define MMA_VARIANTS_B1_XOR(geom, type) \
235 0, \
236 Intrinsic::nvvm_wmma_##geom##_mma_xor_popc_row_col_##type, \
237 0, \
238 0, \
239 0, \
240 0, \
241 0, \
242 0
243#define MMA_VARIANTS_B1_AND(geom, type) \
244 0, \
245 Intrinsic::nvvm_wmma_##geom##_mma_and_popc_row_col_##type, \
246 0, \
247 0, \
248 0, \
249 0, \
250 0, \
251 0
252 // clang-format on
253 switch (BuiltinID) {
254 // FP MMA
255 // Note that 'type' argument of MMA_SATF_VARIANTS uses D_C notation, while
256 // NumEltsN of return value are ordered as A,B,C,D.
257 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
258 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f16)}}};
259 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
260 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f16)}}};
261 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
262 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m16n16k16, f16_f32)}}};
263 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
264 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, f32_f32)}}};
265 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
266 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f16)}}};
267 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
268 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f16)}}};
269 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
270 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m32n8k16, f16_f32)}}};
271 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
272 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, f32_f32)}}};
273 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
274 return {8, 8, 4, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f16)}}};
275 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
276 return {8, 8, 4, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f16)}}};
277 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
278 return {8, 8, 8, 4, {{MMA_SATF_VARIANTS(m8n32k16, f16_f32)}}};
279 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
280 return {8, 8, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, f32_f32)}}};
281
282 // Integer MMA
283 case NVPTX::BI__imma_m16n16k16_mma_s8:
284 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, s8)}}};
285 case NVPTX::BI__imma_m16n16k16_mma_u8:
286 return {2, 2, 8, 8, {{MMA_SATF_VARIANTS(m16n16k16, u8)}}};
287 case NVPTX::BI__imma_m32n8k16_mma_s8:
288 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, s8)}}};
289 case NVPTX::BI__imma_m32n8k16_mma_u8:
290 return {4, 1, 8, 8, {{MMA_SATF_VARIANTS(m32n8k16, u8)}}};
291 case NVPTX::BI__imma_m8n32k16_mma_s8:
292 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, s8)}}};
293 case NVPTX::BI__imma_m8n32k16_mma_u8:
294 return {1, 4, 8, 8, {{MMA_SATF_VARIANTS(m8n32k16, u8)}}};
295
296 // Sub-integer MMA
297 case NVPTX::BI__imma_m8n8k32_mma_s4:
298 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, s4)}}};
299 case NVPTX::BI__imma_m8n8k32_mma_u4:
300 return {1, 1, 2, 2, {{MMA_VARIANTS_I4(m8n8k32, u4)}}};
301 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
302 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_XOR(m8n8k128, b1)}}};
303 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
304 return {1, 1, 2, 2, {{MMA_VARIANTS_B1_AND(m8n8k128, b1)}}};
305
306 // Double MMA
307 case NVPTX::BI__dmma_m8n8k4_mma_f64:
308 return {1, 1, 2, 2, {{MMA_VARIANTS(m8n8k4, f64)}}};
309
310 // Alternate FP MMA
311 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
312 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k16, bf16)}}};
313 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
314 return {2, 8, 8, 8, {{MMA_VARIANTS(m8n32k16, bf16)}}};
315 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
316 return {8, 2, 8, 8, {{MMA_VARIANTS(m32n8k16, bf16)}}};
317 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32:
318 return {4, 4, 8, 8, {{MMA_VARIANTS(m16n16k8, tf32)}}};
319 default:
320 llvm_unreachable("Unexpected builtin ID.");
321 }
322#undef MMA_VARIANTS
323#undef MMA_SATF_VARIANTS
324#undef MMA_VARIANTS_I4
325#undef MMA_VARIANTS_B1_AND
326#undef MMA_VARIANTS_B1_XOR
327}
328
329static Value *MakeLdu(unsigned IntrinsicID, CodeGenFunction &CGF,
330 const CallExpr *E) {
331 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
 332 QualType ArgType = E->getArg(0)->getType();
 333 clang::CharUnits Align = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
334 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
335 return CGF.Builder.CreateCall(
336 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
337 {Ptr, ConstantInt::get(CGF.Builder.getInt32Ty(), Align.getQuantity())});
338}
339
340static Value *MakeLdg(CodeGenFunction &CGF, const CallExpr *E) {
341 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
 342 QualType ArgType = E->getArg(0)->getType();
 343 clang::CharUnits AlignV = CGF.CGM.getNaturalPointeeTypeAlignment(ArgType);
344 llvm::Type *ElemTy = CGF.ConvertTypeForMem(ArgType->getPointeeType());
345
346 // Use addrspace(1) for NVPTX ADDRESS_SPACE_GLOBAL
347 auto *ASC = CGF.Builder.CreateAddrSpaceCast(Ptr, CGF.Builder.getPtrTy(1));
348 auto *LD = CGF.Builder.CreateAlignedLoad(ElemTy, ASC, AlignV.getAsAlign());
349 MDNode *MD = MDNode::get(CGF.Builder.getContext(), {});
350 LD->setMetadata(LLVMContext::MD_invariant_load, MD);
351
352 return LD;
353}
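// E.g. for __nvvm_ldg_i this emits, roughly:
//   %1 = addrspacecast ptr %p to ptr addrspace(1)
//   %2 = load i32, ptr addrspace(1) %1, align 4, !invariant.load !0
// so the NVPTX backend can select a non-coherent ld.global.nc.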
354
355static Value *MakeScopedAtomic(unsigned IntrinsicID, CodeGenFunction &CGF,
356 const CallExpr *E) {
357 Value *Ptr = CGF.EmitScalarExpr(E->getArg(0));
 358 llvm::Type *ElemTy =
 359 CGF.ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
360 return CGF.Builder.CreateCall(
361 CGF.CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
362 {Ptr, CGF.EmitScalarExpr(E->getArg(1))});
363}
364
365static Value *MakeCpAsync(unsigned IntrinsicID, unsigned IntrinsicIDS,
366 CodeGenFunction &CGF, const CallExpr *E,
367 int SrcSize) {
368 return E->getNumArgs() == 3
369 ? CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicIDS),
370 {CGF.EmitScalarExpr(E->getArg(0)),
371 CGF.EmitScalarExpr(E->getArg(1)),
372 CGF.EmitScalarExpr(E->getArg(2))})
373 : CGF.Builder.CreateCall(CGF.CGM.getIntrinsic(IntrinsicID),
374 {CGF.EmitScalarExpr(E->getArg(0)),
375 CGF.EmitScalarExpr(E->getArg(1))});
376}
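// When the builtin is called with a third argument (forwarded as the cp.async
// src-size operand), the "_s" intrinsic variant is emitted; otherwise the
// plain two-operand intrinsic is used. The SrcSize parameter is not used here.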
377
378static bool EnsureNativeHalfSupport(unsigned BuiltinID, const CallExpr *E,
379 CodeGenFunction &CGF) {
380 auto &C = CGF.CGM.getContext();
381 if (!C.getLangOpts().NativeHalfType &&
382 C.getTargetInfo().useFP16ConversionIntrinsics()) {
383 CGF.CGM.Error(E->getExprLoc(), C.BuiltinInfo.getQuotedName(BuiltinID) +
384 " requires native half type support.");
385 return false;
386 }
387 return true;
388}
389
390static Value *MakeHalfType(Function *Intrinsic, unsigned BuiltinID,
391 const CallExpr *E, CodeGenFunction &CGF) {
392 if (!EnsureNativeHalfSupport(BuiltinID, E, CGF))
393 return nullptr;
394
 395 SmallVector<Value *, 16> Args;
 396 auto *FTy = Intrinsic->getFunctionType();
 397 unsigned ICEArguments = 0;
 398 ASTContext::GetBuiltinTypeError Error;
 399 CGF.CGM.getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
400 assert(Error == ASTContext::GE_None && "Should not codegen an error");
401 for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
402 assert((ICEArguments & (1 << i)) == 0);
403 auto *ArgValue = CGF.EmitScalarExpr(E->getArg(i));
404 auto *PTy = FTy->getParamType(i);
405 if (PTy != ArgValue->getType())
406 ArgValue = CGF.Builder.CreateBitCast(ArgValue, PTy);
407 Args.push_back(ArgValue);
408 }
409
410 return CGF.Builder.CreateCall(Intrinsic, Args);
411}
412
413static Value *MakeHalfType(unsigned IntrinsicID, unsigned BuiltinID,
414 const CallExpr *E, CodeGenFunction &CGF) {
415 return MakeHalfType(CGF.CGM.getIntrinsic(IntrinsicID), BuiltinID, E, CGF);
416}
417
418} // namespace
419
 420Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
 421 const CallExpr *E) {
422 switch (BuiltinID) {
423 case NVPTX::BI__nvvm_atom_add_gen_i:
424 case NVPTX::BI__nvvm_atom_add_gen_l:
425 case NVPTX::BI__nvvm_atom_add_gen_ll:
426 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Add, E);
427
428 case NVPTX::BI__nvvm_atom_sub_gen_i:
429 case NVPTX::BI__nvvm_atom_sub_gen_l:
430 case NVPTX::BI__nvvm_atom_sub_gen_ll:
431 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Sub, E);
432
433 case NVPTX::BI__nvvm_atom_and_gen_i:
434 case NVPTX::BI__nvvm_atom_and_gen_l:
435 case NVPTX::BI__nvvm_atom_and_gen_ll:
436 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::And, E);
437
438 case NVPTX::BI__nvvm_atom_or_gen_i:
439 case NVPTX::BI__nvvm_atom_or_gen_l:
440 case NVPTX::BI__nvvm_atom_or_gen_ll:
441 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Or, E);
442
443 case NVPTX::BI__nvvm_atom_xor_gen_i:
444 case NVPTX::BI__nvvm_atom_xor_gen_l:
445 case NVPTX::BI__nvvm_atom_xor_gen_ll:
446 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xor, E);
447
448 case NVPTX::BI__nvvm_atom_xchg_gen_i:
449 case NVPTX::BI__nvvm_atom_xchg_gen_l:
450 case NVPTX::BI__nvvm_atom_xchg_gen_ll:
451 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Xchg, E);
452
453 case NVPTX::BI__nvvm_atom_max_gen_i:
454 case NVPTX::BI__nvvm_atom_max_gen_l:
455 case NVPTX::BI__nvvm_atom_max_gen_ll:
456 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Max, E);
457
458 case NVPTX::BI__nvvm_atom_max_gen_ui:
459 case NVPTX::BI__nvvm_atom_max_gen_ul:
460 case NVPTX::BI__nvvm_atom_max_gen_ull:
461 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMax, E);
462
463 case NVPTX::BI__nvvm_atom_min_gen_i:
464 case NVPTX::BI__nvvm_atom_min_gen_l:
465 case NVPTX::BI__nvvm_atom_min_gen_ll:
466 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::Min, E);
467
468 case NVPTX::BI__nvvm_atom_min_gen_ui:
469 case NVPTX::BI__nvvm_atom_min_gen_ul:
470 case NVPTX::BI__nvvm_atom_min_gen_ull:
471 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UMin, E);
472
473 case NVPTX::BI__nvvm_atom_cas_gen_us:
474 case NVPTX::BI__nvvm_atom_cas_gen_i:
475 case NVPTX::BI__nvvm_atom_cas_gen_l:
476 case NVPTX::BI__nvvm_atom_cas_gen_ll:
477 // __nvvm_atom_cas_gen_* should return the old value rather than the
478 // success flag.
479 return MakeAtomicCmpXchgValue(*this, E, /*ReturnBool=*/false);
480
481 case NVPTX::BI__nvvm_atom_add_gen_f:
482 case NVPTX::BI__nvvm_atom_add_gen_d: {
483 Address DestAddr = EmitPointerWithAlignment(E->getArg(0));
484 Value *Val = EmitScalarExpr(E->getArg(1));
485
486 return Builder.CreateAtomicRMW(llvm::AtomicRMWInst::FAdd, DestAddr, Val,
487 AtomicOrdering::SequentiallyConsistent);
488 }
489
490 case NVPTX::BI__nvvm_atom_inc_gen_ui:
491 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UIncWrap, E);
492
493 case NVPTX::BI__nvvm_atom_dec_gen_ui:
494 return MakeBinaryAtomicValue(*this, llvm::AtomicRMWInst::UDecWrap, E);
495
496 case NVPTX::BI__nvvm_ldg_c:
497 case NVPTX::BI__nvvm_ldg_sc:
498 case NVPTX::BI__nvvm_ldg_c2:
499 case NVPTX::BI__nvvm_ldg_sc2:
500 case NVPTX::BI__nvvm_ldg_c4:
501 case NVPTX::BI__nvvm_ldg_sc4:
502 case NVPTX::BI__nvvm_ldg_s:
503 case NVPTX::BI__nvvm_ldg_s2:
504 case NVPTX::BI__nvvm_ldg_s4:
505 case NVPTX::BI__nvvm_ldg_i:
506 case NVPTX::BI__nvvm_ldg_i2:
507 case NVPTX::BI__nvvm_ldg_i4:
508 case NVPTX::BI__nvvm_ldg_l:
509 case NVPTX::BI__nvvm_ldg_l2:
510 case NVPTX::BI__nvvm_ldg_ll:
511 case NVPTX::BI__nvvm_ldg_ll2:
512 case NVPTX::BI__nvvm_ldg_uc:
513 case NVPTX::BI__nvvm_ldg_uc2:
514 case NVPTX::BI__nvvm_ldg_uc4:
515 case NVPTX::BI__nvvm_ldg_us:
516 case NVPTX::BI__nvvm_ldg_us2:
517 case NVPTX::BI__nvvm_ldg_us4:
518 case NVPTX::BI__nvvm_ldg_ui:
519 case NVPTX::BI__nvvm_ldg_ui2:
520 case NVPTX::BI__nvvm_ldg_ui4:
521 case NVPTX::BI__nvvm_ldg_ul:
522 case NVPTX::BI__nvvm_ldg_ul2:
523 case NVPTX::BI__nvvm_ldg_ull:
524 case NVPTX::BI__nvvm_ldg_ull2:
525 case NVPTX::BI__nvvm_ldg_f:
526 case NVPTX::BI__nvvm_ldg_f2:
527 case NVPTX::BI__nvvm_ldg_f4:
528 case NVPTX::BI__nvvm_ldg_d:
529 case NVPTX::BI__nvvm_ldg_d2:
530 // PTX Interoperability section 2.2: "For a vector with an even number of
531 // elements, its alignment is set to number of elements times the alignment
532 // of its member: n*alignof(t)."
533 return MakeLdg(*this, E);
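// E.g. __nvvm_ldg_f4 therefore loads a <4 x float> with 16-byte alignment
// (4 * alignof(float)).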
534
535 case NVPTX::BI__nvvm_ldu_c:
536 case NVPTX::BI__nvvm_ldu_sc:
537 case NVPTX::BI__nvvm_ldu_c2:
538 case NVPTX::BI__nvvm_ldu_sc2:
539 case NVPTX::BI__nvvm_ldu_c4:
540 case NVPTX::BI__nvvm_ldu_sc4:
541 case NVPTX::BI__nvvm_ldu_s:
542 case NVPTX::BI__nvvm_ldu_s2:
543 case NVPTX::BI__nvvm_ldu_s4:
544 case NVPTX::BI__nvvm_ldu_i:
545 case NVPTX::BI__nvvm_ldu_i2:
546 case NVPTX::BI__nvvm_ldu_i4:
547 case NVPTX::BI__nvvm_ldu_l:
548 case NVPTX::BI__nvvm_ldu_l2:
549 case NVPTX::BI__nvvm_ldu_ll:
550 case NVPTX::BI__nvvm_ldu_ll2:
551 case NVPTX::BI__nvvm_ldu_uc:
552 case NVPTX::BI__nvvm_ldu_uc2:
553 case NVPTX::BI__nvvm_ldu_uc4:
554 case NVPTX::BI__nvvm_ldu_us:
555 case NVPTX::BI__nvvm_ldu_us2:
556 case NVPTX::BI__nvvm_ldu_us4:
557 case NVPTX::BI__nvvm_ldu_ui:
558 case NVPTX::BI__nvvm_ldu_ui2:
559 case NVPTX::BI__nvvm_ldu_ui4:
560 case NVPTX::BI__nvvm_ldu_ul:
561 case NVPTX::BI__nvvm_ldu_ul2:
562 case NVPTX::BI__nvvm_ldu_ull:
563 case NVPTX::BI__nvvm_ldu_ull2:
564 return MakeLdu(Intrinsic::nvvm_ldu_global_i, *this, E);
565 case NVPTX::BI__nvvm_ldu_f:
566 case NVPTX::BI__nvvm_ldu_f2:
567 case NVPTX::BI__nvvm_ldu_f4:
568 case NVPTX::BI__nvvm_ldu_d:
569 case NVPTX::BI__nvvm_ldu_d2:
570 return MakeLdu(Intrinsic::nvvm_ldu_global_f, *this, E);
571
572 case NVPTX::BI__nvvm_atom_cta_add_gen_i:
573 case NVPTX::BI__nvvm_atom_cta_add_gen_l:
574 case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
575 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta, *this, E);
576 case NVPTX::BI__nvvm_atom_sys_add_gen_i:
577 case NVPTX::BI__nvvm_atom_sys_add_gen_l:
578 case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
579 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys, *this, E);
580 case NVPTX::BI__nvvm_atom_cta_add_gen_f:
581 case NVPTX::BI__nvvm_atom_cta_add_gen_d:
582 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta, *this, E);
583 case NVPTX::BI__nvvm_atom_sys_add_gen_f:
584 case NVPTX::BI__nvvm_atom_sys_add_gen_d:
585 return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys, *this, E);
586 case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
587 case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
588 case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
589 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta, *this, E);
590 case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
591 case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
592 case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
593 return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys, *this, E);
594 case NVPTX::BI__nvvm_atom_cta_max_gen_i:
595 case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
596 case NVPTX::BI__nvvm_atom_cta_max_gen_l:
597 case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
598 case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
599 case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
600 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta, *this, E);
601 case NVPTX::BI__nvvm_atom_sys_max_gen_i:
602 case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
603 case NVPTX::BI__nvvm_atom_sys_max_gen_l:
604 case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
605 case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
606 case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
607 return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys, *this, E);
608 case NVPTX::BI__nvvm_atom_cta_min_gen_i:
609 case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
610 case NVPTX::BI__nvvm_atom_cta_min_gen_l:
611 case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
612 case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
613 case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
614 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta, *this, E);
615 case NVPTX::BI__nvvm_atom_sys_min_gen_i:
616 case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
617 case NVPTX::BI__nvvm_atom_sys_min_gen_l:
618 case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
619 case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
620 case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
621 return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys, *this, E);
622 case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
623 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta, *this, E);
624 case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
625 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta, *this, E);
626 case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
627 return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys, *this, E);
628 case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
629 return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys, *this, E);
630 case NVPTX::BI__nvvm_atom_cta_and_gen_i:
631 case NVPTX::BI__nvvm_atom_cta_and_gen_l:
632 case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
633 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta, *this, E);
634 case NVPTX::BI__nvvm_atom_sys_and_gen_i:
635 case NVPTX::BI__nvvm_atom_sys_and_gen_l:
636 case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
637 return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys, *this, E);
638 case NVPTX::BI__nvvm_atom_cta_or_gen_i:
639 case NVPTX::BI__nvvm_atom_cta_or_gen_l:
640 case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
641 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta, *this, E);
642 case NVPTX::BI__nvvm_atom_sys_or_gen_i:
643 case NVPTX::BI__nvvm_atom_sys_or_gen_l:
644 case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
645 return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys, *this, E);
646 case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
647 case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
648 case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
649 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta, *this, E);
650 case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
651 case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
652 case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
653 return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys, *this, E);
654 case NVPTX::BI__nvvm_atom_cta_cas_gen_us:
655 case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
656 case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
657 case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
658 Value *Ptr = EmitScalarExpr(E->getArg(0));
 659 llvm::Type *ElemTy =
 660 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
661 return Builder.CreateCall(
662 CGM.getIntrinsic(
663 Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
664 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
665 }
666 case NVPTX::BI__nvvm_atom_sys_cas_gen_us:
667 case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
668 case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
669 case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
670 Value *Ptr = EmitScalarExpr(E->getArg(0));
 671 llvm::Type *ElemTy =
 672 ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
673 return Builder.CreateCall(
674 CGM.getIntrinsic(
675 Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
676 {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
677 }
678 case NVPTX::BI__nvvm_match_all_sync_i32p:
679 case NVPTX::BI__nvvm_match_all_sync_i64p: {
680 Value *Mask = EmitScalarExpr(E->getArg(0));
681 Value *Val = EmitScalarExpr(E->getArg(1));
682 Address PredOutPtr = EmitPointerWithAlignment(E->getArg(2));
683 Value *ResultPair = Builder.CreateCall(
684 CGM.getIntrinsic(BuiltinID == NVPTX::BI__nvvm_match_all_sync_i32p
685 ? Intrinsic::nvvm_match_all_sync_i32p
686 : Intrinsic::nvvm_match_all_sync_i64p),
687 {Mask, Val});
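 // ResultPair is {matched-mask, predicate}; widen the i1 predicate to the
 // pointee type of the third argument and store it there, then return the mask.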
688 Value *Pred = Builder.CreateZExt(Builder.CreateExtractValue(ResultPair, 1),
689 PredOutPtr.getElementType());
690 Builder.CreateStore(Pred, PredOutPtr);
691 return Builder.CreateExtractValue(ResultPair, 0);
692 }
693
694 // FP MMA loads
695 case NVPTX::BI__hmma_m16n16k16_ld_a:
696 case NVPTX::BI__hmma_m16n16k16_ld_b:
697 case NVPTX::BI__hmma_m16n16k16_ld_c_f16:
698 case NVPTX::BI__hmma_m16n16k16_ld_c_f32:
699 case NVPTX::BI__hmma_m32n8k16_ld_a:
700 case NVPTX::BI__hmma_m32n8k16_ld_b:
701 case NVPTX::BI__hmma_m32n8k16_ld_c_f16:
702 case NVPTX::BI__hmma_m32n8k16_ld_c_f32:
703 case NVPTX::BI__hmma_m8n32k16_ld_a:
704 case NVPTX::BI__hmma_m8n32k16_ld_b:
705 case NVPTX::BI__hmma_m8n32k16_ld_c_f16:
706 case NVPTX::BI__hmma_m8n32k16_ld_c_f32:
707 // Integer MMA loads.
708 case NVPTX::BI__imma_m16n16k16_ld_a_s8:
709 case NVPTX::BI__imma_m16n16k16_ld_a_u8:
710 case NVPTX::BI__imma_m16n16k16_ld_b_s8:
711 case NVPTX::BI__imma_m16n16k16_ld_b_u8:
712 case NVPTX::BI__imma_m16n16k16_ld_c:
713 case NVPTX::BI__imma_m32n8k16_ld_a_s8:
714 case NVPTX::BI__imma_m32n8k16_ld_a_u8:
715 case NVPTX::BI__imma_m32n8k16_ld_b_s8:
716 case NVPTX::BI__imma_m32n8k16_ld_b_u8:
717 case NVPTX::BI__imma_m32n8k16_ld_c:
718 case NVPTX::BI__imma_m8n32k16_ld_a_s8:
719 case NVPTX::BI__imma_m8n32k16_ld_a_u8:
720 case NVPTX::BI__imma_m8n32k16_ld_b_s8:
721 case NVPTX::BI__imma_m8n32k16_ld_b_u8:
722 case NVPTX::BI__imma_m8n32k16_ld_c:
723 // Sub-integer MMA loads.
724 case NVPTX::BI__imma_m8n8k32_ld_a_s4:
725 case NVPTX::BI__imma_m8n8k32_ld_a_u4:
726 case NVPTX::BI__imma_m8n8k32_ld_b_s4:
727 case NVPTX::BI__imma_m8n8k32_ld_b_u4:
728 case NVPTX::BI__imma_m8n8k32_ld_c:
729 case NVPTX::BI__bmma_m8n8k128_ld_a_b1:
730 case NVPTX::BI__bmma_m8n8k128_ld_b_b1:
731 case NVPTX::BI__bmma_m8n8k128_ld_c:
732 // Double MMA loads.
733 case NVPTX::BI__dmma_m8n8k4_ld_a:
734 case NVPTX::BI__dmma_m8n8k4_ld_b:
735 case NVPTX::BI__dmma_m8n8k4_ld_c:
736 // Alternate float MMA loads.
737 case NVPTX::BI__mma_bf16_m16n16k16_ld_a:
738 case NVPTX::BI__mma_bf16_m16n16k16_ld_b:
739 case NVPTX::BI__mma_bf16_m8n32k16_ld_a:
740 case NVPTX::BI__mma_bf16_m8n32k16_ld_b:
741 case NVPTX::BI__mma_bf16_m32n8k16_ld_a:
742 case NVPTX::BI__mma_bf16_m32n8k16_ld_b:
743 case NVPTX::BI__mma_tf32_m16n16k8_ld_a:
744 case NVPTX::BI__mma_tf32_m16n16k8_ld_b:
745 case NVPTX::BI__mma_tf32_m16n16k8_ld_c: {
 746 Address Dst = EmitPointerWithAlignment(E->getArg(0));
 747 Value *Src = EmitScalarExpr(E->getArg(1));
748 Value *Ldm = EmitScalarExpr(E->getArg(2));
 749 std::optional<llvm::APSInt> isColMajorArg =
 750 E->getArg(3)->getIntegerConstantExpr(getContext());
751 if (!isColMajorArg)
752 return nullptr;
753 bool isColMajor = isColMajorArg->getSExtValue();
754 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
755 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
756 if (IID == 0)
757 return nullptr;
758
759 Value *Result =
760 Builder.CreateCall(CGM.getIntrinsic(IID, Src->getType()), {Src, Ldm});
761
762 // Save returned values.
763 assert(II.NumResults);
764 if (II.NumResults == 1) {
 765 Builder.CreateAlignedStore(Result, Dst.emitRawPointer(*this),
 766 CharUnits::fromQuantity(4));
767 } else {
768 for (unsigned i = 0; i < II.NumResults; ++i) {
769 Builder.CreateAlignedStore(
770 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i),
771 Dst.getElementType()),
772 Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
 773 llvm::ConstantInt::get(IntTy, i)),
 774 CharUnits::fromQuantity(4));
775 }
776 }
777 return Result;
778 }
779
780 case NVPTX::BI__hmma_m16n16k16_st_c_f16:
781 case NVPTX::BI__hmma_m16n16k16_st_c_f32:
782 case NVPTX::BI__hmma_m32n8k16_st_c_f16:
783 case NVPTX::BI__hmma_m32n8k16_st_c_f32:
784 case NVPTX::BI__hmma_m8n32k16_st_c_f16:
785 case NVPTX::BI__hmma_m8n32k16_st_c_f32:
786 case NVPTX::BI__imma_m16n16k16_st_c_i32:
787 case NVPTX::BI__imma_m32n8k16_st_c_i32:
788 case NVPTX::BI__imma_m8n32k16_st_c_i32:
789 case NVPTX::BI__imma_m8n8k32_st_c_i32:
790 case NVPTX::BI__bmma_m8n8k128_st_c_i32:
791 case NVPTX::BI__dmma_m8n8k4_st_c_f64:
792 case NVPTX::BI__mma_m16n16k8_st_c_f32: {
 793 Value *Dst = EmitScalarExpr(E->getArg(0));
 794 Address Src = EmitPointerWithAlignment(E->getArg(1));
795 Value *Ldm = EmitScalarExpr(E->getArg(2));
 796 std::optional<llvm::APSInt> isColMajorArg =
 797 E->getArg(3)->getIntegerConstantExpr(getContext());
798 if (!isColMajorArg)
799 return nullptr;
800 bool isColMajor = isColMajorArg->getSExtValue();
801 NVPTXMmaLdstInfo II = getNVPTXMmaLdstInfo(BuiltinID);
802 unsigned IID = isColMajor ? II.IID_col : II.IID_row;
803 if (IID == 0)
804 return nullptr;
805 Function *Intrinsic =
806 CGM.getIntrinsic(IID, Dst->getType());
807 llvm::Type *ParamType = Intrinsic->getFunctionType()->getParamType(1);
808 SmallVector<Value *, 10> Values = {Dst};
809 for (unsigned i = 0; i < II.NumResults; ++i) {
810 Value *V = Builder.CreateAlignedLoad(
811 Src.getElementType(),
812 Builder.CreateGEP(Src.getElementType(), Src.emitRawPointer(*this),
 813 llvm::ConstantInt::get(IntTy, i)),
 814 CharUnits::fromQuantity(4));
815 Values.push_back(Builder.CreateBitCast(V, ParamType));
816 }
817 Values.push_back(Ldm);
818 Value *Result = Builder.CreateCall(Intrinsic, Values);
819 return Result;
820 }
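// E.g. __hmma_m16n16k16_st_c_f32(dst, src, ldm, /*layout=*/0) loads the
// 8-element f32 accumulator fragment from src and passes it, together with
// dst and ldm, to the row-major nvvm_wmma_m16n16k16_store_d_f32_row_stride
// intrinsic.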
821
822 // BI__hmma_m16n16k16_mma_<Dtype><CType>(d, a, b, c, layout, satf) -->
823 // Intrinsic::nvvm_wmma_m16n16k16_mma_sync<layout A,B><DType><CType><Satf>
824 case NVPTX::BI__hmma_m16n16k16_mma_f16f16:
825 case NVPTX::BI__hmma_m16n16k16_mma_f32f16:
826 case NVPTX::BI__hmma_m16n16k16_mma_f32f32:
827 case NVPTX::BI__hmma_m16n16k16_mma_f16f32:
828 case NVPTX::BI__hmma_m32n8k16_mma_f16f16:
829 case NVPTX::BI__hmma_m32n8k16_mma_f32f16:
830 case NVPTX::BI__hmma_m32n8k16_mma_f32f32:
831 case NVPTX::BI__hmma_m32n8k16_mma_f16f32:
832 case NVPTX::BI__hmma_m8n32k16_mma_f16f16:
833 case NVPTX::BI__hmma_m8n32k16_mma_f32f16:
834 case NVPTX::BI__hmma_m8n32k16_mma_f32f32:
835 case NVPTX::BI__hmma_m8n32k16_mma_f16f32:
836 case NVPTX::BI__imma_m16n16k16_mma_s8:
837 case NVPTX::BI__imma_m16n16k16_mma_u8:
838 case NVPTX::BI__imma_m32n8k16_mma_s8:
839 case NVPTX::BI__imma_m32n8k16_mma_u8:
840 case NVPTX::BI__imma_m8n32k16_mma_s8:
841 case NVPTX::BI__imma_m8n32k16_mma_u8:
842 case NVPTX::BI__imma_m8n8k32_mma_s4:
843 case NVPTX::BI__imma_m8n8k32_mma_u4:
844 case NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1:
845 case NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1:
846 case NVPTX::BI__dmma_m8n8k4_mma_f64:
847 case NVPTX::BI__mma_bf16_m16n16k16_mma_f32:
848 case NVPTX::BI__mma_bf16_m8n32k16_mma_f32:
849 case NVPTX::BI__mma_bf16_m32n8k16_mma_f32:
850 case NVPTX::BI__mma_tf32_m16n16k8_mma_f32: {
 851 Address Dst = EmitPointerWithAlignment(E->getArg(0));
 852 Address SrcA = EmitPointerWithAlignment(E->getArg(1));
 853 Address SrcB = EmitPointerWithAlignment(E->getArg(2));
 854 Address SrcC = EmitPointerWithAlignment(E->getArg(3));
 855 std::optional<llvm::APSInt> LayoutArg =
 856 E->getArg(4)->getIntegerConstantExpr(getContext());
857 if (!LayoutArg)
858 return nullptr;
859 int Layout = LayoutArg->getSExtValue();
860 if (Layout < 0 || Layout > 3)
861 return nullptr;
862 llvm::APSInt SatfArg;
863 if (BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_xor_popc_b1 ||
864 BuiltinID == NVPTX::BI__bmma_m8n8k128_mma_and_popc_b1)
865 SatfArg = 0; // .b1 does not have satf argument.
 866 else if (std::optional<llvm::APSInt> OptSatfArg =
 867 E->getArg(5)->getIntegerConstantExpr(getContext()))
868 SatfArg = *OptSatfArg;
869 else
870 return nullptr;
871 bool Satf = SatfArg.getSExtValue();
872 NVPTXMmaInfo MI = getNVPTXMmaInfo(BuiltinID);
873 unsigned IID = MI.getMMAIntrinsic(Layout, Satf);
874 if (IID == 0) // Unsupported combination of Layout/Satf.
875 return nullptr;
876
 877 SmallVector<Value *, 24> Values;
 878 Function *Intrinsic = CGM.getIntrinsic(IID);
879 llvm::Type *AType = Intrinsic->getFunctionType()->getParamType(0);
880 // Load A
881 for (unsigned i = 0; i < MI.NumEltsA; ++i) {
882 Value *V = Builder.CreateAlignedLoad(
883 SrcA.getElementType(),
884 Builder.CreateGEP(SrcA.getElementType(), SrcA.emitRawPointer(*this),
 885 llvm::ConstantInt::get(IntTy, i)),
 886 CharUnits::fromQuantity(4));
887 Values.push_back(Builder.CreateBitCast(V, AType));
888 }
889 // Load B
890 llvm::Type *BType = Intrinsic->getFunctionType()->getParamType(MI.NumEltsA);
891 for (unsigned i = 0; i < MI.NumEltsB; ++i) {
892 Value *V = Builder.CreateAlignedLoad(
893 SrcB.getElementType(),
894 Builder.CreateGEP(SrcB.getElementType(), SrcB.emitRawPointer(*this),
 895 llvm::ConstantInt::get(IntTy, i)),
 896 CharUnits::fromQuantity(4));
897 Values.push_back(Builder.CreateBitCast(V, BType));
898 }
899 // Load C
900 llvm::Type *CType =
901 Intrinsic->getFunctionType()->getParamType(MI.NumEltsA + MI.NumEltsB);
902 for (unsigned i = 0; i < MI.NumEltsC; ++i) {
903 Value *V = Builder.CreateAlignedLoad(
904 SrcC.getElementType(),
905 Builder.CreateGEP(SrcC.getElementType(), SrcC.emitRawPointer(*this),
 906 llvm::ConstantInt::get(IntTy, i)),
 907 CharUnits::fromQuantity(4));
908 Values.push_back(Builder.CreateBitCast(V, CType));
909 }
910 Value *Result = Builder.CreateCall(Intrinsic, Values);
911 llvm::Type *DType = Dst.getElementType();
912 for (unsigned i = 0; i < MI.NumEltsD; ++i)
913 Builder.CreateAlignedStore(
914 Builder.CreateBitCast(Builder.CreateExtractValue(Result, i), DType),
915 Builder.CreateGEP(Dst.getElementType(), Dst.emitRawPointer(*this),
 916 llvm::ConstantInt::get(IntTy, i)),
 917 CharUnits::fromQuantity(4));
918 return Result;
919 }
920 // The following builtins require half type support
921 case NVPTX::BI__nvvm_ex2_approx_f16:
922 return MakeHalfType(
923 CGM.getIntrinsic(Intrinsic::nvvm_ex2_approx, Builder.getHalfTy()),
924 BuiltinID, E, *this);
925 case NVPTX::BI__nvvm_ex2_approx_f16x2:
926 return MakeHalfType(
927 CGM.getIntrinsic(Intrinsic::nvvm_ex2_approx,
928 FixedVectorType::get(Builder.getHalfTy(), 2)),
929 BuiltinID, E, *this);
930 case NVPTX::BI__nvvm_ff2f16x2_rn:
931 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn, BuiltinID, E, *this);
932 case NVPTX::BI__nvvm_ff2f16x2_rn_relu:
933 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rn_relu, BuiltinID, E, *this);
934 case NVPTX::BI__nvvm_ff2f16x2_rz:
935 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz, BuiltinID, E, *this);
936 case NVPTX::BI__nvvm_ff2f16x2_rz_relu:
937 return MakeHalfType(Intrinsic::nvvm_ff2f16x2_rz_relu, BuiltinID, E, *this);
938 case NVPTX::BI__nvvm_fma_rn_f16:
939 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16, BuiltinID, E, *this);
940 case NVPTX::BI__nvvm_fma_rn_f16x2:
941 return MakeHalfType(Intrinsic::nvvm_fma_rn_f16x2, BuiltinID, E, *this);
942 case NVPTX::BI__nvvm_fma_rn_ftz_f16:
943 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16, BuiltinID, E, *this);
944 case NVPTX::BI__nvvm_fma_rn_ftz_f16x2:
945 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_f16x2, BuiltinID, E, *this);
946 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16:
947 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16, BuiltinID, E,
948 *this);
949 case NVPTX::BI__nvvm_fma_rn_ftz_relu_f16x2:
950 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_relu_f16x2, BuiltinID, E,
951 *this);
952 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16:
953 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16, BuiltinID, E,
954 *this);
955 case NVPTX::BI__nvvm_fma_rn_ftz_sat_f16x2:
956 return MakeHalfType(Intrinsic::nvvm_fma_rn_ftz_sat_f16x2, BuiltinID, E,
957 *this);
958 case NVPTX::BI__nvvm_fma_rn_relu_f16:
959 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16, BuiltinID, E, *this);
960 case NVPTX::BI__nvvm_fma_rn_relu_f16x2:
961 return MakeHalfType(Intrinsic::nvvm_fma_rn_relu_f16x2, BuiltinID, E, *this);
962 case NVPTX::BI__nvvm_fma_rn_sat_f16:
963 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16, BuiltinID, E, *this);
964 case NVPTX::BI__nvvm_fma_rn_sat_f16x2:
965 return MakeHalfType(Intrinsic::nvvm_fma_rn_sat_f16x2, BuiltinID, E, *this);
966 case NVPTX::BI__nvvm_fmax_f16:
967 return MakeHalfType(Intrinsic::nvvm_fmax_f16, BuiltinID, E, *this);
968 case NVPTX::BI__nvvm_fmax_f16x2:
969 return MakeHalfType(Intrinsic::nvvm_fmax_f16x2, BuiltinID, E, *this);
970 case NVPTX::BI__nvvm_fmax_ftz_f16:
971 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16, BuiltinID, E, *this);
972 case NVPTX::BI__nvvm_fmax_ftz_f16x2:
973 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_f16x2, BuiltinID, E, *this);
974 case NVPTX::BI__nvvm_fmax_ftz_nan_f16:
975 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16, BuiltinID, E, *this);
976 case NVPTX::BI__nvvm_fmax_ftz_nan_f16x2:
977 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_f16x2, BuiltinID, E,
978 *this);
979 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16:
980 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16, BuiltinID,
981 E, *this);
982 case NVPTX::BI__nvvm_fmax_ftz_nan_xorsign_abs_f16x2:
983 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_nan_xorsign_abs_f16x2,
984 BuiltinID, E, *this);
985 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16:
986 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16, BuiltinID, E,
987 *this);
988 case NVPTX::BI__nvvm_fmax_ftz_xorsign_abs_f16x2:
989 return MakeHalfType(Intrinsic::nvvm_fmax_ftz_xorsign_abs_f16x2, BuiltinID,
990 E, *this);
991 case NVPTX::BI__nvvm_fmax_nan_f16:
992 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16, BuiltinID, E, *this);
993 case NVPTX::BI__nvvm_fmax_nan_f16x2:
994 return MakeHalfType(Intrinsic::nvvm_fmax_nan_f16x2, BuiltinID, E, *this);
995 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16:
996 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16, BuiltinID, E,
997 *this);
998 case NVPTX::BI__nvvm_fmax_nan_xorsign_abs_f16x2:
999 return MakeHalfType(Intrinsic::nvvm_fmax_nan_xorsign_abs_f16x2, BuiltinID,
1000 E, *this);
1001 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16:
1002 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16, BuiltinID, E,
1003 *this);
1004 case NVPTX::BI__nvvm_fmax_xorsign_abs_f16x2:
1005 return MakeHalfType(Intrinsic::nvvm_fmax_xorsign_abs_f16x2, BuiltinID, E,
1006 *this);
1007 case NVPTX::BI__nvvm_fmin_f16:
1008 return MakeHalfType(Intrinsic::nvvm_fmin_f16, BuiltinID, E, *this);
1009 case NVPTX::BI__nvvm_fmin_f16x2:
1010 return MakeHalfType(Intrinsic::nvvm_fmin_f16x2, BuiltinID, E, *this);
1011 case NVPTX::BI__nvvm_fmin_ftz_f16:
1012 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16, BuiltinID, E, *this);
1013 case NVPTX::BI__nvvm_fmin_ftz_f16x2:
1014 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_f16x2, BuiltinID, E, *this);
1015 case NVPTX::BI__nvvm_fmin_ftz_nan_f16:
1016 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16, BuiltinID, E, *this);
1017 case NVPTX::BI__nvvm_fmin_ftz_nan_f16x2:
1018 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_f16x2, BuiltinID, E,
1019 *this);
1020 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16:
1021 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16, BuiltinID,
1022 E, *this);
1023 case NVPTX::BI__nvvm_fmin_ftz_nan_xorsign_abs_f16x2:
1024 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_nan_xorsign_abs_f16x2,
1025 BuiltinID, E, *this);
1026 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16:
1027 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16, BuiltinID, E,
1028 *this);
1029 case NVPTX::BI__nvvm_fmin_ftz_xorsign_abs_f16x2:
1030 return MakeHalfType(Intrinsic::nvvm_fmin_ftz_xorsign_abs_f16x2, BuiltinID,
1031 E, *this);
1032 case NVPTX::BI__nvvm_fmin_nan_f16:
1033 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16, BuiltinID, E, *this);
1034 case NVPTX::BI__nvvm_fmin_nan_f16x2:
1035 return MakeHalfType(Intrinsic::nvvm_fmin_nan_f16x2, BuiltinID, E, *this);
1036 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16:
1037 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16, BuiltinID, E,
1038 *this);
1039 case NVPTX::BI__nvvm_fmin_nan_xorsign_abs_f16x2:
1040 return MakeHalfType(Intrinsic::nvvm_fmin_nan_xorsign_abs_f16x2, BuiltinID,
1041 E, *this);
1042 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16:
1043 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16, BuiltinID, E,
1044 *this);
1045 case NVPTX::BI__nvvm_fmin_xorsign_abs_f16x2:
1046 return MakeHalfType(Intrinsic::nvvm_fmin_xorsign_abs_f16x2, BuiltinID, E,
1047 *this);
1048 case NVPTX::BI__nvvm_fabs_f:
1049 case NVPTX::BI__nvvm_abs_bf16:
1050 case NVPTX::BI__nvvm_abs_bf16x2:
1051 case NVPTX::BI__nvvm_fabs_f16:
1052 case NVPTX::BI__nvvm_fabs_f16x2:
1053 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_fabs,
1054 EmitScalarExpr(E->getArg(0)));
1055 case NVPTX::BI__nvvm_fabs_ftz_f:
1056 case NVPTX::BI__nvvm_fabs_ftz_f16:
1057 case NVPTX::BI__nvvm_fabs_ftz_f16x2:
1058 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_fabs_ftz,
1059 EmitScalarExpr(E->getArg(0)));
1060 case NVPTX::BI__nvvm_fabs_d:
1061 return Builder.CreateUnaryIntrinsic(Intrinsic::fabs,
1062 EmitScalarExpr(E->getArg(0)));
1063 case NVPTX::BI__nvvm_ex2_approx_d:
1064 case NVPTX::BI__nvvm_ex2_approx_f:
1065 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_ex2_approx,
1066 EmitScalarExpr(E->getArg(0)));
1067 case NVPTX::BI__nvvm_ex2_approx_ftz_f:
1068 return Builder.CreateUnaryIntrinsic(Intrinsic::nvvm_ex2_approx_ftz,
1069 EmitScalarExpr(E->getArg(0)));
1070 case NVPTX::BI__nvvm_ldg_h:
1071 case NVPTX::BI__nvvm_ldg_h2:
1072 return EnsureNativeHalfSupport(BuiltinID, E, *this) ? MakeLdg(*this, E)
1073 : nullptr;
1074 case NVPTX::BI__nvvm_ldu_h:
1075 case NVPTX::BI__nvvm_ldu_h2:
1076 return EnsureNativeHalfSupport(BuiltinID, E, *this)
1077 ? MakeLdu(Intrinsic::nvvm_ldu_global_f, *this, E)
1078 : nullptr;
1079 case NVPTX::BI__nvvm_cp_async_ca_shared_global_4:
1080 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_4,
1081 Intrinsic::nvvm_cp_async_ca_shared_global_4_s, *this, E,
1082 4);
1083 case NVPTX::BI__nvvm_cp_async_ca_shared_global_8:
1084 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_8,
1085 Intrinsic::nvvm_cp_async_ca_shared_global_8_s, *this, E,
1086 8);
1087 case NVPTX::BI__nvvm_cp_async_ca_shared_global_16:
1088 return MakeCpAsync(Intrinsic::nvvm_cp_async_ca_shared_global_16,
1089 Intrinsic::nvvm_cp_async_ca_shared_global_16_s, *this, E,
1090 16);
1091 case NVPTX::BI__nvvm_cp_async_cg_shared_global_16:
1092 return MakeCpAsync(Intrinsic::nvvm_cp_async_cg_shared_global_16,
1093 Intrinsic::nvvm_cp_async_cg_shared_global_16_s, *this, E,
1094 16);
1095 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_x:
1096 return Builder.CreateCall(
1097 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_x));
1098 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_y:
1099 return Builder.CreateCall(
1100 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_y));
1101 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_z:
1102 return Builder.CreateCall(
1103 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_z));
1104 case NVPTX::BI__nvvm_read_ptx_sreg_clusterid_w:
1105 return Builder.CreateCall(
1106 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_clusterid_w));
1107 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_x:
1108 return Builder.CreateCall(
1109 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_x));
1110 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_y:
1111 return Builder.CreateCall(
1112 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_y));
1113 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_z:
1114 return Builder.CreateCall(
1115 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_z));
1116 case NVPTX::BI__nvvm_read_ptx_sreg_nclusterid_w:
1117 return Builder.CreateCall(
1118 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_nclusterid_w));
1119 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_x:
1120 return Builder.CreateCall(
1121 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_x));
1122 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_y:
1123 return Builder.CreateCall(
1124 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_y));
1125 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_z:
1126 return Builder.CreateCall(
1127 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_z));
1128 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctaid_w:
1129 return Builder.CreateCall(
1130 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctaid_w));
1131 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_x:
1132 return Builder.CreateCall(
1133 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_x));
1134 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_y:
1135 return Builder.CreateCall(
1136 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_y));
1137 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_z:
1138 return Builder.CreateCall(
1139 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_z));
1140 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctaid_w:
1141 return Builder.CreateCall(
1142 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctaid_w));
1143 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_ctarank:
1144 return Builder.CreateCall(
1145 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_ctarank));
1146 case NVPTX::BI__nvvm_read_ptx_sreg_cluster_nctarank:
1147 return Builder.CreateCall(
1148 CGM.getIntrinsic(Intrinsic::nvvm_read_ptx_sreg_cluster_nctarank));
1149 case NVPTX::BI__nvvm_is_explicit_cluster:
1150 return Builder.CreateCall(
1151 CGM.getIntrinsic(Intrinsic::nvvm_is_explicit_cluster));
1152 case NVPTX::BI__nvvm_isspacep_shared_cluster:
1153 return Builder.CreateCall(
1154 CGM.getIntrinsic(Intrinsic::nvvm_isspacep_shared_cluster),
1155 EmitScalarExpr(E->getArg(0)));
1156 case NVPTX::BI__nvvm_mapa:
1157 return Builder.CreateCall(
1158 CGM.getIntrinsic(Intrinsic::nvvm_mapa),
1159 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
1160 case NVPTX::BI__nvvm_mapa_shared_cluster:
1161 return Builder.CreateCall(
1162 CGM.getIntrinsic(Intrinsic::nvvm_mapa_shared_cluster),
1163 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
1164 case NVPTX::BI__nvvm_getctarank:
1165 return Builder.CreateCall(
1166 CGM.getIntrinsic(Intrinsic::nvvm_getctarank),
1167 EmitScalarExpr(E->getArg(0)));
1168 case NVPTX::BI__nvvm_getctarank_shared_cluster:
1169 return Builder.CreateCall(
1170 CGM.getIntrinsic(Intrinsic::nvvm_getctarank_shared_cluster),
1171 EmitScalarExpr(E->getArg(0)));
1172 case NVPTX::BI__nvvm_barrier_cluster_arrive:
1173 return Builder.CreateCall(
1174 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive));
1175 case NVPTX::BI__nvvm_barrier_cluster_arrive_relaxed:
1176 return Builder.CreateCall(
1177 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_arrive_relaxed));
1178 case NVPTX::BI__nvvm_barrier_cluster_wait:
1179 return Builder.CreateCall(
1180 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cluster_wait));
1181 case NVPTX::BI__nvvm_fence_sc_cluster:
1182 return Builder.CreateCall(
1183 CGM.getIntrinsic(Intrinsic::nvvm_fence_sc_cluster));
1184 case NVPTX::BI__nvvm_bar_sync:
1185 return Builder.CreateCall(
1186 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_aligned_all),
1187 EmitScalarExpr(E->getArg(0)));
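 // __syncthreads() lowers to the same aligned CTA barrier with barrier id 0,
 // i.e. it is equivalent to __nvvm_bar_sync(0).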
1188 case NVPTX::BI__syncthreads:
1189 return Builder.CreateCall(
1190 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_aligned_all),
1191 Builder.getInt32(0));
1192 case NVPTX::BI__nvvm_barrier_sync:
1193 return Builder.CreateCall(
1194 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_all),
1195 EmitScalarExpr(E->getArg(0)));
1196 case NVPTX::BI__nvvm_barrier_sync_cnt:
1197 return Builder.CreateCall(
1198 CGM.getIntrinsic(Intrinsic::nvvm_barrier_cta_sync_count),
1199 {EmitScalarExpr(E->getArg(0)), EmitScalarExpr(E->getArg(1))});
1200 default:
1201 return nullptr;
1202 }
1203}