//===------ SemaAMDGPU.cpp ------- AMDGPU target-specific routines --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements semantic analysis functions specific to AMDGPU.
//
//===----------------------------------------------------------------------===//

#include "clang/Sema/SemaAMDGPU.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Sema.h"
#include "llvm/Support/AMDGPUAddrSpace.h"
#include "llvm/Support/AtomicOrdering.h"
#include <cstdint>

namespace clang {

SemaAMDGPU::SemaAMDGPU(Sema &S) : SemaBase(S) {}

bool SemaAMDGPU::CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID,
                                                CallExpr *TheCall) {
  // position of memory order and scope arguments in the builtin
  unsigned OrderIndex, ScopeIndex;

  const auto *FD = SemaRef.getCurFunctionDecl(/*AllowLambda=*/true);
  assert(FD && "AMDGPU builtins should not be used outside of a function");
  llvm::StringMap<bool> CallerFeatureMap;
  getASTContext().getFunctionFeatureMap(CallerFeatureMap, FD);
  bool HasGFX950Insts =
      Builtin::evaluateRequiredTargetFeatures("gfx950-insts", CallerFeatureMap);

  switch (BuiltinID) {
  case AMDGPU::BI__builtin_amdgcn_raw_ptr_buffer_load_lds:
  case AMDGPU::BI__builtin_amdgcn_struct_ptr_buffer_load_lds:
  case AMDGPU::BI__builtin_amdgcn_load_to_lds:
  case AMDGPU::BI__builtin_amdgcn_global_load_lds: {
    constexpr const int SizeIdx = 2;
    llvm::APSInt Size;
    Expr *ArgExpr = TheCall->getArg(SizeIdx);
    [[maybe_unused]] ExprResult R =
        SemaRef.VerifyIntegerConstantExpression(ArgExpr, &Size);
    assert(!R.isInvalid());
    switch (Size.getSExtValue()) {
    case 1:
    case 2:
    case 4:
      return false;
    case 12:
    case 16: {
      if (HasGFX950Insts)
        return false;
      [[fallthrough]];
    }
    default:
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::err_amdgcn_load_lds_size_invalid_value)
          << ArgExpr->getSourceRange();
      SemaRef.targetDiag(ArgExpr->getExprLoc(),
                         diag::note_amdgcn_load_lds_size_valid_value)
          << HasGFX950Insts << ArgExpr->getSourceRange();
      return true;
    }
  }
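
  // Illustrative caller (not from this file; argument names assumed): in
  // device code, a size of 1, 2, or 4 always passes the check above, e.g.
  //   __builtin_amdgcn_load_to_lds(src, dst, /*size=*/4, /*offset=*/0,
  //                                /*aux=*/0);
  // while sizes 12 and 16 additionally require the gfx950-insts feature.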
  case AMDGPU::BI__builtin_amdgcn_get_fpenv:
  case AMDGPU::BI__builtin_amdgcn_set_fpenv:
    return false;
  case AMDGPU::BI__builtin_amdgcn_atomic_inc32:
  case AMDGPU::BI__builtin_amdgcn_atomic_inc64:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec32:
  case AMDGPU::BI__builtin_amdgcn_atomic_dec64:
    OrderIndex = 2;
    ScopeIndex = 3;
    break;
  case AMDGPU::BI__builtin_amdgcn_fence:
    OrderIndex = 0;
    ScopeIndex = 1;
    break;
  case AMDGPU::BI__builtin_amdgcn_mov_dpp:
    return checkMovDPPFunctionCall(TheCall, 5, 1);
  case AMDGPU::BI__builtin_amdgcn_mov_dpp8:
    return checkMovDPPFunctionCall(TheCall, 2, 1);
  case AMDGPU::BI__builtin_amdgcn_update_dpp:
    return checkMovDPPFunctionCall(TheCall, 6, 2);
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f16_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_bf16_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_bf8:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk8_f32_fp4:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f16_bf6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_bf16_bf6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_fp6:
  case AMDGPU::BI__builtin_amdgcn_cvt_scale_pk16_f32_bf6:
    return SemaRef.BuiltinConstantArgRange(TheCall, 2, 0, 15);
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_load_8x16B:
    return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/false);
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_32x4B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_16x8B:
  case AMDGPU::BI__builtin_amdgcn_cooperative_atomic_store_8x16B:
    return checkCoopAtomicFunctionCall(TheCall, /*IsStore=*/true);
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_load_mip_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_lz_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_l_cube_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_1darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_2darray_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f32_f32:
  case AMDGPU::BI__builtin_amdgcn_image_sample_d_3d_v4f16_f32:
  case AMDGPU::BI__builtin_amdgcn_image_gather4_lz_2d_v4f32_f32: {
    StringRef FeatureList(
        getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap)) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature)
          << FD->getDeclName() << FeatureList;
      return false;
    }

    unsigned ArgCount = TheCall->getNumArgs() - 1;
    llvm::APSInt Result;

    return (SemaRef.BuiltinConstantArg(TheCall, 0, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
  }
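
  // For the image load/sample group above, the first argument and the last
  // two arguments must be integer constant expressions (ArgCount is the index
  // of the final argument); the image stores below require the same of
  // argument 1 and the last two arguments.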
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_cube_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_1darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_2darray_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_3d_v4f16_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f32_i32:
  case AMDGPU::BI__builtin_amdgcn_image_store_mip_cube_v4f16_i32: {
    StringRef FeatureList(
        getASTContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
    if (!Builtin::evaluateRequiredTargetFeatures(FeatureList,
                                                 CallerFeatureMap)) {
      Diag(TheCall->getBeginLoc(), diag::err_builtin_needs_feature)
          << FD->getDeclName() << FeatureList;
      return false;
    }

    unsigned ArgCount = TheCall->getNumArgs() - 1;
    llvm::APSInt Result;

    return (SemaRef.BuiltinConstantArg(TheCall, 1, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, ArgCount, Result)) ||
           (SemaRef.BuiltinConstantArg(TheCall, (ArgCount - 1), Result));
  }
  default:
    return false;
  }

  ExprResult Arg = TheCall->getArg(OrderIndex);
  auto ArgExpr = Arg.get();
  Expr::EvalResult ArgResult;

  if (!ArgExpr->EvaluateAsInt(ArgResult, getASTContext()))
    return Diag(ArgExpr->getExprLoc(), diag::err_typecheck_expect_int)
           << ArgExpr->getType();
  auto Ord = ArgResult.Val.getInt().getZExtValue();

  // Check validity of the memory ordering as per C11 / C++11's memory model.
  // Only the fence builtin needs this check; the atomic inc/dec builtins
  // allow all memory orders.
  if (!llvm::isValidAtomicOrderingCABI(Ord))
    return Diag(ArgExpr->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << ArgExpr->getSourceRange();
  switch (static_cast<llvm::AtomicOrderingCABI>(Ord)) {
  case llvm::AtomicOrderingCABI::relaxed:
  case llvm::AtomicOrderingCABI::consume:
    if (BuiltinID == AMDGPU::BI__builtin_amdgcn_fence)
      return Diag(ArgExpr->getBeginLoc(),
                  diag::warn_atomic_op_has_invalid_memory_order)
             << 0 << ArgExpr->getSourceRange();
    break;
  case llvm::AtomicOrderingCABI::acquire:
  case llvm::AtomicOrderingCABI::release:
  case llvm::AtomicOrderingCABI::acq_rel:
  case llvm::AtomicOrderingCABI::seq_cst:
    break;
  }

  Arg = TheCall->getArg(ScopeIndex);
  ArgExpr = Arg.get();
  Expr::EvalResult ArgResult1;
  // Check that the sync scope is a constant expression.
  if (!ArgExpr->EvaluateAsConstantExpr(ArgResult1, getASTContext()))
    return Diag(ArgExpr->getExprLoc(), diag::err_expr_not_string_literal)
           << ArgExpr->getType();

  return false;
}
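
// Illustrative callers (not part of this file): for __builtin_amdgcn_fence the
// order must be a valid C ABI ordering other than relaxed/consume, and the
// scope must be a constant expression, so
//   __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");  // accepted
//   __builtin_amdgcn_fence(__ATOMIC_RELAXED, "workgroup");  // diagnosed
// The atomic inc/dec builtins accept any valid ordering value.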

bool SemaAMDGPU::checkCoopAtomicFunctionCall(CallExpr *TheCall, bool IsStore) {
  bool Fail = false;

  // First argument is a global or generic pointer.
  Expr *PtrArg = TheCall->getArg(0);
  QualType PtrTy = PtrArg->getType()->getPointeeType();
  unsigned AS = getASTContext().getTargetAddressSpace(PtrTy.getAddressSpace());
  if (AS != llvm::AMDGPUAS::FLAT_ADDRESS &&
      AS != llvm::AMDGPUAS::GLOBAL_ADDRESS) {
    Fail = true;
    Diag(TheCall->getBeginLoc(), diag::err_amdgcn_coop_atomic_invalid_as)
        << PtrArg->getSourceRange();
  }

  // Check the atomic ordering.
  Expr *AtomicOrdArg = TheCall->getArg(IsStore ? 2 : 1);
  Expr::EvalResult AtomicOrdArgRes;
  if (!AtomicOrdArg->EvaluateAsInt(AtomicOrdArgRes, getASTContext()))
    llvm_unreachable("Intrinsic requires imm for atomic ordering argument!");
  auto Ord =
      llvm::AtomicOrderingCABI(AtomicOrdArgRes.Val.getInt().getZExtValue());

  // The ordering may never be acq_rel; in addition, stores may not use
  // acquire and loads may not use release.
  if (!llvm::isValidAtomicOrderingCABI((unsigned)Ord) ||
      (Ord == llvm::AtomicOrderingCABI::acq_rel) ||
      Ord == (IsStore ? llvm::AtomicOrderingCABI::acquire
                      : llvm::AtomicOrderingCABI::release)) {
    return Diag(AtomicOrdArg->getBeginLoc(),
                diag::warn_atomic_op_has_invalid_memory_order)
           << 0 << AtomicOrdArg->getSourceRange();
  }

  // The last argument must be a string literal.
  Expr *Arg = TheCall->getArg(TheCall->getNumArgs() - 1);
  if (!isa<StringLiteral>(Arg->IgnoreParenImpCasts())) {
    Fail = true;
    Diag(TheCall->getBeginLoc(), diag::err_expr_not_string_literal)
        << Arg->getSourceRange();
  }

  return Fail;
}
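
// Illustrative caller with an assumed builtin signature: per the checks above,
// a cooperative atomic load takes (pointer, order, scope) and a store
// (pointer, value, order, scope); the pointer must be flat or global, acq_rel
// is never allowed (nor acquire for stores / release for loads), and the
// scope must be a string literal, e.g.
//   __builtin_amdgcn_cooperative_atomic_load_32x4B(p, __ATOMIC_ACQUIRE,
//                                                  "agent");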

bool SemaAMDGPU::checkMovDPPFunctionCall(CallExpr *TheCall, unsigned NumArgs,
                                         unsigned NumDataArgs) {
  assert(NumDataArgs <= 2);
  if (SemaRef.checkArgCountRange(TheCall, NumArgs, NumArgs))
    return true;
  Expr *Args[2];
  QualType ArgTys[2];
  for (unsigned I = 0; I != NumDataArgs; ++I) {
    Args[I] = TheCall->getArg(I);
    ArgTys[I] = Args[I]->getType();
    // TODO: Vectors can also be supported.
    if (!ArgTys[I]->isArithmeticType() || ArgTys[I]->isAnyComplexType()) {
      SemaRef.Diag(Args[I]->getBeginLoc(),
                   diag::err_typecheck_cond_expect_int_float)
          << ArgTys[I] << Args[I]->getSourceRange();
      return true;
    }
  }
  if (NumDataArgs < 2)
    return false;

  if (getASTContext().hasSameUnqualifiedType(ArgTys[0], ArgTys[1]))
    return false;

  if (((ArgTys[0]->isUnsignedIntegerType() &&
        ArgTys[1]->isSignedIntegerType()) ||
       (ArgTys[0]->isSignedIntegerType() &&
        ArgTys[1]->isUnsignedIntegerType())) &&
      getASTContext().getTypeSize(ArgTys[0]) ==
          getASTContext().getTypeSize(ArgTys[1]))
    return false;

  SemaRef.Diag(Args[1]->getBeginLoc(),
               diag::err_typecheck_call_different_arg_types)
      << ArgTys[0] << ArgTys[1];
  return true;
}
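
// Illustrative caller (argument names assumed): update_dpp carries two data
// arguments that must have the same unqualified type, or same-width
// signed/unsigned integer types; mov_dpp and mov_dpp8 carry one:
//   __builtin_amdgcn_update_dpp(old, src, /*dpp_ctrl=*/0, /*row_mask=*/0xf,
//                               /*bank_mask=*/0xf, /*bound_ctrl=*/false);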

static bool
checkAMDGPUFlatWorkGroupSizeArguments(Sema &S, Expr *MinExpr, Expr *MaxExpr,
                                      const AMDGPUFlatWorkGroupSizeAttr &Attr) {
  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (MinExpr->isValueDependent() || MaxExpr->isValueDependent())
    return false;

  uint32_t Min = 0;
  if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
    return true;

  uint32_t Max = 0;
  if (!S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
    return true;

  if (Min == 0 && Max != 0) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 0;
    return true;
  }
  if (Min > Max) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 1;
    return true;
  }

  return false;
}

AMDGPUFlatWorkGroupSizeAttr *
SemaAMDGPU::CreateAMDGPUFlatWorkGroupSizeAttr(const AttributeCommonInfo &CI,
                                              Expr *MinExpr, Expr *MaxExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUFlatWorkGroupSizeAttr TmpAttr(Context, CI, MinExpr, MaxExpr);

  if (checkAMDGPUFlatWorkGroupSizeArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
    return nullptr;
  return ::new (Context)
      AMDGPUFlatWorkGroupSizeAttr(Context, CI, MinExpr, MaxExpr);
}

void SemaAMDGPU::addAMDGPUFlatWorkGroupSizeAttr(Decl *D,
                                                const AttributeCommonInfo &CI,
                                                Expr *MinExpr, Expr *MaxExpr) {
  if (auto *Attr = CreateAMDGPUFlatWorkGroupSizeAttr(CI, MinExpr, MaxExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUFlatWorkGroupSizeAttr(Decl *D,
                                                   const ParsedAttr &AL) {
  Expr *MinExpr = AL.getArgAsExpr(0);
  Expr *MaxExpr = AL.getArgAsExpr(1);

  addAMDGPUFlatWorkGroupSizeAttr(D, AL, MinExpr, MaxExpr);
}
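
// Illustrative use (device code assumed): the checks above accept
//   __attribute__((amdgpu_flat_work_group_size(32, 256))) void kernel();
// and reject a zero minimum paired with a nonzero maximum, or min > max.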

static bool checkAMDGPUWavesPerEUArguments(Sema &S, Expr *MinExpr,
                                           Expr *MaxExpr,
                                           const AMDGPUWavesPerEUAttr &Attr) {
  if (S.DiagnoseUnexpandedParameterPack(MinExpr) ||
      (MaxExpr && S.DiagnoseUnexpandedParameterPack(MaxExpr)))
    return true;

  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (MinExpr->isValueDependent() || (MaxExpr && MaxExpr->isValueDependent()))
    return false;

  uint32_t Min = 0;
  if (!S.checkUInt32Argument(Attr, MinExpr, Min, 0))
    return true;

  uint32_t Max = 0;
  if (MaxExpr && !S.checkUInt32Argument(Attr, MaxExpr, Max, 1))
    return true;

  if (Min == 0 && Max != 0) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 0;
    return true;
  }
  if (Max != 0 && Min > Max) {
    S.Diag(Attr.getLocation(), diag::err_attribute_argument_invalid)
        << &Attr << 1;
    return true;
  }

  return false;
}

AMDGPUWavesPerEUAttr *
SemaAMDGPU::CreateAMDGPUWavesPerEUAttr(const AttributeCommonInfo &CI,
                                       Expr *MinExpr, Expr *MaxExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUWavesPerEUAttr TmpAttr(Context, CI, MinExpr, MaxExpr);

  if (checkAMDGPUWavesPerEUArguments(SemaRef, MinExpr, MaxExpr, TmpAttr))
    return nullptr;

  return ::new (Context) AMDGPUWavesPerEUAttr(Context, CI, MinExpr, MaxExpr);
}

void SemaAMDGPU::addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                                         Expr *MinExpr, Expr *MaxExpr) {
  if (auto *Attr = CreateAMDGPUWavesPerEUAttr(CI, MinExpr, MaxExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUWavesPerEUAttr(Decl *D, const ParsedAttr &AL) {
  if (!AL.checkAtLeastNumArgs(SemaRef, 1) ||
      !AL.checkAtMostNumArgs(SemaRef, 2))
    return;

  Expr *MinExpr = AL.getArgAsExpr(0);
  Expr *MaxExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;

  addAMDGPUWavesPerEUAttr(D, AL, MinExpr, MaxExpr);
}
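
// Illustrative use: amdgpu_waves_per_eu takes one or two arguments, e.g.
//   __attribute__((amdgpu_waves_per_eu(2, 4))) void kernel();
// subject to the same zero and min <= max constraints checked above.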

void SemaAMDGPU::handleAMDGPUNumSGPRAttr(Decl *D, const ParsedAttr &AL) {
  uint32_t NumSGPR = 0;
  Expr *NumSGPRExpr = AL.getArgAsExpr(0);
  if (!SemaRef.checkUInt32Argument(AL, NumSGPRExpr, NumSGPR))
    return;

  D->addAttr(::new (getASTContext())
                 AMDGPUNumSGPRAttr(getASTContext(), AL, NumSGPR));
}

void SemaAMDGPU::handleAMDGPUNumVGPRAttr(Decl *D, const ParsedAttr &AL) {
  uint32_t NumVGPR = 0;
  Expr *NumVGPRExpr = AL.getArgAsExpr(0);
  if (!SemaRef.checkUInt32Argument(AL, NumVGPRExpr, NumVGPR))
    return;

  D->addAttr(::new (getASTContext())
                 AMDGPUNumVGPRAttr(getASTContext(), AL, NumVGPR));
}
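
// Illustrative use: each of these attributes takes a single unsigned integer,
// e.g.
//   __attribute__((amdgpu_num_sgpr(32), amdgpu_num_vgpr(64))) void kernel();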

static bool
checkAMDGPUMaxNumWorkGroupsArguments(Sema &S, Expr *XExpr, Expr *YExpr,
                                     Expr *ZExpr,
                                     const AMDGPUMaxNumWorkGroupsAttr &Attr) {
  if (S.DiagnoseUnexpandedParameterPack(XExpr) ||
      (YExpr && S.DiagnoseUnexpandedParameterPack(YExpr)) ||
      (ZExpr && S.DiagnoseUnexpandedParameterPack(ZExpr)))
    return true;

  // Accept template arguments for now as they depend on something else.
  // We'll get to check them when they eventually get instantiated.
  if (XExpr->isValueDependent() || (YExpr && YExpr->isValueDependent()) ||
      (ZExpr && ZExpr->isValueDependent()))
    return false;

  uint32_t NumWG = 0;
  Expr *Exprs[3] = {XExpr, YExpr, ZExpr};
  for (int i = 0; i < 3; i++) {
    if (Exprs[i]) {
      if (!S.checkUInt32Argument(Attr, Exprs[i], NumWG, i,
                                 /*StrictlyUnsigned=*/true))
        return true;
      if (NumWG == 0) {
        S.Diag(Attr.getLoc(), diag::err_attribute_argument_is_zero)
            << &Attr << Exprs[i]->getSourceRange();
        return true;
      }
    }
  }

  return false;
}

AMDGPUMaxNumWorkGroupsAttr *SemaAMDGPU::CreateAMDGPUMaxNumWorkGroupsAttr(
    const AttributeCommonInfo &CI, Expr *XExpr, Expr *YExpr, Expr *ZExpr) {
  ASTContext &Context = getASTContext();
  AMDGPUMaxNumWorkGroupsAttr TmpAttr(Context, CI, XExpr, YExpr, ZExpr);
  assert(!SemaRef.isSFINAEContext() &&
         "Can't produce SFINAE diagnostic pointing to temporary attribute");

  if (checkAMDGPUMaxNumWorkGroupsArguments(SemaRef, XExpr, YExpr, ZExpr,
                                           TmpAttr))
    return nullptr;

  return ::new (Context)
      AMDGPUMaxNumWorkGroupsAttr(Context, CI, XExpr, YExpr, ZExpr);
}

void SemaAMDGPU::addAMDGPUMaxNumWorkGroupsAttr(Decl *D,
                                               const AttributeCommonInfo &CI,
                                               Expr *XExpr, Expr *YExpr,
                                               Expr *ZExpr) {
  if (auto *Attr = CreateAMDGPUMaxNumWorkGroupsAttr(CI, XExpr, YExpr, ZExpr))
    D->addAttr(Attr);
}

void SemaAMDGPU::handleAMDGPUMaxNumWorkGroupsAttr(Decl *D,
                                                  const ParsedAttr &AL) {
  Expr *YExpr = (AL.getNumArgs() > 1) ? AL.getArgAsExpr(1) : nullptr;
  Expr *ZExpr = (AL.getNumArgs() > 2) ? AL.getArgAsExpr(2) : nullptr;
  addAMDGPUMaxNumWorkGroupsAttr(D, AL, AL.getArgAsExpr(0), YExpr, ZExpr);
}
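
// Illustrative use: only the x dimension is mandatory, and every operand that
// is given must be a strictly positive unsigned value, e.g.
//   __attribute__((amdgpu_max_num_work_groups(16, 8, 1))) void kernel();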

} // namespace clang