CIRGenOpenACCRecipe.cpp
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Helpers to emit OpenACC clause recipes as CIR code.
//
//===----------------------------------------------------------------------===//

#include <numeric>

#include "CIRGenOpenACCRecipe.h"

namespace clang::CIRGen {
mlir::Block *OpenACCRecipeBuilderBase::createRecipeBlock(mlir::Region &region,
                                                         mlir::Type opTy,
                                                         mlir::Location loc,
                                                         size_t numBounds,
                                                         bool isInit) {
  llvm::SmallVector<mlir::Type> types;
  types.reserve(numBounds + 2);
  types.push_back(opTy);
  // The init section is the only one that doesn't have TWO copies of the
  // operation-type. Copy has a to/from, and destroy has a
  // 'reference'/'privatized' copy version.
  if (!isInit)
    types.push_back(opTy);

  auto boundsTy = mlir::acc::DataBoundsType::get(&cgf.getMLIRContext());
  for (size_t i = 0; i < numBounds; ++i)
    types.push_back(boundsTy);

  llvm::SmallVector<mlir::Location> locs{types.size(), loc};
  return builder.createBlock(&region, region.end(), types, locs);
}
void OpenACCRecipeBuilderBase::makeAllocaCopy(mlir::Location loc,
                                              mlir::Type copyType,
                                              mlir::Value numEltsToCopy,
                                              mlir::Value offsetPerSubarray,
                                              mlir::Value destAlloca,
                                              mlir::Value srcAlloca) {
  mlir::OpBuilder::InsertionGuard guardCase(builder);

  mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy);
  auto itrPtrTy = cir::PointerType::get(itrTy);
  mlir::IntegerAttr itrAlign =
      cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars(
          cgf.getContext().UnsignedLongLongTy));

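  // A sketch of what the loop below emits, in C-like pseudocode (the names
  // are the values passed in, not emitted identifiers):
  //   for (unsigned long long itr = 0; itr < numEltsToCopy; ++itr)
  //     destAlloca[itr] = &srcAlloca[itr * offsetPerSubarray];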
  auto loopBuilder = [&]() {
    auto itr =
        cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr", itrAlign);
    cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0);
    builder.CIRBaseBuilderTy::createStore(loc, constZero, itr);
    builder.createFor(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // itr < numEltsToCopy
          // Enforce a trip count of 1 if there wasn't any element count; this
          // way we can just use this loop with a constant bounds instead of a
          // separate code path.
          if (!numEltsToCopy)
            numEltsToCopy = builder.getConstInt(loc, itrTy, 1);

          auto loadCur = cir::LoadOp::create(builder, loc, {itr});
          auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadCur,
                                           numEltsToCopy);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // destAlloca[itr] = srcAlloca[offsetPerSubarray * itr];
          auto loadCur = cir::LoadOp::create(builder, loc, {itr});
          auto srcOffset = builder.createMul(loc, offsetPerSubarray, loadCur);

          auto ptrToOffsetIntoSrc = cir::PtrStrideOp::create(
              builder, loc, copyType, srcAlloca, srcOffset);

          auto offsetIntoDecayDest = cir::PtrStrideOp::create(
              builder, loc, builder.getPointerTo(copyType), destAlloca,
              loadCur);

          builder.CIRBaseBuilderTy::createStore(loc, ptrToOffsetIntoSrc,
                                                offsetIntoDecayDest);
          builder.createYield(loc);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          // Simple increment of the iterator.
          auto load = cir::LoadOp::create(builder, loc, {itr});
          auto inc = cir::UnaryOp::create(builder, loc, load.getType(),
                                          cir::UnaryOpKind::Inc, load);
          builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
          builder.createYield(loc);
        });
  };

  cir::ScopeOp::create(builder, loc,
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         loopBuilder();
                         builder.createYield(loc);
                       });
}

mlir::Value OpenACCRecipeBuilderBase::makeBoundsAlloca(
    mlir::Block *block, SourceRange exprRange, mlir::Location loc,
    std::string_view allocaName, size_t numBounds,
    llvm::ArrayRef<QualType> boundTypes) {
  mlir::OpBuilder::InsertionGuard guardCase(builder);

  // Get the range of bounds arguments, which are all but the 1st arg.
  llvm::ArrayRef<mlir::BlockArgument> boundsRange =
      block->getArguments().drop_front(1);

  // boundTypes contains the before and after of each bound, so it ends up
  // having 1 extra. Assert this is the case to ensure we don't call this in
  // the wrong 'block'.
  assert(boundsRange.size() + 1 == boundTypes.size());

  mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy);
  auto idxType = mlir::IndexType::get(&cgf.getMLIRContext());

  auto getUpperBound = [&](mlir::Value bound) {
    auto upperBoundVal =
        mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound);
    return mlir::UnrealizedConversionCastOp::create(builder, loc, itrTy,
                                                    upperBoundVal.getResult())
        .getResult(0);
  };

  auto isArrayTy = [&](QualType ty) {
    if (ty->isArrayType() && !ty->isConstantArrayType())
      cgf.cgm.errorNYI(exprRange, "OpenACC recipe init for VLAs");
    return ty->isConstantArrayType();
  };

  mlir::Type topLevelTy = cgf.convertType(boundTypes.back());
  cir::PointerType topLevelTyPtr = builder.getPointerTo(topLevelTy);
  // Do an alloca for the 'top' level type without bounds.
  mlir::Value initialAlloca = builder.createAlloca(
      loc, topLevelTyPtr, topLevelTy, allocaName,
      cgf.getContext().getTypeAlignInChars(boundTypes.back()));

  bool lastBoundWasArray = isArrayTy(boundTypes.back());

  // Track the most recent alloca so each new level of storage can be linked
  // back into it (via makeAllocaCopy) correctly.
  mlir::Value lastAlloca = initialAlloca;

  // Since we iterate the types in reverse, this sets up each index of
  // boundsRange to correspond to the type 'after' application of that bound.
  llvm::ArrayRef<QualType> boundResults = boundTypes.drop_back(1);

  // Collect the 'do we have any allocas needed after this type' list.
  llvm::SmallVector<bool> allocasLeftArr;
  llvm::ArrayRef<QualType> resultTypes = boundTypes.drop_front();
  std::transform_inclusive_scan(
      resultTypes.begin(), resultTypes.end(),
      std::back_inserter(allocasLeftArr), std::plus<bool>{},
      [](QualType ty) { return !ty->isConstantArrayType(); }, false);

  // Keep track of the number of 'elements' that we're allocating. Individual
  // allocas should multiply this by the size of their current allocation.
  mlir::Value cumulativeElts;
  for (auto [bound, resultType, allocasLeft] : llvm::reverse(
           llvm::zip_equal(boundsRange, boundResults, allocasLeftArr))) {

    // If there is no further 'alloca' operation we need to do, we can skip
    // creating the UB/multiplications/etc.
    if (!allocasLeft)
      break;

    // First: figure out the number of elements in the current 'bound' list.
    mlir::Value eltsPerSubArray = getUpperBound(bound);
    mlir::Value eltsToAlloca;

    // If we are in a sub-bounds, the total number of elements to alloca is
    // the product of that one and the current 'bounds' size. That is, for
    // arr[5][5] we would need 25 elements, not just 5. Else it is just the
    // current number of elements.
    if (cumulativeElts)
      eltsToAlloca = builder.createMul(loc, eltsPerSubArray, cumulativeElts);
    else
      eltsToAlloca = eltsPerSubArray;

    if (!lastBoundWasArray) {
      // If we have to do an allocation, figure out the size of the
      // allocation. alloca takes the number of bytes, not elements.
      TypeInfoChars eltInfo = cgf.getContext().getTypeInfoInChars(resultType);
      cir::ConstantOp eltSize = builder.getConstInt(
          loc, itrTy, eltInfo.Width.alignTo(eltInfo.Align).getQuantity());
      mlir::Value curSize = builder.createMul(loc, eltsToAlloca, eltSize);

      mlir::Type eltTy = cgf.convertType(resultType);
      cir::PointerType ptrTy = builder.getPointerTo(eltTy);
      mlir::Value curAlloca = builder.createAlloca(
          loc, ptrTy, eltTy, "openacc.init.bounds",
          cgf.getContext().getTypeAlignInChars(resultType), curSize);

      makeAllocaCopy(loc, ptrTy, cumulativeElts, eltsPerSubArray, lastAlloca,
                     curAlloca);
      lastAlloca = curAlloca;
    } else {
      // In the case of an array, we just need to decay the pointer, so do a
      // zero-offset stride on the last alloca to decay it down an array
      // level.
      cir::ConstantOp constZero = builder.getConstInt(loc, itrTy, 0);
      lastAlloca = builder.getArrayElement(loc, loc, lastAlloca,
                                           cgf.convertType(resultType),
                                           constZero, /*shouldDecay=*/true);
    }

    cumulativeElts = eltsToAlloca;
    lastBoundWasArray = isArrayTy(resultType);
  }
  return initialAlloca;
}

std::pair<mlir::Value, mlir::Value> OpenACCRecipeBuilderBase::createBoundsLoop(
    mlir::Value subscriptedValue, mlir::Value subscriptedValue2,
    mlir::Value bound, mlir::Location loc, bool inverse) {
  mlir::Operation *bodyInsertLoc;

  mlir::Type itrTy = cgf.cgm.convertType(cgf.getContext().UnsignedLongLongTy);
  auto itrPtrTy = cir::PointerType::get(itrTy);
  mlir::IntegerAttr itrAlign =
      cgf.cgm.getSize(cgf.getContext().getTypeAlignInChars(
          cgf.getContext().UnsignedLongLongTy));
  auto idxType = mlir::IndexType::get(&cgf.getMLIRContext());

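  // Subscript one level into 'subVal': for a constant array we index and
  // decay the result; otherwise it must be a pointer, so load it and stride
  // by the iterator value.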
  auto doSubscriptOp = [&](mlir::Value subVal,
                           cir::LoadOp idxLoad) -> mlir::Value {
    auto eltTy = cast<cir::PointerType>(subVal.getType()).getPointee();

    if (auto arrayTy = dyn_cast<cir::ArrayType>(eltTy))
      return builder.getArrayElement(loc, loc, subVal, arrayTy.getElementType(),
                                     idxLoad,
                                     /*shouldDecay=*/true);

    assert(isa<cir::PointerType>(eltTy));

    auto eltLoad = cir::LoadOp::create(builder, loc, {subVal});

    return cir::PtrStrideOp::create(builder, loc, eltLoad.getType(), eltLoad,
                                    idxLoad);
  };

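  // Conceptually (a sketch of the emitted structure, not literal source):
  //   for (itr = lowerbound; itr < upperbound; ++itr)       // forward
  //   for (itr = upperbound - 1; itr >= lowerbound; --itr)  // inverse
  // with both subscripted values indexed by 'itr' each iteration; the
  // insertion point is left inside the loop body for the caller.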
  auto forStmtBuilder = [&]() {
    // Get the lower and upper bound for iterating over.
    auto lowerBoundVal =
        mlir::acc::GetLowerboundOp::create(builder, loc, idxType, bound);
    auto lbConversion = mlir::UnrealizedConversionCastOp::create(
        builder, loc, itrTy, lowerBoundVal.getResult());
    auto upperBoundVal =
        mlir::acc::GetUpperboundOp::create(builder, loc, idxType, bound);
    auto ubConversion = mlir::UnrealizedConversionCastOp::create(
        builder, loc, itrTy, upperBoundVal.getResult());

    // Create a memory location for the iterator.
    auto itr =
        cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "iter", itrAlign);
    // Store to the iterator: either the lower bound, or if this is an inverse
    // loop, the upper bound.
    if (inverse) {
      cir::ConstantOp constOne = builder.getConstInt(loc, itrTy, 1);

      auto sub = cir::BinOp::create(builder, loc, itrTy, cir::BinOpKind::Sub,
                                    ubConversion.getResult(0), constOne);

      // The upper bound is exclusive, so subtract 1.
      builder.CIRBaseBuilderTy::createStore(loc, sub, itr);
    } else {
      // The lower bound is inclusive, so we can use it directly.
      builder.CIRBaseBuilderTy::createStore(loc, lbConversion.getResult(0),
                                            itr);
    }
    // Save the 'end' iterator based on whether we are inverted or not. This
    // end iterator never changes, so we can just get it and convert it; no
    // need to store/load/etc.
    auto endItr = inverse ? lbConversion : ubConversion;

    builder.createFor(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          auto loadCur = cir::LoadOp::create(builder, loc, {itr});
          // Use '>=' for the inverse loop (which counts down to the inclusive
          // lower bound) and '<' otherwise (the upper bound is exclusive).
          auto cmp = builder.createCompare(
              loc, inverse ? cir::CmpOpKind::ge : cir::CmpOpKind::lt, loadCur,
              endItr.getResult(0));
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          auto load = cir::LoadOp::create(builder, loc, {itr});

          if (subscriptedValue)
            subscriptedValue = doSubscriptOp(subscriptedValue, load);
          if (subscriptedValue2)
            subscriptedValue2 = doSubscriptOp(subscriptedValue2, load);
          bodyInsertLoc = builder.createYield(loc);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          auto load = cir::LoadOp::create(builder, loc, {itr});
          auto unary = cir::UnaryOp::create(
              builder, loc, load.getType(),
              inverse ? cir::UnaryOpKind::Dec : cir::UnaryOpKind::Inc, load);
          builder.CIRBaseBuilderTy::createStore(loc, unary, itr);
          builder.createYield(loc);
        });
  };

  cir::ScopeOp::create(builder, loc,
                       [&](mlir::OpBuilder &b, mlir::Location loc) {
                         forStmtBuilder();
                         builder.createYield(loc);
                       });

  // Leave the insertion point inside the loop body so the caller can emit
  // per-element code there.
  builder.setInsertionPoint(bodyInsertLoc);
  return {subscriptedValue, subscriptedValue2};
}

mlir::acc::ReductionOperator
OpenACCRecipeBuilderBase::convertReductionOp(OpenACCReductionOperator op) {
  switch (op) {
  case OpenACCReductionOperator::Addition:
    return mlir::acc::ReductionOperator::AccAdd;
  case OpenACCReductionOperator::Multiplication:
    return mlir::acc::ReductionOperator::AccMul;
  case OpenACCReductionOperator::Max:
    return mlir::acc::ReductionOperator::AccMax;
  case OpenACCReductionOperator::Min:
    return mlir::acc::ReductionOperator::AccMin;
  case OpenACCReductionOperator::BitwiseAnd:
    return mlir::acc::ReductionOperator::AccIand;
  case OpenACCReductionOperator::BitwiseOr:
    return mlir::acc::ReductionOperator::AccIor;
  case OpenACCReductionOperator::BitwiseXOr:
    return mlir::acc::ReductionOperator::AccXor;
  case OpenACCReductionOperator::And:
    return mlir::acc::ReductionOperator::AccLand;
  case OpenACCReductionOperator::Or:
    return mlir::acc::ReductionOperator::AccLor;
  case OpenACCReductionOperator::Invalid:
    llvm_unreachable("invalid reduction operator");
  }

  llvm_unreachable("invalid reduction operator");
}

// This function generates the 'destroy' section for a recipe. Note
// that this function is not 'insertion point' clean, in that it alters the
// insertion point to be inside of the 'destroy' section of the recipe, but
// doesn't restore it afterwards.
void OpenACCRecipeBuilderBase::createRecipeDestroySection(
    mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
    CharUnits alignment, QualType origType, size_t numBounds, QualType baseType,
    mlir::Region &destroyRegion) {
  mlir::Block *block = createRecipeBlock(destroyRegion, mainOp.getType(), loc,
                                         numBounds, /*isInit=*/false);
  builder.setInsertionPointToEnd(&destroyRegion.back());
  CIRGenFunction::LexicalScope ls(cgf, loc, block);

  mlir::Type elementTy =
      mlir::cast<cir::PointerType>(mainOp.getType()).getPointee();
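  // Emit a destructor call for 'var' (of CIR type 'ty') at the recipe's
  // alignment, using the C++ destructor destroyer for the original type.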
  auto emitDestroy = [&](mlir::Value var, mlir::Type ty) {
    Address addr{var, ty, alignment};
    cgf.emitDestroy(addr, origType,
                    cgf.getDestroyer(QualType::DK_cxx_destructor));
  };

  if (numBounds) {
    mlir::OpBuilder::InsertionGuard guardCase(builder);
    // Get the range of bounds arguments, which are all but the 1st 2. The 1st
    // is a 'reference', the 2nd is the 'private' variant we need to destroy
    // from.
    llvm::ArrayRef<mlir::BlockArgument> boundsRange =
        block->getArguments().drop_front(2);

    mlir::Value subscriptedValue = block->getArgument(1);
    for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
      subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc,
                                          /*inverse=*/true);

    emitDestroy(subscriptedValue, cgf.cgm.convertType(origType));
  } else {
    // If we don't have any bounds, we can just destroy the variable directly.
    // The destroy region has a signature of "original item, privatized item".
    // So the 2nd item is the one that needs destroying; the former is just
    // for reference and we don't really have a need for it at the moment.
    emitDestroy(block->getArgument(1), elementTy);
  }

  ls.forceCleanup();
  mlir::acc::YieldOp::create(builder, locEnd);
}
void OpenACCRecipeBuilderBase::makeBoundsInit(
    mlir::Value alloca, mlir::Location loc, mlir::Block *block,
    const VarDecl *allocaDecl, QualType origType, bool isInitSection) {
  mlir::OpBuilder::InsertionGuard guardCase(builder);
  builder.setInsertionPointToEnd(block);
  CIRGenFunction::LexicalScope ls(cgf, loc, block);

  CIRGenFunction::AutoVarEmission tempDeclEmission{*allocaDecl};
  tempDeclEmission.emittedAsOffload = true;

  // The init section is the only one of the handful that has just a single
  // argument for the 'type', so we have to drop 1 for init; other callers of
  // this will need to drop 2.
  llvm::ArrayRef<mlir::BlockArgument> boundsRange =
      block->getArguments().drop_front(isInitSection ? 1 : 2);

  mlir::Value subscriptedValue = alloca;
  for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
    subscriptedValue = createBoundsLoop(subscriptedValue, boundArg, loc,
                                        /*inverse=*/false);

  tempDeclEmission.setAllocatedAddress(
      Address{subscriptedValue, cgf.convertType(origType),
              cgf.getContext().getDeclAlign(allocaDecl)});
  cgf.emitAutoVarInit(tempDeclEmission);
}

// TODO: OpenACC: when we start doing firstprivate for arrays/VLAs/etc., we
// probably need to do a little work to move the 'init' calls into the 'copy'
// region instead.
void OpenACCRecipeBuilderBase::createInitRecipe(
    mlir::Location loc, mlir::Location locEnd, SourceRange exprRange,
    mlir::Value mainOp, mlir::Region &recipeInitRegion, size_t numBounds,
    llvm::ArrayRef<QualType> boundTypes, const VarDecl *allocaDecl,
    QualType origType, bool emitInitExpr) {
  assert(allocaDecl && "Required recipe variable not set?");
  CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, allocaDecl};

  mlir::Block *block = createRecipeBlock(recipeInitRegion, mainOp.getType(),
                                         loc, numBounds, /*isInit=*/true);
  builder.setInsertionPointToEnd(&recipeInitRegion.back());
  CIRGenFunction::LexicalScope ls(cgf, loc, block);

  const Type *allocaPointeeType =
      allocaDecl->getType()->getPointeeOrArrayElementType();
  // We are OK with no init for builtins, arrays of builtins, or pointers;
  // otherwise we should NYI so we know to go look for these.
  if (cgf.getContext().getLangOpts().CPlusPlus && !allocaDecl->getInit() &&
      !allocaDecl->getType()->isPointerType() &&
      !allocaPointeeType->isBuiltinType() &&
      !allocaPointeeType->isPointerType()) {
    // If we don't have any initialization recipe, we failed during Sema to
    // initialize this correctly. If we disable the
    // Sema::TentativeAnalysisScopes in SemaOpenACC::CreateInitRecipe, it'll
    // emit an error to tell us. However, emitting those errors during
    // production is a violation of the standard, so we cannot do them.
    cgf.cgm.errorNYI(exprRange, "private/reduction default-init recipe");
  }

  if (!numBounds) {
    // This is an 'easy' case: we just have to use the builtin init stuff to
    // initialize this variable correctly.
    CIRGenFunction::AutoVarEmission tempDeclEmission =
        cgf.emitAutoVarAlloca(*allocaDecl, builder.saveInsertionPoint());
    if (emitInitExpr)
      cgf.emitAutoVarInit(tempDeclEmission);
  } else {
    mlir::Value alloca = makeBoundsAlloca(
        block, exprRange, loc, allocaDecl->getName(), numBounds, boundTypes);

    // If the initializer is trivial, there is nothing to do here, so save
    // ourselves some effort.
    if (emitInitExpr && allocaDecl->getInit() &&
        (!cgf.isTrivialInitializer(allocaDecl->getInit()) ||
         cgf.getContext().getLangOpts().getTrivialAutoVarInit() !=
             LangOptions::TrivialAutoVarInitKind::Uninitialized))
      makeBoundsInit(alloca, loc, block, allocaDecl, origType,
                     /*isInitSection=*/true);
  }

  ls.forceCleanup();
  mlir::acc::YieldOp::create(builder, locEnd);
}

void OpenACCRecipeBuilderBase::createFirstprivateRecipeCopy(
    mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
    const VarDecl *allocaDecl, const VarDecl *temporary,
    mlir::Region &copyRegion, size_t numBounds) {
  mlir::Block *block = createRecipeBlock(copyRegion, mainOp.getType(), loc,
                                         numBounds, /*isInit=*/false);
  builder.setInsertionPointToEnd(&copyRegion.back());
  CIRGenFunction::LexicalScope ls(cgf, loc, block);

  mlir::Value fromArg = block->getArgument(0);
  mlir::Value toArg = block->getArgument(1);

  llvm::ArrayRef<mlir::BlockArgument> boundsRange =
      block->getArguments().drop_front(2);

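  // Apply the bounds one level at a time so that 'from' and 'to' end up
  // addressing a single element; createBoundsLoop leaves the insertion point
  // inside the innermost loop body.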
  for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
    std::tie(fromArg, toArg) =
        createBoundsLoop(fromArg, toArg, boundArg, loc, /*inverse=*/false);

  // Set up the 'to' address.
  mlir::Type elementTy =
      mlir::cast<cir::PointerType>(toArg.getType()).getPointee();
  CIRGenFunction::AutoVarEmission tempDeclEmission(*allocaDecl);
  tempDeclEmission.emittedAsOffload = true;
  tempDeclEmission.setAllocatedAddress(
      Address{toArg, elementTy, cgf.getContext().getDeclAlign(allocaDecl)});

  // Set up the 'from' address from the temporary.
  CIRGenFunction::DeclMapRevertingRAII declMapRAII{cgf, temporary};
  cgf.setAddrOfLocalVar(
      temporary,
      Address{fromArg, elementTy, cgf.getContext().getDeclAlign(allocaDecl)});
  cgf.emitAutoVarInit(tempDeclEmission);

  builder.setInsertionPointToEnd(&copyRegion.back());
  ls.forceCleanup();
  mlir::acc::YieldOp::create(builder, locEnd);
}

// This function generates the 'combiner' section for a reduction recipe. Note
// that this function is not 'insertion point' clean, in that it alters the
// insertion point to be inside of the 'combiner' section of the recipe, but
// doesn't restore it afterwards.
void OpenACCRecipeBuilderBase::createReductionRecipeCombiner(
    mlir::Location loc, mlir::Location locEnd, mlir::Value mainOp,
    mlir::acc::ReductionRecipeOp recipe, size_t numBounds, QualType origType,
    llvm::ArrayRef<OpenACCReductionRecipe::CombinerRecipe> combinerRecipes) {
  mlir::Block *block =
      createRecipeBlock(recipe.getCombinerRegion(), mainOp.getType(), loc,
                        numBounds, /*isInit=*/false);
  builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
  CIRGenFunction::LexicalScope ls(cgf, loc, block);

  mlir::Value lhsArg = block->getArgument(0);
  mlir::Value rhsArg = block->getArgument(1);
  llvm::ArrayRef<mlir::BlockArgument> boundsRange =
      block->getArguments().drop_front(2);

  if (llvm::any_of(combinerRecipes, [](auto &r) { return r.Op == nullptr; })) {
    cgf.cgm.errorNYI(loc, "OpenACC Reduction combiner not generated");
    mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
    return;
  }

  // Apply the bounds so that the element accesses below are emitted
  // correctly.
  for (mlir::BlockArgument boundArg : llvm::reverse(boundsRange))
    std::tie(lhsArg, rhsArg) =
        createBoundsLoop(lhsArg, rhsArg, boundArg, loc, /*inverse=*/false);

  // Emitter for when we know this isn't a struct or array we have to loop
  // through. This also works for a 'field' once the get-member call has been
  // made.
  auto emitSingleCombiner =
      [&](mlir::Value lhsArg, mlir::Value rhsArg,
          const OpenACCReductionRecipe::CombinerRecipe &combiner) {
        mlir::Type elementTy =
            mlir::cast<cir::PointerType>(lhsArg.getType()).getPointee();
        CIRGenFunction::DeclMapRevertingRAII declMapRAIILhs{cgf, combiner.LHS};
        cgf.setAddrOfLocalVar(
            combiner.LHS, Address{lhsArg, elementTy,
                                  cgf.getContext().getDeclAlign(combiner.LHS)});
        CIRGenFunction::DeclMapRevertingRAII declMapRAIIRhs{cgf, combiner.RHS};
        cgf.setAddrOfLocalVar(
            combiner.RHS, Address{rhsArg, elementTy,
                                  cgf.getContext().getDeclAlign(combiner.RHS)});

        [[maybe_unused]] mlir::LogicalResult stmtRes =
            cgf.emitStmt(combiner.Op, /*useCurrentScope=*/true);
      };

  // Emitter for when we know this is either a non-array or an element of an
  // array (which itself shouldn't be an array type). This generates the
  // combination code for an entire 'array-element'/non-array, including
  // diving into each field of a struct (if necessary).
  auto emitCombiner = [&](mlir::Value lhsArg, mlir::Value rhsArg, QualType ty) {
    assert(!ty->isArrayType() && "Array type shouldn't get here");
    if (const auto *rd = ty->getAsRecordDecl()) {
      if (combinerRecipes.size() == 1 &&
          cgf.getContext().hasSameType(ty, combinerRecipes[0].LHS->getType())) {
        // If this is a 'top level' operator on the type, we can just emit this
        // as a simple one.
        emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
      } else {
        // Else we have to handle each individual field after a get-member.
        for (const auto &[field, combiner] :
             llvm::zip_equal(rd->fields(), combinerRecipes)) {
          mlir::Type fieldType = cgf.convertType(field->getType());
          auto fieldPtr = cir::PointerType::get(fieldType);

          mlir::Value lhsField = builder.createGetMember(
              loc, fieldPtr, lhsArg, field->getName(), field->getFieldIndex());
          mlir::Value rhsField = builder.createGetMember(
              loc, fieldPtr, rhsArg, field->getName(), field->getFieldIndex());

          emitSingleCombiner(lhsField, rhsField, combiner);
        }
      }

    } else {
      // If this is a single thing (and we know it isn't an array, as Sema
      // wouldn't let us get here with one), we can just do a normal emit call.
      emitSingleCombiner(lhsArg, rhsArg, combinerRecipes[0]);
    }
  };

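  // Dispatch on the original type: constant arrays get an element-by-element
  // loop (conceptually: for (i = 0; i < N; ++i) combine(lhs[i], rhs[i]);),
  // other array types are NYI, and everything else is combined directly.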
  if (const auto *cat = cgf.getContext().getAsConstantArrayType(origType)) {
    // If we're in an array, we have to emit the combiner for each element of
    // the array.
    auto itrTy = mlir::cast<cir::IntType>(cgf.PtrDiffTy);
    auto itrPtrTy = cir::PointerType::get(itrTy);

    mlir::Value zero =
        builder.getConstInt(loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), 0);
    mlir::Value itr =
        cir::AllocaOp::create(builder, loc, itrPtrTy, itrTy, "itr",
                              cgf.cgm.getSize(cgf.getPointerAlign()));
    builder.CIRBaseBuilderTy::createStore(loc, zero, itr);

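    // Unlike createBoundsLoop, the insertion point is moved past the loop
    // once it is built; each element's combiner is emitted inside the body
    // builder below.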
    builder.setInsertionPointAfter(builder.createFor(
        loc,
        /*condBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
          mlir::Value arraySize = builder.getConstInt(
              loc, mlir::cast<cir::IntType>(cgf.PtrDiffTy), cat->getZExtSize());
          auto cmp = builder.createCompare(loc, cir::CmpOpKind::lt, loadItr,
                                           arraySize);
          builder.createCondition(cmp);
        },
        /*bodyBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
          auto lhsElt = builder.getArrayElement(
              loc, loc, lhsArg, cgf.convertType(cat->getElementType()), loadItr,
              /*shouldDecay=*/true);
          auto rhsElt = builder.getArrayElement(
              loc, loc, rhsArg, cgf.convertType(cat->getElementType()), loadItr,
              /*shouldDecay=*/true);

          emitCombiner(lhsElt, rhsElt, cat->getElementType());
          builder.createYield(loc);
        },
        /*stepBuilder=*/
        [&](mlir::OpBuilder &b, mlir::Location loc) {
          auto loadItr = cir::LoadOp::create(builder, loc, {itr});
          auto inc = cir::UnaryOp::create(builder, loc, loadItr.getType(),
                                          cir::UnaryOpKind::Inc, loadItr);
          builder.CIRBaseBuilderTy::createStore(loc, inc, itr);
          builder.createYield(loc);
        }));

  } else if (origType->isArrayType()) {
    cgf.cgm.errorNYI(loc,
                     "OpenACC Reduction combiner non-constant array recipe");
  } else {
    emitCombiner(lhsArg, rhsArg, origType);
  }

  builder.setInsertionPointToEnd(&recipe.getCombinerRegion().back());
  ls.forceCleanup();
  mlir::acc::YieldOp::create(builder, locEnd, block->getArgument(0));
}

} // namespace clang::CIRGen