//===---- CGOpenMPRuntimeNVPTX.cpp - Interface to OpenMP NVPTX Runtimes ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation specialized to
// NVPTX targets.
//
//===----------------------------------------------------------------------===//

#include "CGOpenMPRuntimeNVPTX.h"
#include "CodeGenFunction.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/StmtOpenMP.h"

using namespace clang;
using namespace CodeGen;

namespace {
enum OpenMPRTLFunctionNVPTX {
  /// \brief Call to void __kmpc_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime);
  OMPRTL_NVPTX__kmpc_kernel_init,
  /// \brief Call to void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  OMPRTL_NVPTX__kmpc_kernel_deinit,
  /// \brief Call to void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
  /// int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
  OMPRTL_NVPTX__kmpc_spmd_kernel_init,
  /// \brief Call to void __kmpc_spmd_kernel_deinit();
  OMPRTL_NVPTX__kmpc_spmd_kernel_deinit,
  /// \brief Call to void __kmpc_kernel_prepare_parallel(void
  /// *outlined_function, void ***args, kmp_int32 nArgs);
  OMPRTL_NVPTX__kmpc_kernel_prepare_parallel,
  /// \brief Call to bool __kmpc_kernel_parallel(void **outlined_function, void
  /// ***args);
  OMPRTL_NVPTX__kmpc_kernel_parallel,
  /// \brief Call to void __kmpc_kernel_end_parallel();
  OMPRTL_NVPTX__kmpc_kernel_end_parallel,
  /// Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_serialized_parallel,
  /// Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
  /// global_tid);
  OMPRTL_NVPTX__kmpc_end_serialized_parallel,
  /// \brief Call to int32_t __kmpc_shuffle_int32(int32_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int32,
  /// \brief Call to int64_t __kmpc_shuffle_int64(int64_t element,
  /// int16_t lane_offset, int16_t warp_size);
  OMPRTL_NVPTX__kmpc_shuffle_int64,
  /// \brief Call to __kmpc_nvptx_parallel_reduce_nowait(kmp_int32
  /// global_tid, kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
  OMPRTL_NVPTX__kmpc_parallel_reduce_nowait,
  /// \brief Call to __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
  /// int32_t num_vars, size_t reduce_size, void *reduce_data,
  /// void (*kmp_ShuffleReductFctPtr)(void *rhs, int16_t lane_id, int16_t
  /// lane_offset, int16_t shortCircuit),
  /// void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
  /// void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
  /// int32_t index, int32_t width),
  /// void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad, int32_t
  /// index, int32_t width, int32_t reduce))
  OMPRTL_NVPTX__kmpc_teams_reduce_nowait,
  /// \brief Call to __kmpc_nvptx_end_reduce_nowait(int32_t global_tid);
  OMPRTL_NVPTX__kmpc_end_reduce_nowait
};

/// Pre(post)-action for different OpenMP constructs specialized for NVPTX.
class NVPTXActionTy final : public PrePostActionTy {
  llvm::Value *EnterCallee;
  ArrayRef<llvm::Value *> EnterArgs;
  llvm::Value *ExitCallee;
  ArrayRef<llvm::Value *> ExitArgs;
  bool Conditional;
  llvm::BasicBlock *ContBlock = nullptr;

public:
  NVPTXActionTy(llvm::Value *EnterCallee, ArrayRef<llvm::Value *> EnterArgs,
                llvm::Value *ExitCallee, ArrayRef<llvm::Value *> ExitArgs,
                bool Conditional = false)
      : EnterCallee(EnterCallee), EnterArgs(EnterArgs), ExitCallee(ExitCallee),
        ExitArgs(ExitArgs), Conditional(Conditional) {}
  void Enter(CodeGenFunction &CGF) override {
    llvm::Value *EnterRes = CGF.EmitRuntimeCall(EnterCallee, EnterArgs);
    if (Conditional) {
      llvm::Value *CallBool = CGF.Builder.CreateIsNotNull(EnterRes);
      auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
      ContBlock = CGF.createBasicBlock("omp_if.end");
      // Generate the branch (If-stmt)
      CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
      CGF.EmitBlock(ThenBlock);
    }
  }
  void Done(CodeGenFunction &CGF) {
    // Emit the rest of blocks/branches
    CGF.EmitBranch(ContBlock);
    CGF.EmitBlock(ContBlock, true);
  }
  void Exit(CodeGenFunction &CGF) override {
    CGF.EmitRuntimeCall(ExitCallee, ExitArgs);
  }
};

// A class to track the execution mode when codegening directives within
// a target region. The appropriate mode (generic/spmd) is set on entry
// to the target region and used by containing directives such as 'parallel'
// to emit optimized code.
class ExecutionModeRAII {
private:
  CGOpenMPRuntimeNVPTX::ExecutionMode SavedMode;
  CGOpenMPRuntimeNVPTX::ExecutionMode &Mode;

public:
  ExecutionModeRAII(CGOpenMPRuntimeNVPTX::ExecutionMode &Mode,
                    CGOpenMPRuntimeNVPTX::ExecutionMode NewMode)
      : Mode(Mode) {
    SavedMode = Mode;
    Mode = NewMode;
  }
  ~ExecutionModeRAII() { Mode = SavedMode; }
};
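
// Usage sketch: the kernel emitters below construct one of these on entry to
// a target region, e.g. emitSpmdKernel does
//   ExecutionModeRAII ModeRAII(CurrentExecutionMode, ExecutionMode::Spmd);
// and the previous mode is restored when ModeRAII goes out of scope.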

/// GPU Configuration: This information can be derived from cuda registers,
/// however, providing compile time constants helps generate more efficient
/// code. For all practical purposes this is fine because the configuration
/// is the same for all known NVPTX architectures.
enum MachineConfiguration : unsigned {
  WarpSize = 32,
  /// Number of bits required to represent a lane identifier, which is
  /// computed as log_2(WarpSize).
  LaneIDBits = 5,
  LaneIDMask = WarpSize - 1,

  /// Global memory alignment for performance.
  GlobalMemoryAlignment = 256,
};

enum NamedBarrier : unsigned {
  /// Synchronize on this barrier #ID using a named barrier primitive.
  /// Only the subset of active threads in a parallel region arrive at the
  /// barrier.
  NB_Parallel = 1,
};
} // anonymous namespace

/// Get the GPU warp size.
static llvm::Value *getNVPTXWarpSize(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_warpsize),
      "nvptx_warp_size");
}

/// Get the id of the current thread on the GPU.
static llvm::Value *getNVPTXThreadID(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x),
      "nvptx_tid");
}

/// Get the id of the warp in the block.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXWarpID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAShr(getNVPTXThreadID(CGF), LaneIDBits, "nvptx_warp_id");
}

/// Get the id of the current lane in the Warp.
/// We assume that the warp size is 32, which is always the case
/// on the NVPTX device, to generate more efficient code.
static llvm::Value *getNVPTXLaneID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  return Bld.CreateAnd(getNVPTXThreadID(CGF), Bld.getInt32(LaneIDMask),
                       "nvptx_lane_id");
}
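
// For example, thread 70 in a CTA is lane 6 of warp 2: 70 >> 5 == 2 and
// 70 & 31 == 6.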

/// Get the maximum number of threads in a block of the GPU.
static llvm::Value *getNVPTXNumThreads(CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(
      llvm::Intrinsic::getDeclaration(
          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_ntid_x),
      "nvptx_num_threads");
}

/// Get barrier to synchronize all threads in a block.
static void getNVPTXCTABarrier(CodeGenFunction &CGF) {
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
}

/// Get barrier #ID to synchronize selected (multiple of warp size) threads in
/// a CTA.
static void getNVPTXBarrier(CodeGenFunction &CGF, int ID,
                            llvm::Value *NumThreads) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *Args[] = {Bld.getInt32(ID), NumThreads};
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
                          &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier),
                      Args);
}

/// Synchronize all GPU threads in a block.
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); }

/// Synchronize worker threads in a parallel region.
static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) {
  return getNVPTXBarrier(CGF, NB_Parallel, NumThreads);
}

/// Get the value of the thread_limit clause in the teams directive.
/// For the 'generic' execution mode, the runtime encodes thread_limit in
/// the launch parameters, always starting thread_limit+warpSize threads per
/// CTA. The threads in the last warp are reserved for master execution.
/// For the 'spmd' execution mode, all threads in a CTA are part of the team.
static llvm::Value *getThreadLimit(CodeGenFunction &CGF,
                                   bool IsInSpmdExecutionMode = false) {
  CGBuilderTy &Bld = CGF.Builder;
  return IsInSpmdExecutionMode
             ? getNVPTXNumThreads(CGF)
             : Bld.CreateSub(getNVPTXNumThreads(CGF), getNVPTXWarpSize(CGF),
                             "thread_limit");
}
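
// For example, a generic-mode kernel launched with 96 threads per CTA has a
// thread_limit of 96 - 32 = 64 workers, the last warp being reserved for the
// master; in spmd mode all 96 threads count toward the team.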

/// Get the thread id of the OMP master thread.
/// The master thread id is the first thread (lane) of the last warp in the
/// GPU block. Warp size is assumed to be some power of 2.
/// Thread id is 0 indexed.
/// E.g: If NumThreads is 33, master id is 32.
///      If NumThreads is 64, master id is 32.
///      If NumThreads is 1024, master id is 992.
static llvm::Value *getMasterThreadID(CodeGenFunction &CGF) {
  CGBuilderTy &Bld = CGF.Builder;
  llvm::Value *NumThreads = getNVPTXNumThreads(CGF);

  // We assume that the warp size is a power of 2.
  llvm::Value *Mask = Bld.CreateSub(getNVPTXWarpSize(CGF), Bld.getInt32(1));

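  // I.e., master_tid = (NumThreads - 1) & ~(WarpSize - 1), the first lane of
  // the last warp; e.g. (33 - 1) & ~31 == 32.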
  return Bld.CreateAnd(Bld.CreateSub(NumThreads, Bld.getInt32(1)),
                       Bld.CreateNot(Mask), "master_tid");
}

CGOpenMPRuntimeNVPTX::WorkerFunctionState::WorkerFunctionState(
    CodeGenModule &CGM)
    : WorkerFn(nullptr), CGFI(nullptr) {
  createWorkerFunction(CGM);
}

void CGOpenMPRuntimeNVPTX::WorkerFunctionState::createWorkerFunction(
    CodeGenModule &CGM) {
  // Create a worker function with no arguments.
  CGFI = &CGM.getTypes().arrangeNullaryFunction();

  WorkerFn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(*CGFI), llvm::GlobalValue::InternalLinkage,
      /* placeholder */ "_worker", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, WorkerFn, *CGFI);
}

bool CGOpenMPRuntimeNVPTX::isInSpmdExecutionMode() const {
  return CurrentExecutionMode == CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd;
}

static CGOpenMPRuntimeNVPTX::ExecutionMode
getExecutionModeForDirective(CodeGenModule &CGM,
                             const OMPExecutableDirective &D) {
  OpenMPDirectiveKind DirectiveKind = D.getDirectiveKind();
  switch (DirectiveKind) {
  case OMPD_target:
  case OMPD_target_teams:
    return CGOpenMPRuntimeNVPTX::ExecutionMode::Generic;
  case OMPD_target_parallel:
  case OMPD_target_parallel_for:
  case OMPD_target_parallel_for_simd:
    return CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd;
  default:
    llvm_unreachable("Unsupported directive on NVPTX device.");
  }
  llvm_unreachable("Unsupported directive on NVPTX device.");
}

void CGOpenMPRuntimeNVPTX::emitGenericKernel(const OMPExecutableDirective &D,
                                             StringRef ParentName,
                                             llvm::Function *&OutlinedFn,
                                             llvm::Constant *&OutlinedFnID,
                                             bool IsOffloadEntry,
                                             const RegionCodeGenTy &CodeGen) {
  ExecutionModeRAII ModeRAII(CurrentExecutionMode,
                             CGOpenMPRuntimeNVPTX::ExecutionMode::Generic);
  EntryFunctionState EST;
  WorkerFunctionState WST(CGM);
  Work.clear();
  WrapperFunctionsMap.clear();

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         CGOpenMPRuntimeNVPTX::WorkerFunctionState &WST)
        : RT(RT), EST(EST), WST(WST) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitGenericEntryHeader(CGF, EST, WST);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.emitGenericEntryFooter(CGF, EST);
    }
  } Action(*this, EST, WST);
  CodeGen.setAction(Action);
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);

  // Create the worker function
  emitWorkerFunction(WST);

  // Now change the name of the worker function to correspond to this target
  // region's entry function.
  WST.WorkerFn->setName(OutlinedFn->getName() + "_worker");
}

// Setup NVPTX threads for master-worker OpenMP scheme.
void CGOpenMPRuntimeNVPTX::emitGenericEntryHeader(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST,
                                                  WorkerFunctionState &WST) {
  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *WorkerBB = CGF.createBasicBlock(".worker");
  llvm::BasicBlock *MasterCheckBB = CGF.createBasicBlock(".mastercheck");
  llvm::BasicBlock *MasterBB = CGF.createBasicBlock(".master");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  auto *IsWorker =
      Bld.CreateICmpULT(getNVPTXThreadID(CGF), getThreadLimit(CGF));
  Bld.CreateCondBr(IsWorker, WorkerBB, MasterCheckBB);

  CGF.EmitBlock(WorkerBB);
  emitCall(CGF, WST.WorkerFn);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(MasterCheckBB);
  auto *IsMaster =
      Bld.CreateICmpEQ(getNVPTXThreadID(CGF), getMasterThreadID(CGF));
  Bld.CreateCondBr(IsMaster, MasterBB, EST.ExitBB);

  CGF.EmitBlock(MasterBB);
  // First action in sequential region:
  // Initialize the state of the OpenMP runtime library on the GPU.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {getThreadLimit(CGF),
                         Bld.getInt16(/*RequiresOMPRuntime=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_init), Args);
}

void CGOpenMPRuntimeNVPTX::emitGenericEntryFooter(CodeGenFunction &CGF,
                                                  EntryFunctionState &EST) {
  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".termination.notifier");
  CGF.EmitBranch(TerminateBB);

  CGF.EmitBlock(TerminateBB);
  // Signal termination condition.
  // TODO: Optimize runtime initialization and pass in correct value.
  llvm::Value *Args[] = {CGF.Builder.getInt16(/*IsOMPRuntimeInitialized=*/1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_deinit), Args);
  // Barrier to terminate worker threads.
  syncCTAThreads(CGF);
  // Master thread jumps to exit point.
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

void CGOpenMPRuntimeNVPTX::emitSpmdKernel(const OMPExecutableDirective &D,
                                          StringRef ParentName,
                                          llvm::Function *&OutlinedFn,
                                          llvm::Constant *&OutlinedFnID,
                                          bool IsOffloadEntry,
                                          const RegionCodeGenTy &CodeGen) {
  ExecutionModeRAII ModeRAII(CurrentExecutionMode,
                             CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd);
  EntryFunctionState EST;

  // Emit target region as a standalone region.
  class NVPTXPrePostActionTy : public PrePostActionTy {
    CGOpenMPRuntimeNVPTX &RT;
    CGOpenMPRuntimeNVPTX::EntryFunctionState &EST;
    const OMPExecutableDirective &D;

  public:
    NVPTXPrePostActionTy(CGOpenMPRuntimeNVPTX &RT,
                         CGOpenMPRuntimeNVPTX::EntryFunctionState &EST,
                         const OMPExecutableDirective &D)
        : RT(RT), EST(EST), D(D) {}
    void Enter(CodeGenFunction &CGF) override {
      RT.emitSpmdEntryHeader(CGF, EST, D);
    }
    void Exit(CodeGenFunction &CGF) override {
      RT.emitSpmdEntryFooter(CGF, EST);
    }
  } Action(*this, EST, D);
  CodeGen.setAction(Action);
  emitTargetOutlinedFunctionHelper(D, ParentName, OutlinedFn, OutlinedFnID,
                                   IsOffloadEntry, CodeGen);
}

void CGOpenMPRuntimeNVPTX::emitSpmdEntryHeader(
    CodeGenFunction &CGF, EntryFunctionState &EST,
    const OMPExecutableDirective &D) {
  auto &Bld = CGF.Builder;

  // Setup BBs in entry function.
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute");
  EST.ExitBB = CGF.createBasicBlock(".exit");

  // Initialize the OMP state in the runtime; called by all active threads.
  // TODO: Set RequiresOMPRuntime and RequiresDataSharing parameters
  // based on code analysis of the target region.
  llvm::Value *Args[] = {getThreadLimit(CGF, /*IsInSpmdExecutionMode=*/true),
                         /*RequiresOMPRuntime=*/Bld.getInt16(1),
                         /*RequiresDataSharing=*/Bld.getInt16(1)};
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_init), Args);
  CGF.EmitBranch(ExecuteBB);

  CGF.EmitBlock(ExecuteBB);
}

void CGOpenMPRuntimeNVPTX::emitSpmdEntryFooter(CodeGenFunction &CGF,
                                               EntryFunctionState &EST) {
  if (!EST.ExitBB)
    EST.ExitBB = CGF.createBasicBlock(".exit");

  llvm::BasicBlock *OMPDeInitBB = CGF.createBasicBlock(".omp.deinit");
  CGF.EmitBranch(OMPDeInitBB);

  CGF.EmitBlock(OMPDeInitBB);
  // DeInitialize the OMP state in the runtime; called by all active threads.
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_spmd_kernel_deinit), None);
  CGF.EmitBranch(EST.ExitBB);

  CGF.EmitBlock(EST.ExitBB);
  EST.ExitBB = nullptr;
}

// Create a unique global variable to indicate the execution mode of this
// target region. The execution mode is either 'generic', or 'spmd' depending
// on the target directive. This variable is picked up by the offload library
// to setup the device appropriately before kernel launch. If the execution
// mode is 'generic', the runtime reserves one warp for the master, otherwise,
// all warps participate in parallel work.
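// E.g., a generic-mode entry point "foo" (a hypothetical name) yields:
//   @foo_exec_mode = weak constant i8 1
// assuming Generic maps to 1 in the ExecutionMode enumeration.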
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name,
                                     CGOpenMPRuntimeNVPTX::ExecutionMode Mode) {
  (void)new llvm::GlobalVariable(
      CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
      llvm::GlobalValue::WeakAnyLinkage,
      llvm::ConstantInt::get(CGM.Int8Ty, Mode), Name + Twine("_exec_mode"));
}

void CGOpenMPRuntimeNVPTX::emitWorkerFunction(WorkerFunctionState &WST) {
  ASTContext &Ctx = CGM.getContext();

  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, WST.WorkerFn, *WST.CGFI, {});
  emitWorkerLoop(CGF, WST);
  CGF.FinishFunction();
}

void CGOpenMPRuntimeNVPTX::emitWorkerLoop(CodeGenFunction &CGF,
                                          WorkerFunctionState &WST) {
  //
  // The workers enter this loop and wait for parallel work from the master.
  // When the master encounters a parallel region it sets up the work + variable
  // arguments, and wakes up the workers. The workers first check to see if
  // they are required for the parallel region, i.e., within the # of requested
  // parallel threads. The activated workers load the variable arguments and
  // execute the parallel work.
  //
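  // Informal sketch of the emitted control flow:
  //   .await.work:         barrier; __kmpc_kernel_parallel(&work_fn, &args);
  //                        if (work_fn == null) goto .exit;
  //   .select.workers:     if (!exec_status) goto .barrier.parallel;
  //   .execute.parallel:   call the wrapper matching work_fn;
  //   .terminate.parallel: __kmpc_kernel_end_parallel();
  //   .barrier.parallel:   barrier; goto .await.work;
  //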

  CGBuilderTy &Bld = CGF.Builder;

  llvm::BasicBlock *AwaitBB = CGF.createBasicBlock(".await.work");
  llvm::BasicBlock *SelectWorkersBB = CGF.createBasicBlock(".select.workers");
  llvm::BasicBlock *ExecuteBB = CGF.createBasicBlock(".execute.parallel");
  llvm::BasicBlock *TerminateBB = CGF.createBasicBlock(".terminate.parallel");
  llvm::BasicBlock *BarrierBB = CGF.createBasicBlock(".barrier.parallel");
  llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".exit");

  CGF.EmitBranch(AwaitBB);

  // Workers wait for work from master.
  CGF.EmitBlock(AwaitBB);
  // Wait for parallel work
  syncCTAThreads(CGF);

  Address WorkFn =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrTy, /*Name=*/"work_fn");
  Address ExecStatus =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8Ty, /*Name=*/"exec_status");
  CGF.InitTempAlloca(ExecStatus, Bld.getInt8(/*C=*/0));
  CGF.InitTempAlloca(WorkFn, llvm::Constant::getNullValue(CGF.Int8PtrTy));

  // Set up shared arguments
  Address SharedArgs =
      CGF.CreateDefaultAlignTempAlloca(CGF.Int8PtrPtrTy, "shared_args");
  llvm::Value *Args[] = {WorkFn.getPointer(), SharedArgs.getPointer()};
  llvm::Value *Ret = CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_parallel), Args);
  Bld.CreateStore(Bld.CreateZExt(Ret, CGF.Int8Ty), ExecStatus);

  // On termination condition (work_fn == 0), exit loop.
  llvm::Value *ShouldTerminate =
      Bld.CreateIsNull(Bld.CreateLoad(WorkFn), "should_terminate");
  Bld.CreateCondBr(ShouldTerminate, ExitBB, SelectWorkersBB);

  // Activate requested workers.
  CGF.EmitBlock(SelectWorkersBB);
  llvm::Value *IsActive =
      Bld.CreateIsNotNull(Bld.CreateLoad(ExecStatus), "is_active");
  Bld.CreateCondBr(IsActive, ExecuteBB, BarrierBB);

  // Signal start of parallel region.
  CGF.EmitBlock(ExecuteBB);

  // Current context
  ASTContext &Ctx = CGF.getContext();

  // Process work items: outlined parallel functions.
  for (auto *W : Work) {
    // Try to match this outlined function.
    llvm::Value *ID = Bld.CreatePointerBitCastOrAddrSpaceCast(W, CGM.Int8PtrTy);

    llvm::Value *WorkFnMatch =
        Bld.CreateICmpEQ(Bld.CreateLoad(WorkFn), ID, "work_match");

    llvm::BasicBlock *ExecuteFNBB = CGF.createBasicBlock(".execute.fn");
    llvm::BasicBlock *CheckNextBB = CGF.createBasicBlock(".check.next");
    Bld.CreateCondBr(WorkFnMatch, ExecuteFNBB, CheckNextBB);

    // Execute this outlined function.
    CGF.EmitBlock(ExecuteFNBB);

    // Insert call to work function via shared wrapper. The shared
    // wrapper takes exactly three arguments:
    //   - the parallelism level;
    //   - the master thread ID;
    //   - the list of references to shared arguments.
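    // (Roughly: void wrapper(int16_t parallel_level, int32_t master_tid,
    //  void **shared_args); an illustrative signature, see
    //  createDataSharingWrapper.)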
    //
    // TODO: Assert that the function is a wrapper function.
    Address Capture = CGF.EmitLoadOfPointer(SharedArgs,
        Ctx.getPointerType(
            Ctx.getPointerType(Ctx.VoidPtrTy)).castAs<PointerType>());
    emitCall(CGF, W, {Bld.getInt16(/*ParallelLevel=*/0),
        getMasterThreadID(CGF), Capture.getPointer()});

    // Go to end of parallel region.
    CGF.EmitBranch(TerminateBB);

    CGF.EmitBlock(CheckNextBB);
  }

  // Signal end of parallel region.
  CGF.EmitBlock(TerminateBB);
  CGF.EmitRuntimeCall(
      createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_end_parallel),
      llvm::None);
  CGF.EmitBranch(BarrierBB);

  // All active and inactive workers wait at a barrier after parallel region.
  CGF.EmitBlock(BarrierBB);
  // Barrier after parallel region.
  syncCTAThreads(CGF);
  CGF.EmitBranch(AwaitBB);

  // Exit target region.
  CGF.EmitBlock(ExitBB);
}

/// \brief Returns specified OpenMP runtime function for the current OpenMP
/// implementation. Specialized for the NVPTX device.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::Constant *
CGOpenMPRuntimeNVPTX::createNVPTXRuntimeFunction(unsigned Function) {
  llvm::Constant *RTLFn = nullptr;
  switch (static_cast<OpenMPRTLFunctionNVPTX>(Function)) {
  case OMPRTL_NVPTX__kmpc_kernel_init: {
    // Build void __kmpc_kernel_init(kmp_int32 thread_limit, int16_t
    // RequiresOMPRuntime);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_deinit: {
    // Build void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
    llvm::Type *TypeParams[] = {CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_init: {
    // Build void __kmpc_spmd_kernel_init(kmp_int32 thread_limit,
    // int16_t RequiresOMPRuntime, int16_t RequiresDataSharing);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_init");
    break;
  }
  case OMPRTL_NVPTX__kmpc_spmd_kernel_deinit: {
    // Build void __kmpc_spmd_kernel_deinit();
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_spmd_kernel_deinit");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_prepare_parallel: {
    /// Build void __kmpc_kernel_prepare_parallel(
    /// void *outlined_function, void ***args, kmp_int32 nArgs);
    llvm::Type *TypeParams[] = {CGM.Int8PtrTy,
        CGM.Int8PtrPtrTy->getPointerTo(0), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_prepare_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_parallel: {
    /// Build bool __kmpc_kernel_parallel(void **outlined_function,
    /// void ***args);
    llvm::Type *TypeParams[] = {CGM.Int8PtrPtrTy,
        CGM.Int8PtrPtrTy->getPointerTo(0)};
    llvm::Type *RetTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(RetTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_kernel_end_parallel: {
    /// Build void __kmpc_kernel_end_parallel();
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, llvm::None, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_end_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_serialized_parallel: {
    // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_serialized_parallel: {
    // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
    // global_tid);
    llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int32: {
    // Build int32_t __kmpc_shuffle_int32(int32_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int32Ty, CGM.Int16Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int32");
    break;
  }
  case OMPRTL_NVPTX__kmpc_shuffle_int64: {
    // Build int64_t __kmpc_shuffle_int64(int64_t element,
    // int16_t lane_offset, int16_t warp_size);
    llvm::Type *TypeParams[] = {CGM.Int64Ty, CGM.Int16Ty, CGM.Int16Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int64Ty, TypeParams, /*isVarArg*/ false);
    RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_shuffle_int64");
    break;
  }
  case OMPRTL_NVPTX__kmpc_parallel_reduce_nowait: {
    // Build int32_t __kmpc_nvptx_parallel_reduce_nowait(kmp_int32 global_tid,
    // kmp_int32 num_vars, size_t reduce_size, void* reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num));
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_parallel_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_teams_reduce_nowait: {
    // Build int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid,
    // int32_t num_vars, size_t reduce_size, void *reduce_data,
    // void (*kmp_ShuffleReductFctPtr)(void *rhsData, int16_t lane_id, int16_t
    // lane_offset, int16_t shortCircuit),
    // void (*kmp_InterWarpCopyFctPtr)(void* src, int32_t warp_num),
    // void (*kmp_CopyToScratchpadFctPtr)(void *reduce_data, void * scratchpad,
    // int32_t index, int32_t width),
    // void (*kmp_LoadReduceFctPtr)(void *reduce_data, void * scratchpad,
    // int32_t index, int32_t width, int32_t reduce))
    llvm::Type *ShuffleReduceTypeParams[] = {CGM.VoidPtrTy, CGM.Int16Ty,
                                             CGM.Int16Ty, CGM.Int16Ty};
    auto *ShuffleReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, ShuffleReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *InterWarpCopyTypeParams[] = {CGM.VoidPtrTy, CGM.Int32Ty};
    auto *InterWarpCopyFnTy =
        llvm::FunctionType::get(CGM.VoidTy, InterWarpCopyTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *CopyToScratchpadTypeParams[] = {CGM.VoidPtrTy, CGM.VoidPtrTy,
                                                CGM.Int32Ty, CGM.Int32Ty};
    auto *CopyToScratchpadFnTy =
        llvm::FunctionType::get(CGM.VoidTy, CopyToScratchpadTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *LoadReduceTypeParams[] = {
        CGM.VoidPtrTy, CGM.VoidPtrTy, CGM.Int32Ty, CGM.Int32Ty, CGM.Int32Ty};
    auto *LoadReduceFnTy =
        llvm::FunctionType::get(CGM.VoidTy, LoadReduceTypeParams,
                                /*isVarArg=*/false);
    llvm::Type *TypeParams[] = {CGM.Int32Ty,
                                CGM.Int32Ty,
                                CGM.SizeTy,
                                CGM.VoidPtrTy,
                                ShuffleReduceFnTy->getPointerTo(),
                                InterWarpCopyFnTy->getPointerTo(),
                                CopyToScratchpadFnTy->getPointerTo(),
                                LoadReduceFnTy->getPointerTo()};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_teams_reduce_nowait");
    break;
  }
  case OMPRTL_NVPTX__kmpc_end_reduce_nowait: {
    // Build void __kmpc_nvptx_end_reduce_nowait(kmp_int32 global_tid);
    llvm::Type *TypeParams[] = {CGM.Int32Ty};
    llvm::FunctionType *FnTy =
        llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
    RTLFn = CGM.CreateRuntimeFunction(
        FnTy, /*Name=*/"__kmpc_nvptx_end_reduce_nowait");
    break;
  }
  }
  return RTLFn;
}

void CGOpenMPRuntimeNVPTX::createOffloadEntry(llvm::Constant *ID,
                                              llvm::Constant *Addr,
                                              uint64_t Size, int32_t) {
  auto *F = dyn_cast<llvm::Function>(Addr);
  // TODO: Add support for global variables on the device after declare target
  // support.
  if (!F)
    return;
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get "nvvm.annotations" metadata node
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Metadata *MDVals[] = {
      llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, "kernel"),
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), 1))};
  // Append metadata to nvvm.annotations
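  // The added operand marks the target function as a kernel entry point,
  // producing IR of the form:
  //   !nvvm.annotations = !{!0}
  //   !0 = !{void (...)* @<target fn>, !"kernel", i32 1}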
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}

void CGOpenMPRuntimeNVPTX::emitTargetOutlinedFunction(
    const OMPExecutableDirective &D, StringRef ParentName,
    llvm::Function *&OutlinedFn, llvm::Constant *&OutlinedFnID,
    bool IsOffloadEntry, const RegionCodeGenTy &CodeGen) {
  if (!IsOffloadEntry) // Nothing to do.
    return;

  assert(!ParentName.empty() && "Invalid target region parent name!");

  CGOpenMPRuntimeNVPTX::ExecutionMode Mode =
      getExecutionModeForDirective(CGM, D);
  switch (Mode) {
  case CGOpenMPRuntimeNVPTX::ExecutionMode::Generic:
    emitGenericKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                      CodeGen);
    break;
  case CGOpenMPRuntimeNVPTX::ExecutionMode::Spmd:
    emitSpmdKernel(D, ParentName, OutlinedFn, OutlinedFnID, IsOffloadEntry,
                   CodeGen);
    break;
  case CGOpenMPRuntimeNVPTX::ExecutionMode::Unknown:
    llvm_unreachable(
        "Unknown programming model for OpenMP directive on NVPTX target.");
  }

  setPropertyExecutionMode(CGM, OutlinedFn->getName(), Mode);
}

CGOpenMPRuntimeNVPTX::CGOpenMPRuntimeNVPTX(CodeGenModule &CGM)
    : CGOpenMPRuntime(CGM), CurrentExecutionMode(ExecutionMode::Unknown) {
  if (!CGM.getLangOpts().OpenMPIsDevice)
    llvm_unreachable("OpenMP NVPTX can only handle device code.");
}

void CGOpenMPRuntimeNVPTX::emitProcBindClause(CodeGenFunction &CGF,
                                              OpenMPProcBindClauseKind ProcBind,
                                              SourceLocation Loc) {
  // Do nothing in case of Spmd mode and L0 parallel.
  // TODO: If in Spmd mode and L1 parallel emit the clause.
  if (isInSpmdExecutionMode())
    return;

  CGOpenMPRuntime::emitProcBindClause(CGF, ProcBind, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumThreadsClause(CodeGenFunction &CGF,
                                                llvm::Value *NumThreads,
                                                SourceLocation Loc) {
  // Do nothing in case of Spmd mode and L0 parallel.
  // TODO: If in Spmd mode and L1 parallel emit the clause.
  if (isInSpmdExecutionMode())
    return;

  CGOpenMPRuntime::emitNumThreadsClause(CGF, NumThreads, Loc);
}

void CGOpenMPRuntimeNVPTX::emitNumTeamsClause(CodeGenFunction &CGF,
                                              const Expr *NumTeams,
                                              const Expr *ThreadLimit,
                                              SourceLocation Loc) {}

llvm::Value *CGOpenMPRuntimeNVPTX::emitParallelOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {

  auto *OutlinedFun = cast<llvm::Function>(
      CGOpenMPRuntime::emitParallelOutlinedFunction(
          D, ThreadIDVar, InnermostKind, CodeGen));
  if (!isInSpmdExecutionMode()) {
    llvm::Function *WrapperFun =
        createDataSharingWrapper(OutlinedFun, D);
    WrapperFunctionsMap[OutlinedFun] = WrapperFun;
  }

  return OutlinedFun;
}

llvm::Value *CGOpenMPRuntimeNVPTX::emitTeamsOutlinedFunction(
    const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
    OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) {

  llvm::Value *OutlinedFunVal = CGOpenMPRuntime::emitTeamsOutlinedFunction(
      D, ThreadIDVar, InnermostKind, CodeGen);
  llvm::Function *OutlinedFun = cast<llvm::Function>(OutlinedFunVal);
  OutlinedFun->removeFnAttr(llvm::Attribute::NoInline);
  OutlinedFun->removeFnAttr(llvm::Attribute::OptimizeNone);
  OutlinedFun->addFnAttr(llvm::Attribute::AlwaysInline);

  return OutlinedFun;
}

void CGOpenMPRuntimeNVPTX::emitTeamsCall(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &D,
                                         SourceLocation Loc,
                                         llvm::Value *OutlinedFn,
                                         ArrayRef<llvm::Value *> CapturedVars) {
  if (!CGF.HaveInsertPoint())
    return;

  Address ZeroAddr =
      CGF.CreateTempAlloca(CGF.Int32Ty, CharUnits::fromQuantity(4),
                           /*Name*/ ".zero.addr");
  CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.push_back(ZeroAddr.getPointer());
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

void CGOpenMPRuntimeNVPTX::emitParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  if (!CGF.HaveInsertPoint())
    return;

  if (isInSpmdExecutionMode())
    emitSpmdParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
  else
    emitGenericParallelCall(CGF, Loc, OutlinedFn, CapturedVars, IfCond);
}

void CGOpenMPRuntimeNVPTX::emitGenericParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  llvm::Function *Fn = cast<llvm::Function>(OutlinedFn);
  llvm::Function *WFn = WrapperFunctionsMap[Fn];
  assert(WFn && "Wrapper function does not exist!");

  // Force inline this outlined function at its call site.
  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);

  auto &&L0ParallelGen = [this, WFn, &CapturedVars](CodeGenFunction &CGF,
                                                    PrePostActionTy &) {
    CGBuilderTy &Bld = CGF.Builder;

    llvm::Value *ID = Bld.CreateBitOrPointerCast(WFn, CGM.Int8PtrTy);

    if (!CapturedVars.empty()) {
      // There's something to share, add the attribute
      CGF.CurFn->addFnAttr("has-nvptx-shared-depot");
      // Prepare for parallel region. Indicate the outlined function.
      Address SharedArgs =
          CGF.CreateDefaultAlignTempAlloca(CGF.VoidPtrPtrTy,
              "shared_args");
      llvm::Value *SharedArgsPtr = SharedArgs.getPointer();
      llvm::Value *Args[] = {ID, SharedArgsPtr,
                             Bld.getInt32(CapturedVars.size())};

      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
          Args);

      unsigned Idx = 0;
      ASTContext &Ctx = CGF.getContext();
      for (llvm::Value *V : CapturedVars) {
        Address Dst = Bld.CreateConstInBoundsGEP(
            CGF.EmitLoadOfPointer(SharedArgs,
                Ctx.getPointerType(
                    Ctx.getPointerType(Ctx.VoidPtrTy)).castAs<PointerType>()),
            Idx, CGF.getPointerSize());
        llvm::Value *PtrV = Bld.CreateBitCast(V, CGF.VoidPtrTy);
        CGF.EmitStoreOfScalar(PtrV, Dst, /*Volatile=*/false,
            Ctx.getPointerType(Ctx.VoidPtrTy));
        Idx++;
      }
    } else {
      llvm::Value *Args[] = {ID,
          llvm::ConstantPointerNull::get(CGF.VoidPtrPtrTy->getPointerTo(0)),
          /*nArgs=*/Bld.getInt32(0)};
      CGF.EmitRuntimeCall(
          createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_kernel_prepare_parallel),
          Args);
    }

    // Activate workers. This barrier is used by the master to signal
    // work for the workers.
    syncCTAThreads(CGF);

    // OpenMP [2.5, Parallel Construct, p.49]
    // There is an implied barrier at the end of a parallel region. After the
    // end of a parallel region, only the master thread of the team resumes
    // execution of the enclosing task region.
    //
    // The master waits at this barrier until all workers are done.
    syncCTAThreads(CGF);

    // Remember for post-processing in worker loop.
    Work.emplace_back(WFn);
  };

  auto *RTLoc = emitUpdateLocation(CGF, Loc);
  auto *ThreadID = getThreadID(CGF, Loc);
  llvm::Value *Args[] = {RTLoc, ThreadID};

  auto &&SeqGen = [this, Fn, &CapturedVars, &Args, Loc](CodeGenFunction &CGF,
                                                        PrePostActionTy &) {
    auto &&CodeGen = [this, Fn, &CapturedVars, Loc](CodeGenFunction &CGF,
                                                    PrePostActionTy &Action) {
      Action.Enter(CGF);

      llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
      OutlinedFnArgs.push_back(
          llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
      OutlinedFnArgs.push_back(
          llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
      OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
      emitOutlinedFunctionCall(CGF, Loc, Fn, OutlinedFnArgs);
    };

    RegionCodeGenTy RCG(CodeGen);
    NVPTXActionTy Action(
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_serialized_parallel),
        Args,
        createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_serialized_parallel),
        Args);
    RCG.setAction(Action);
    RCG(CGF);
  };

  if (IfCond)
    emitOMPIfClause(CGF, IfCond, L0ParallelGen, SeqGen);
  else {
    CodeGenFunction::RunCleanupsScope Scope(CGF);
    RegionCodeGenTy ThenRCG(L0ParallelGen);
    ThenRCG(CGF);
  }
}

void CGOpenMPRuntimeNVPTX::emitSpmdParallelCall(
    CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
    ArrayRef<llvm::Value *> CapturedVars, const Expr *IfCond) {
  // Just call the outlined function to execute the parallel region.
  // OutlinedFn(&GTid, &zero, CapturedStruct);
  //
  // TODO: Do something with IfCond when support for the 'if' clause
  // is added on Spmd target directives.
  llvm::SmallVector<llvm::Value *, 16> OutlinedFnArgs;
  OutlinedFnArgs.push_back(
      llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
  OutlinedFnArgs.push_back(
      llvm::ConstantPointerNull::get(CGM.Int32Ty->getPointerTo()));
  OutlinedFnArgs.append(CapturedVars.begin(), CapturedVars.end());
  emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, OutlinedFnArgs);
}

/// This function creates calls to one of two shuffle functions to copy
/// variables between lanes in a warp.
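/// For example, an element of 4 bytes or fewer travels through
/// __kmpc_shuffle_int32(elem, lane_offset, warp_size), while elements up to
/// 8 bytes use __kmpc_shuffle_int64.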
static llvm::Value *createRuntimeShuffleFunction(CodeGenFunction &CGF,
                                                 QualType ElemTy,
                                                 llvm::Value *Elem,
                                                 llvm::Value *Offset) {
  auto &CGM = CGF.CGM;
  auto &C = CGM.getContext();
  auto &Bld = CGF.Builder;
  CGOpenMPRuntimeNVPTX &RT =
      *(static_cast<CGOpenMPRuntimeNVPTX *>(&CGM.getOpenMPRuntime()));

  unsigned Size = CGM.getContext().getTypeSizeInChars(ElemTy).getQuantity();
  assert(Size <= 8 && "Unsupported bitwidth in shuffle instruction.");

  OpenMPRTLFunctionNVPTX ShuffleFn = Size <= 4
                                         ? OMPRTL_NVPTX__kmpc_shuffle_int32
                                         : OMPRTL_NVPTX__kmpc_shuffle_int64;

  // Cast all types to 32- or 64-bit values before calling shuffle routines.
  auto CastTy = Size <= 4 ? CGM.Int32Ty : CGM.Int64Ty;
  auto *ElemCast = Bld.CreateSExtOrBitCast(Elem, CastTy);
  auto *WarpSize = CGF.EmitScalarConversion(
      getNVPTXWarpSize(CGF), C.getIntTypeForBitwidth(32, /* Signed */ true),
      C.getIntTypeForBitwidth(16, /* Signed */ true), SourceLocation());

  auto *ShuffledVal =
      CGF.EmitRuntimeCall(RT.createNVPTXRuntimeFunction(ShuffleFn),
                          {ElemCast, Offset, WarpSize});

  return Bld.CreateTruncOrBitCast(ShuffledVal, CGF.ConvertTypeForMem(ElemTy));
}

namespace {
enum CopyAction : unsigned {
  // RemoteLaneToThread: Copy over a Reduce list from a remote lane in
  // the warp using shuffle instructions.
  RemoteLaneToThread,
  // ThreadCopy: Make a copy of a Reduce list on the thread's stack.
  ThreadCopy,
  // ThreadToScratchpad: Copy a team-reduced array to the scratchpad.
  ThreadToScratchpad,
  // ScratchpadToThread: Copy from a scratchpad array in global memory
  // containing team-reduced data to a thread's stack.
  ScratchpadToThread,
};
} // namespace

struct CopyOptionsTy {
  llvm::Value *RemoteLaneOffset;
  llvm::Value *ScratchpadIndex;
  llvm::Value *ScratchpadWidth;
};

/// Emit instructions to copy a Reduce list, which contains partially
/// aggregated values, in the specified direction.
static void emitReductionListCopy(
    CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy,
    ArrayRef<const Expr *> Privates, Address SrcBase, Address DestBase,
    CopyOptionsTy CopyOptions = {nullptr, nullptr, nullptr}) {

  auto &CGM = CGF.CGM;
  auto &C = CGM.getContext();
  auto &Bld = CGF.Builder;

  auto *RemoteLaneOffset = CopyOptions.RemoteLaneOffset;
  auto *ScratchpadIndex = CopyOptions.ScratchpadIndex;
  auto *ScratchpadWidth = CopyOptions.ScratchpadWidth;

  // Iterate, element-by-element, through the source Reduce list and
  // make a copy.
  unsigned Idx = 0;
  unsigned Size = Privates.size();
  for (auto &Private : Privates) {
    Address SrcElementAddr = Address::invalid();
    Address DestElementAddr = Address::invalid();
    Address DestElementPtrAddr = Address::invalid();
    // Should we shuffle in an element from a remote lane?
    bool ShuffleInElement = false;
    // Set to true to update the pointer in the dest Reduce list to a
    // newly created element.
    bool UpdateDestListPtr = false;
    // Increment the src or dest pointer to the scratchpad, for each
    // new element.
    bool IncrScratchpadSrc = false;
    bool IncrScratchpadDest = false;

    switch (Action) {
    case RemoteLaneToThread: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr =
          Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
      llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
          SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      SrcElementAddr =
          Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));

      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr =
          Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      ShuffleInElement = true;
      UpdateDestListPtr = true;
      break;
    }
    case ThreadCopy: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr =
          Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
      llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
          SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      SrcElementAddr =
          Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));

      // Step 1.2: Get the address for dest element. The destination
      // element has already been created on the thread's stack.
      DestElementPtrAddr =
          Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
      llvm::Value *DestElementPtr =
          CGF.EmitLoadOfScalar(DestElementPtrAddr, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation());
      Address DestElemAddr =
          Address(DestElementPtr, C.getTypeAlignInChars(Private->getType()));
      DestElementAddr = Bld.CreateElementBitCast(
          DestElemAddr, CGF.ConvertTypeForMem(Private->getType()));
      break;
    }
    case ThreadToScratchpad: {
      // Step 1.1: Get the address for the src element in the Reduce list.
      Address SrcElementPtrAddr =
          Bld.CreateConstArrayGEP(SrcBase, Idx, CGF.getPointerSize());
      llvm::Value *SrcElementPtrPtr = CGF.EmitLoadOfScalar(
          SrcElementPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
      SrcElementAddr =
          Address(SrcElementPtrPtr, C.getTypeAlignInChars(Private->getType()));

      // Step 1.2: Get the address for dest element:
      // address = base + index * ElementSizeInChars.
      unsigned ElementSizeInChars =
          C.getTypeSizeInChars(Private->getType()).getQuantity();
      auto *CurrentOffset =
          Bld.CreateMul(llvm::ConstantInt::get(CGM.SizeTy, ElementSizeInChars),
                        ScratchpadIndex);
      auto *ScratchPadElemAbsolutePtrVal =
          Bld.CreateAdd(DestBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      Address ScratchpadPtr =
          Address(ScratchPadElemAbsolutePtrVal,
                  C.getTypeAlignInChars(Private->getType()));
      DestElementAddr = Bld.CreateElementBitCast(
          ScratchpadPtr, CGF.ConvertTypeForMem(Private->getType()));
      IncrScratchpadDest = true;
      break;
    }
    case ScratchpadToThread: {
      // Step 1.1: Get the address for the src element in the scratchpad.
      // address = base + index * ElementSizeInChars.
      unsigned ElementSizeInChars =
          C.getTypeSizeInChars(Private->getType()).getQuantity();
      auto *CurrentOffset =
          Bld.CreateMul(llvm::ConstantInt::get(CGM.SizeTy, ElementSizeInChars),
                        ScratchpadIndex);
      auto *ScratchPadElemAbsolutePtrVal =
          Bld.CreateAdd(SrcBase.getPointer(), CurrentOffset);
      ScratchPadElemAbsolutePtrVal =
          Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
      SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
                               C.getTypeAlignInChars(Private->getType()));
      IncrScratchpadSrc = true;

      // Step 1.2: Create a temporary to store the element in the destination
      // Reduce list.
      DestElementPtrAddr =
          Bld.CreateConstArrayGEP(DestBase, Idx, CGF.getPointerSize());
      DestElementAddr =
          CGF.CreateMemTemp(Private->getType(), ".omp.reduction.element");
      UpdateDestListPtr = true;
      break;
    }
    }

    // Regardless of src and dest of copy, we emit the load of src
    // element as this is required in all directions
    SrcElementAddr = Bld.CreateElementBitCast(
        SrcElementAddr, CGF.ConvertTypeForMem(Private->getType()));
    llvm::Value *Elem =
        CGF.EmitLoadOfScalar(SrcElementAddr, /*Volatile=*/false,
                             Private->getType(), SourceLocation());

    // Now that all active lanes have read the element in the
    // Reduce list, shuffle over the value from the remote lane.
    if (ShuffleInElement) {
      Elem = createRuntimeShuffleFunction(CGF, Private->getType(), Elem,
                                          RemoteLaneOffset);
    }

    // Store the source element value to the dest element address.
    CGF.EmitStoreOfScalar(Elem, DestElementAddr, /*Volatile=*/false,
                          Private->getType());

    // Step 3.1: Modify reference in dest Reduce list as needed.
    // Modifying the reference in Reduce list to point to the newly
    // created element. The element is live in the current function
    // scope and that of functions it invokes (i.e., reduce_function).
    // RemoteReduceData[i] = (void*)&RemoteElem
    if (UpdateDestListPtr) {
      CGF.EmitStoreOfScalar(Bld.CreatePointerBitCastOrAddrSpaceCast(
                                DestElementAddr.getPointer(), CGF.VoidPtrTy),
                            DestElementPtrAddr, /*Volatile=*/false,
                            C.VoidPtrTy);
    }

    // Step 4.1: Increment SrcBase/DestBase so that it points to the starting
    // address of the next element in scratchpad memory, unless we're currently
    // processing the last one. Memory alignment is also taken care of here.
    if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
      llvm::Value *ScratchpadBasePtr =
          IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
      unsigned ElementSizeInChars =
          C.getTypeSizeInChars(Private->getType()).getQuantity();
      ScratchpadBasePtr = Bld.CreateAdd(
          ScratchpadBasePtr,
          Bld.CreateMul(ScratchpadWidth, llvm::ConstantInt::get(
                                             CGM.SizeTy, ElementSizeInChars)));

      // Take care of global memory alignment for performance
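      // (i.e. round the base up to the next multiple of GlobalMemoryAlignment:
      //  base = ((base - 1) / GlobalMemoryAlignment + 1) * GlobalMemoryAlignment)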
      ScratchpadBasePtr = Bld.CreateSub(ScratchpadBasePtr,
                                        llvm::ConstantInt::get(CGM.SizeTy, 1));
      ScratchpadBasePtr = Bld.CreateSDiv(
          ScratchpadBasePtr,
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
      ScratchpadBasePtr = Bld.CreateAdd(ScratchpadBasePtr,
                                        llvm::ConstantInt::get(CGM.SizeTy, 1));
      ScratchpadBasePtr = Bld.CreateMul(
          ScratchpadBasePtr,
          llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));

      if (IncrScratchpadDest)
        DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
      else /* IncrScratchpadSrc = true */
        SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
    }

    Idx++;
  }
}

/// This function emits a helper that loads data from the scratchpad array
/// and (optionally) reduces it with the input operand.
///
///  load_and_reduce(local, scratchpad, index, width, should_reduce)
///  reduce_data remote;
///  for elem in remote:
///    remote.elem = Scratchpad[elem_id][index]
///  if (should_reduce)
///    local = local @ remote
///  else
///    local = remote
static llvm::Value *
emitReduceScratchpadFunction(CodeGenModule &CGM,
                             ArrayRef<const Expr *> Privates,
                             QualType ReductionArrayTy, llvm::Value *ReduceFn) {
  auto &C = CGM.getContext();
  auto Int32Ty = C.getIntTypeForBitwidth(32, /* Signed */ true);

  // Destination of the copy.
  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // Base address of the scratchpad array, with each element storing a
  // Reduce list per team.
  ImplicitParamDecl ScratchPadArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
  // A source index into the scratchpad array.
  ImplicitParamDecl IndexArg(C, Int32Ty, ImplicitParamDecl::Other);
  // Row width of an element in the scratchpad array, typically
  // the number of teams.
  ImplicitParamDecl WidthArg(C, Int32Ty, ImplicitParamDecl::Other);
  // If should_reduce == 1, then it's load AND reduce,
  // If should_reduce == 0 (or otherwise), then it only loads (+ copy).
  // The latter case is used for initialization.
  ImplicitParamDecl ShouldReduceArg(C, Int32Ty, ImplicitParamDecl::Other);

  FunctionArgList Args;
  Args.push_back(&ReduceListArg);
  Args.push_back(&ScratchPadArg);
  Args.push_back(&IndexArg);
  Args.push_back(&WidthArg);
  Args.push_back(&ShouldReduceArg);

  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
  auto *Fn = llvm::Function::Create(
      CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
      "_omp_reduction_load_and_reduce", &CGM.getModule());
  CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
  CodeGenFunction CGF(CGM);
  // We don't need debug information in this function as nothing here refers to
  // user code.
  CGF.disableDebugInfo();
  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);

  auto &Bld = CGF.Builder;

  // Get local Reduce list pointer.
  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
  Address ReduceListAddr(
      Bld.CreatePointerBitCastOrAddrSpaceCast(
          CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
                               C.VoidPtrTy, SourceLocation()),
          CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
      CGF.getPointerAlign());

  Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
  llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
      AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());

  Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
  llvm::Value *IndexVal =
      Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false,
                                             Int32Ty, SourceLocation()),
                        CGM.SizeTy, /*isSigned=*/true);

  Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
  llvm::Value *WidthVal =
      Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
                                             Int32Ty, SourceLocation()),
                        CGM.SizeTy, /*isSigned=*/true);

  Address AddrShouldReduceArg = CGF.GetAddrOfLocalVar(&ShouldReduceArg);
  llvm::Value *ShouldReduceVal = CGF.EmitLoadOfScalar(
      AddrShouldReduceArg, /*Volatile=*/false, Int32Ty, SourceLocation());

  // The absolute ptr address to the base addr of the next element to copy.
  llvm::Value *CumulativeElemBasePtr =
      Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
  Address SrcDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());

  // Create a Remote Reduce list to store the elements read from the
  // scratchpad array.
  Address RemoteReduceList =
      CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_red_list");

  // Assemble remote Reduce list from scratchpad array.
  emitReductionListCopy(ScratchpadToThread, CGF, ReductionArrayTy, Privates,
                        SrcDataAddr, RemoteReduceList,
                        {/*RemoteLaneOffset=*/nullptr,
                         /*ScratchpadIndex=*/IndexVal,
                         /*ScratchpadWidth=*/WidthVal});

  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");

  auto CondReduce = Bld.CreateICmpEQ(ShouldReduceVal, Bld.getInt32(1));
  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);

  CGF.EmitBlock(ThenBB);
  // We should reduce with the local Reduce list.
  // reduce_function(LocalReduceList, RemoteReduceList)
  llvm::Value *LocalDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      ReduceListAddr.getPointer(), CGF.VoidPtrTy);
  llvm::Value *RemoteDataPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
      RemoteReduceList.getPointer(), CGF.VoidPtrTy);
  CGF.EmitCallOrInvoke(ReduceFn, {LocalDataPtr, RemoteDataPtr});
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(ElseBB);
  // No reduction; just copy:
  // Local Reduce list = Remote Reduce list.
  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
                        RemoteReduceList, ReduceListAddr);
  Bld.CreateBr(MergeBB);

  CGF.EmitBlock(MergeBB);

  CGF.FinishFunction();
  return Fn;
}
1430 
1431 /// This function emits a helper that stores reduced data from the team
1432 /// master to a scratchpad array in global memory.
1433 ///
1434 /// for elem in Reduce List:
1435 /// scratchpad[elem_id][index] = elem
1436 ///
1437 static llvm::Value *emitCopyToScratchpad(CodeGenModule &CGM,
1438  ArrayRef<const Expr *> Privates,
1439  QualType ReductionArrayTy) {
1440 
1441  auto &C = CGM.getContext();
1442  auto Int32Ty = C.getIntTypeForBitwidth(32, /* Signed */ true);
1443 
1444  // Source of the copy.
1445  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
1446  // Base address of the scratchpad array, with each element storing a
1447  // Reduce list per team.
1448  ImplicitParamDecl ScratchPadArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
1449  // A destination index into the scratchpad array, typically the team
1450  // identifier.
1451  ImplicitParamDecl IndexArg(C, Int32Ty, ImplicitParamDecl::Other);
1452  // Row width of an element in the scratchpad array, typically
1453  // the number of teams.
1454  ImplicitParamDecl WidthArg(C, Int32Ty, ImplicitParamDecl::Other);
1455 
1456  FunctionArgList Args;
1457  Args.push_back(&ReduceListArg);
1458  Args.push_back(&ScratchPadArg);
1459  Args.push_back(&IndexArg);
1460  Args.push_back(&WidthArg);
1461 
1462  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1463  auto *Fn = llvm::Function::Create(
1464  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
1465  "_omp_reduction_copy_to_scratchpad", &CGM.getModule());
1466  CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
1467  CodeGenFunction CGF(CGM);
1468  // We don't need debug information in this function as nothing here refers to
1469  // user code.
1470  CGF.disableDebugInfo();
1471  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
1472 
1473  auto &Bld = CGF.Builder;
1474 
1475  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
1476  Address SrcDataAddr(
1477  Bld.CreatePointerBitCastOrAddrSpaceCast(
1478  CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
1479  C.VoidPtrTy, SourceLocation()),
1480  CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
1481  CGF.getPointerAlign());
1482 
1483  Address AddrScratchPadArg = CGF.GetAddrOfLocalVar(&ScratchPadArg);
1484  llvm::Value *ScratchPadBase = CGF.EmitLoadOfScalar(
1485  AddrScratchPadArg, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
1486 
1487  Address AddrIndexArg = CGF.GetAddrOfLocalVar(&IndexArg);
1488  llvm::Value *IndexVal =
1489  Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrIndexArg, /*Volatile=*/false,
1490  Int32Ty, SourceLocation()),
1491  CGF.SizeTy, /*isSigned=*/true);
1492 
1493  Address AddrWidthArg = CGF.GetAddrOfLocalVar(&WidthArg);
1494  llvm::Value *WidthVal =
1495  Bld.CreateIntCast(CGF.EmitLoadOfScalar(AddrWidthArg, /*Volatile=*/false,
1496  Int32Ty, SourceLocation()),
1497  CGF.SizeTy, /*isSigned=*/true);
1498 
1499  // The absolute pointer to the base address of the next element to copy.
1500  llvm::Value *CumulativeElemBasePtr =
1501  Bld.CreatePtrToInt(ScratchPadBase, CGM.SizeTy);
1502  Address DestDataAddr(CumulativeElemBasePtr, CGF.getPointerAlign());
1503 
1504  emitReductionListCopy(ThreadToScratchpad, CGF, ReductionArrayTy, Privates,
1505  SrcDataAddr, DestDataAddr,
1506  {/*RemoteLaneOffset=*/nullptr,
1507  /*ScratchpadIndex=*/IndexVal,
1508  /*ScratchpadWidth=*/WidthVal});
1509 
1510  CGF.FinishFunction();
1511  return Fn;
1512 }
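
The stored layout matches the sketch after the previous function: what one team writes here is read back later with `rows[elem * width + index]`. A minimal C++ analogue of the emitted copy helper, again assuming a two-double Reduce list and hypothetical names:

    // Sketch only: the team master deposits its Reduce list into its slot
    // of the global scratchpad; element i goes to row i, column `index`.
    void copy_to_scratchpad(void **local_list, void *scratchpad, long index,
                            long width) {
      double *rows = static_cast<double *>(scratchpad);
      for (int elem = 0; elem < 2; ++elem)
        rows[elem * width + index] = *static_cast<double *>(local_list[elem]);
    }
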
1513 
1514 /// This function emits a helper that gathers Reduce lists from the first
1515 /// lane of every active warp to lanes in the first warp.
1516 ///
1517 /// void inter_warp_copy_func(void* reduce_data, num_warps)
1518 /// shared smem[warp_size];
1519 /// For all data entries D in reduce_data:
1520 /// If (I am the first lane in each warp)
1521 /// Copy my local D to smem[warp_id]
1522 /// sync
1523 /// if (I am the first warp)
1524 /// Copy smem[thread_id] to my local D
1525 /// sync
1526 static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
1527  ArrayRef<const Expr *> Privates,
1528  QualType ReductionArrayTy) {
1529  auto &C = CGM.getContext();
1530  auto &M = CGM.getModule();
1531 
1532  // ReduceList: thread local Reduce list.
1533  // At the stage of the computation when this function is called, partially
1534  // aggregated values reside in the first lane of every active warp.
1535  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
1536  // NumWarps: number of warps active in the parallel region. This could
1537  // be smaller than 32 (max warps in a CTA) for partial block reduction.
1538  ImplicitParamDecl NumWarpsArg(C,
1539  C.getIntTypeForBitwidth(32, /* Signed */ true),
1540  ImplicitParamDecl::Other);
1541  FunctionArgList Args;
1542  Args.push_back(&ReduceListArg);
1543  Args.push_back(&NumWarpsArg);
1544 
1545  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1546  auto *Fn = llvm::Function::Create(
1547  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
1548  "_omp_reduction_inter_warp_copy_func", &CGM.getModule());
1549  CGM.SetInternalFunctionAttributes(/*DC=*/nullptr, Fn, CGFI);
1550  CodeGenFunction CGF(CGM);
1551  // We don't need debug information in this function as nothing here refers to
1552  // user code.
1553  CGF.disableDebugInfo();
1554  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
1555 
1556  auto &Bld = CGF.Builder;
1557 
1558  // This array is used as a medium to transfer, one reduce element at a time,
1559  // the data from the first lane of every warp to lanes in the first warp
1560  // in order to perform the final step of a reduction in a parallel region
1561  // (reduction across warps). The array is placed in NVPTX __shared__ memory
1562  // for reduced latency, as well as to have a distinct copy for concurrently
1563  // executing target regions. The array is declared with common linkage so
1564  // as to be shared across compilation units.
1565  const char *TransferMediumName =
1566  "__openmp_nvptx_data_transfer_temporary_storage";
1567  llvm::GlobalVariable *TransferMedium =
1568  M.getGlobalVariable(TransferMediumName);
1569  if (!TransferMedium) {
1570  auto *Ty = llvm::ArrayType::get(CGM.Int64Ty, WarpSize);
1571  unsigned SharedAddressSpace = C.getTargetAddressSpace(LangAS::cuda_shared);
1572  TransferMedium = new llvm::GlobalVariable(
1573  M, Ty,
1574  /*isConstant=*/false, llvm::GlobalVariable::CommonLinkage,
1575  llvm::Constant::getNullValue(Ty), TransferMediumName,
1576  /*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal,
1577  SharedAddressSpace);
1578  }
1579 
1580  // Get the CUDA thread id of the current OpenMP thread on the GPU.
1581  auto *ThreadID = getNVPTXThreadID(CGF);
1582  // nvptx_lane_id = nvptx_id % warpsize
1583  auto *LaneID = getNVPTXLaneID(CGF);
1584  // nvptx_warp_id = nvptx_id / warpsize
1585  auto *WarpID = getNVPTXWarpID(CGF);
1586 
1587  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
1588  Address LocalReduceList(
1589  Bld.CreatePointerBitCastOrAddrSpaceCast(
1590  CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
1591  C.VoidPtrTy, SourceLocation()),
1592  CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
1593  CGF.getPointerAlign());
1594 
1595  unsigned Idx = 0;
1596  for (auto &Private : Privates) {
1597  //
1598  // Warp master copies reduce element to transfer medium in __shared__
1599  // memory.
1600  //
1601  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
1602  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
1603  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
1604 
1605  // if (lane_id == 0)
1606  auto IsWarpMaster =
1607  Bld.CreateICmpEQ(LaneID, Bld.getInt32(0), "warp_master");
1608  Bld.CreateCondBr(IsWarpMaster, ThenBB, ElseBB);
1609  CGF.EmitBlock(ThenBB);
1610 
1611  // Reduce element = LocalReduceList[i]
1612  Address ElemPtrPtrAddr =
1613  Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
1614  llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
1615  ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
1616  // elemptr = (type[i]*)(elemptrptr)
1617  Address ElemPtr =
1618  Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
1619  ElemPtr = Bld.CreateElementBitCast(
1620  ElemPtr, CGF.ConvertTypeForMem(Private->getType()));
1621  // elem = *elemptr
1622  llvm::Value *Elem = CGF.EmitLoadOfScalar(
1623  ElemPtr, /*Volatile=*/false, Private->getType(), SourceLocation());
1624 
1625  // Get pointer to location in transfer medium.
1626  // MediumPtr = &medium[warp_id]
1627  llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
1628  TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
1629  Address MediumPtr(MediumPtrVal, C.getTypeAlignInChars(Private->getType()));
1630  // Casting to actual data type.
1631  // MediumPtr = (type[i]*)MediumPtrAddr;
1632  MediumPtr = Bld.CreateElementBitCast(
1633  MediumPtr, CGF.ConvertTypeForMem(Private->getType()));
1634 
1635  //*MediumPtr = elem
1636  Bld.CreateStore(Elem, MediumPtr);
1637 
1638  Bld.CreateBr(MergeBB);
1639 
1640  CGF.EmitBlock(ElseBB);
1641  Bld.CreateBr(MergeBB);
1642 
1643  CGF.EmitBlock(MergeBB);
1644 
1645  Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg);
1646  llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar(
1647  AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, SourceLocation());
1648 
1649  auto *NumActiveThreads = Bld.CreateNSWMul(
1650  NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads");
1651  // named_barrier_sync(ParallelBarrierID, num_active_threads)
1652  syncParallelThreads(CGF, NumActiveThreads);
1653 
1654  //
1655  // Warp 0 copies reduce element from transfer medium.
1656  //
1657  llvm::BasicBlock *W0ThenBB = CGF.createBasicBlock("then");
1658  llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else");
1659  llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont");
1660 
1661  // Up to 32 threads in warp 0 are active.
1662  auto IsActiveThread =
1663  Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread");
1664  Bld.CreateCondBr(IsActiveThread, W0ThenBB, W0ElseBB);
1665 
1666  CGF.EmitBlock(W0ThenBB);
1667 
1668  // SrcMediumPtr = &medium[tid]
1669  llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
1670  TransferMedium, {llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
1671  Address SrcMediumPtr(SrcMediumPtrVal,
1672  C.getTypeAlignInChars(Private->getType()));
1673  // SrcMediumVal = *SrcMediumPtr;
1674  SrcMediumPtr = Bld.CreateElementBitCast(
1675  SrcMediumPtr, CGF.ConvertTypeForMem(Private->getType()));
1676  llvm::Value *SrcMediumValue = CGF.EmitLoadOfScalar(
1677  SrcMediumPtr, /*Volatile=*/false, Private->getType(), SourceLocation());
1678 
1679  // TargetElemPtr = (type[i]*)(SrcDataAddr[i])
1680  Address TargetElemPtrPtr =
1681  Bld.CreateConstArrayGEP(LocalReduceList, Idx, CGF.getPointerSize());
1682  llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
1683  TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
1684  Address TargetElemPtr =
1685  Address(TargetElemPtrVal, C.getTypeAlignInChars(Private->getType()));
1686  TargetElemPtr = Bld.CreateElementBitCast(
1687  TargetElemPtr, CGF.ConvertTypeForMem(Private->getType()));
1688 
1689  // *TargetElemPtr = SrcMediumVal;
1690  CGF.EmitStoreOfScalar(SrcMediumValue, TargetElemPtr, /*Volatile=*/false,
1691  Private->getType());
1692  Bld.CreateBr(W0MergeBB);
1693 
1694  CGF.EmitBlock(W0ElseBB);
1695  Bld.CreateBr(W0MergeBB);
1696 
1697  CGF.EmitBlock(W0MergeBB);
1698 
1699  // While warp 0 copies values from transfer medium, all other warps must
1700  // wait.
1701  syncParallelThreads(CGF, NumActiveThreads);
1702  Idx++;
1703  }
1704 
1705  CGF.FinishFunction();
1706  return Fn;
1707 }
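
A host-side simulation can make the two-phase protocol above concrete. The sketch below models four warp masters publishing their partial sums through a 32-slot transfer medium, then warp 0 collecting them; the warp count and values are made up for illustration.

    #include <cstdio>

    int main() {
      const int WarpSize = 32, NumWarps = 4;
      float warpReduced[NumWarps] = {1.f, 2.f, 3.f, 4.f}; // one per lane 0
      float medium[WarpSize] = {};                        // "__shared__" array

      // Phase 1: each warp master writes its element to medium[warp_id].
      for (int warp = 0; warp < NumWarps; ++warp)
        medium[warp] = warpReduced[warp];
      // (on the GPU, named_barrier_sync over NumWarps * WarpSize threads here)

      // Phase 2: the first NumWarps lanes of warp 0 read medium[thread_id];
      // a subsequent warp-level reduction then produces the block total.
      float total = 0.f;
      for (int tid = 0; tid < NumWarps; ++tid)
        total += medium[tid];
      std::printf("block total = %g\n", total); // 10
      return 0;
    }
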
1708 
1709 /// Emit a helper that reduces data across two OpenMP threads (lanes)
1710 /// in the same warp. It uses shuffle instructions to copy over data from
1711 /// a remote lane's stack. The reduction algorithm performed is specified
1712 /// by the fourth parameter.
1713 ///
1714 /// Algorithm Versions.
1715 /// Full Warp Reduce (argument value 0):
1716 /// This algorithm assumes that all 32 lanes are active and gathers
1717 /// data from these 32 lanes, producing a single resultant value.
1718 /// Contiguous Partial Warp Reduce (argument value 1):
1719 /// This algorithm assumes that only a *contiguous* subset of lanes
1720 /// are active. This happens for the last warp in a parallel region
1721 /// when the user specified num_threads is not an integer multiple of
1722 /// 32. This contiguous subset always starts with the zeroth lane.
1723 /// Partial Warp Reduce (argument value 2):
1724 /// This algorithm gathers data from any number of lanes at any position.
1725 /// All reduced values are stored in the lowest possible lane. The set
1726 /// of problems every algorithm addresses is a superset of those
1727 /// addressable by algorithms with a lower version number. Overhead
1728 /// increases as algorithm version increases.
1729 ///
1730 /// Terminology
1731 /// Reduce element:
1732 /// Reduce element refers to the individual data field with primitive
1733 /// data types to be combined and reduced across threads.
1734 /// Reduce list:
1735 /// Reduce list refers to a collection of local, thread-private
1736 /// reduce elements.
1737 /// Remote Reduce list:
1738 /// Remote Reduce list refers to a collection of remote (relative to
1739 /// the current thread) reduce elements.
1740 ///
1741 /// We distinguish between three states of threads that are important to
1742 /// the implementation of this function.
1743 /// Alive threads:
1744 /// Threads in a warp executing the SIMT instruction, as distinguished from
1745 /// threads that are inactive due to divergent control flow.
1746 /// Active threads:
1747 /// The minimal set of threads that has to be alive upon entry to this
1748 /// function. The computation is correct iff active threads are alive.
1749 /// Some threads are alive but they are not active because they do not
1750 /// contribute to the computation in any useful manner. Turning them off
1751 /// may introduce control flow overheads without any tangible benefits.
1752 /// Effective threads:
1753 /// In order to comply with the argument requirements of the shuffle
1754 /// function, we must keep all lanes holding data alive. But at most
1755 /// half of them perform value aggregation; we refer to this half of
1756 /// threads as effective. The other half is simply handing off their
1757 /// data.
1758 ///
1759 /// Procedure
1760 /// Value shuffle:
1761 /// In this step active threads transfer data from higher lane positions
1762 /// in the warp to lower lane positions, creating Remote Reduce list.
1763 /// Value aggregation:
1764 /// In this step, effective threads combine their thread local Reduce list
1765 /// with Remote Reduce list and store the result in the thread local
1766 /// Reduce list.
1767 /// Value copy:
1768 /// In this step, we deal with the assumption made by algorithm 2
1769 /// (i.e. contiguity assumption). When we have an odd number of lanes
1770 /// active, say 2k+1, only k threads will be effective and therefore k
1771 /// new values will be produced. However, the Reduce list owned by the
1772 /// (2k+1)th thread is ignored in the value aggregation. Therefore
1773 /// we copy the Reduce list from the (2k+1)th lane to (k+1)th lane so
1774 /// that the contiguity assumption still holds.
1775 static llvm::Value *
1776 emitShuffleAndReduceFunction(CodeGenModule &CGM,
1777  ArrayRef<const Expr *> Privates,
1778  QualType ReductionArrayTy, llvm::Value *ReduceFn) {
1779  auto &C = CGM.getContext();
1780 
1781  // Thread local Reduce list used to host the values of data to be reduced.
1782  ImplicitParamDecl ReduceListArg(C, C.VoidPtrTy, ImplicitParamDecl::Other);
1783  // Current lane id; could be logical.
1784  ImplicitParamDecl LaneIDArg(C, C.ShortTy, ImplicitParamDecl::Other);
1785  // Offset of the remote source lane relative to the current lane.
1786  ImplicitParamDecl RemoteLaneOffsetArg(C, C.ShortTy,
1787  ImplicitParamDecl::Other);
1788  // Algorithm version. This is expected to be known at compile time.
1789  ImplicitParamDecl AlgoVerArg(C, C.ShortTy, ImplicitParamDecl::Other);
1790  FunctionArgList Args;
1791  Args.push_back(&ReduceListArg);
1792  Args.push_back(&LaneIDArg);
1793  Args.push_back(&RemoteLaneOffsetArg);
1794  Args.push_back(&AlgoVerArg);
1795 
1796  auto &CGFI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
1797  auto *Fn = llvm::Function::Create(
1798  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
1799  "_omp_reduction_shuffle_and_reduce_func", &CGM.getModule());
1800  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
1801  CodeGenFunction CGF(CGM);
1802  // We don't need debug information in this function as nothing here refers to
1803  // user code.
1804  CGF.disableDebugInfo();
1805  CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, CGFI, Args);
1806 
1807  auto &Bld = CGF.Builder;
1808 
1809  Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
1810  Address LocalReduceList(
1811  Bld.CreatePointerBitCastOrAddrSpaceCast(
1812  CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
1813  C.VoidPtrTy, SourceLocation()),
1814  CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
1815  CGF.getPointerAlign());
1816 
1817  Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
1818  llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
1819  AddrLaneIDArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
1820 
1821  Address AddrRemoteLaneOffsetArg = CGF.GetAddrOfLocalVar(&RemoteLaneOffsetArg);
1822  llvm::Value *RemoteLaneOffsetArgVal = CGF.EmitLoadOfScalar(
1823  AddrRemoteLaneOffsetArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
1824 
1825  Address AddrAlgoVerArg = CGF.GetAddrOfLocalVar(&AlgoVerArg);
1826  llvm::Value *AlgoVerArgVal = CGF.EmitLoadOfScalar(
1827  AddrAlgoVerArg, /*Volatile=*/false, C.ShortTy, SourceLocation());
1828 
1829  // Create a local thread-private variable to host the Reduce list
1830  // from a remote lane.
1831  Address RemoteReduceList =
1832  CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.remote_reduce_list");
1833 
1834  // This loop iterates through the list of reduce elements and copies,
1835  // element by element, from a remote lane in the warp to RemoteReduceList,
1836  // hosted on the thread's stack.
1837  emitReductionListCopy(RemoteLaneToThread, CGF, ReductionArrayTy, Privates,
1838  LocalReduceList, RemoteReduceList,
1839  {/*RemoteLaneOffset=*/RemoteLaneOffsetArgVal,
1840  /*ScratchpadIndex=*/nullptr,
1841  /*ScratchpadWidth=*/nullptr});
1842 
1843  // The actions to be performed on the Remote Reduce list depend
1844  // on the algorithm version.
1845  //
1846  // if (AlgoVer==0) || (AlgoVer==1 && (LaneId < Offset)) || (AlgoVer==2 &&
1847  // LaneId % 2 == 0 && Offset > 0):
1848  // do the reduction value aggregation
1849  //
1850  // The thread local variable Reduce list is mutated in place to host the
1851  // reduced data, which is the aggregated value produced from local and
1852  // remote lanes.
1853  //
1854  // Note that AlgoVer is expected to be a constant integer known at compile
1855  // time.
1856  // When AlgoVer==0, the first conjunction evaluates to true, making
1857  // the entire predicate true at compile time.
1858  // When AlgoVer==1, only the second part of the second conjunction needs
1859  // to be evaluated at runtime; the other conjunctions evaluate to false
1860  // at compile time.
1861  // When AlgoVer==2, only the second part of the third conjunction needs
1862  // to be evaluated at runtime; the other conjunctions evaluate to false
1863  // at compile time.
1864  auto CondAlgo0 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(0));
1865 
1866  auto Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
1867  auto CondAlgo1 = Bld.CreateAnd(
1868  Algo1, Bld.CreateICmpULT(LaneIDArgVal, RemoteLaneOffsetArgVal));
1869 
1870  auto Algo2 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(2));
1871  auto CondAlgo2 = Bld.CreateAnd(
1872  Algo2,
1873  Bld.CreateICmpEQ(Bld.CreateAnd(LaneIDArgVal, Bld.getInt16(1)),
1874  Bld.getInt16(0)));
1875  CondAlgo2 = Bld.CreateAnd(
1876  CondAlgo2, Bld.CreateICmpSGT(RemoteLaneOffsetArgVal, Bld.getInt16(0)));
1877 
1878  auto CondReduce = Bld.CreateOr(CondAlgo0, CondAlgo1);
1879  CondReduce = Bld.CreateOr(CondReduce, CondAlgo2);
1880 
1881  llvm::BasicBlock *ThenBB = CGF.createBasicBlock("then");
1882  llvm::BasicBlock *ElseBB = CGF.createBasicBlock("else");
1883  llvm::BasicBlock *MergeBB = CGF.createBasicBlock("ifcont");
1884  Bld.CreateCondBr(CondReduce, ThenBB, ElseBB);
1885 
1886  CGF.EmitBlock(ThenBB);
1887  // reduce_function(LocalReduceList, RemoteReduceList)
1888  llvm::Value *LocalReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1889  LocalReduceList.getPointer(), CGF.VoidPtrTy);
1890  llvm::Value *RemoteReduceListPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
1891  RemoteReduceList.getPointer(), CGF.VoidPtrTy);
1892  CGF.EmitCallOrInvoke(ReduceFn, {LocalReduceListPtr, RemoteReduceListPtr});
1893  Bld.CreateBr(MergeBB);
1894 
1895  CGF.EmitBlock(ElseBB);
1896  Bld.CreateBr(MergeBB);
1897 
1898  CGF.EmitBlock(MergeBB);
1899 
1900  // if (AlgoVer==1 && (LaneId >= Offset)) copy Remote Reduce list to local
1901  // Reduce list.
1902  Algo1 = Bld.CreateICmpEQ(AlgoVerArgVal, Bld.getInt16(1));
1903  auto CondCopy = Bld.CreateAnd(
1904  Algo1, Bld.CreateICmpUGE(LaneIDArgVal, RemoteLaneOffsetArgVal));
1905 
1906  llvm::BasicBlock *CpyThenBB = CGF.createBasicBlock("then");
1907  llvm::BasicBlock *CpyElseBB = CGF.createBasicBlock("else");
1908  llvm::BasicBlock *CpyMergeBB = CGF.createBasicBlock("ifcont");
1909  Bld.CreateCondBr(CondCopy, CpyThenBB, CpyElseBB);
1910 
1911  CGF.EmitBlock(CpyThenBB);
1912  emitReductionListCopy(ThreadCopy, CGF, ReductionArrayTy, Privates,
1913  RemoteReduceList, LocalReduceList);
1914  Bld.CreateBr(CpyMergeBB);
1915 
1916  CGF.EmitBlock(CpyElseBB);
1917  Bld.CreateBr(CpyMergeBB);
1918 
1919  CGF.EmitBlock(CpyMergeBB);
1920 
1921  CGF.FinishFunction();
1922  return Fn;
1923 }
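
The gating logic above reduces to two small predicates. Restated as plain C++ for clarity (a paraphrase of the emitted IR under hypothetical names, not runtime code):

    // Whether this lane performs value aggregation for a given algorithm
    // version; mirrors CondAlgo0 || CondAlgo1 || CondAlgo2 above.
    bool shouldReduce(int algoVer, int laneId, int remoteOffset) {
      return algoVer == 0 ||
             (algoVer == 1 && laneId < remoteOffset) ||
             (algoVer == 2 && laneId % 2 == 0 && remoteOffset > 0);
    }

    // Whether this lane instead copies the remote list over its own,
    // keeping algorithm 1's contiguity assumption intact; mirrors CondCopy.
    bool shouldCopy(int algoVer, int laneId, int remoteOffset) {
      return algoVer == 1 && laneId >= remoteOffset;
    }
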
1924 
1925 ///
1926 /// Design of OpenMP reductions on the GPU
1927 ///
1928 /// Consider a typical OpenMP program with one or more reduction
1929 /// clauses:
1930 ///
1931 /// float foo;
1932 /// double bar;
1933 /// #pragma omp target teams distribute parallel for \
1934 /// reduction(+:foo) reduction(*:bar)
1935 /// for (int i = 0; i < N; i++) {
1936 /// foo += A[i]; bar *= B[i];
1937 /// }
1938 ///
1939 /// where 'foo' and 'bar' are reduced across all OpenMP threads in
1940 /// all teams. In our OpenMP implementation on the NVPTX device an
1941 /// OpenMP team is mapped to a CUDA threadblock and OpenMP threads
1942 /// within a team are mapped to CUDA threads within a threadblock.
1943 /// Our goal is to efficiently aggregate values across all OpenMP
1944 /// threads such that:
1945 ///
1946 /// - the compiler and runtime are logically concise, and
1947 /// - the reduction is performed efficiently in a hierarchical
1948 /// manner as follows: within OpenMP threads in the same warp,
1949 /// across warps in a threadblock, and finally across teams on
1950 /// the NVPTX device.
1951 ///
1952 /// Introduction to Decoupling
1953 ///
1954 /// We would like to decouple the compiler and the runtime so that the
1955 /// latter is ignorant of the reduction variables (number, data types)
1956 /// and the reduction operators. This allows a simpler interface
1957 /// and implementation while still attaining good performance.
1958 ///
1959 /// Pseudocode for the aforementioned OpenMP program generated by the
1960 /// compiler is as follows:
1961 ///
1962 /// 1. Create private copies of reduction variables on each OpenMP
1963 /// thread: 'foo_private', 'bar_private'
1964 /// 2. Each OpenMP thread reduces the chunk of 'A' and 'B' assigned
1965 /// to it and writes the result in 'foo_private' and 'bar_private'
1966 /// respectively.
1967 /// 3. Call the OpenMP runtime on the GPU to reduce within a team
1968 /// and store the result on the team master:
1969 ///
1970 /// __kmpc_nvptx_parallel_reduce_nowait(...,
1971 /// reduceData, shuffleReduceFn, interWarpCpyFn)
1972 ///
1973 /// where:
1974 /// struct ReduceData {
1975 /// double *foo;
1976 /// double *bar;
1977 /// } reduceData
1978 /// reduceData.foo = &foo_private
1979 /// reduceData.bar = &bar_private
1980 ///
1981 /// 'shuffleReduceFn' and 'interWarpCpyFn' are pointers to two
1982 /// auxiliary functions generated by the compiler that operate on
1983  /// variables of type 'ReduceData'. They aid the runtime in performing
1984  /// algorithmic steps in a data-agnostic manner.
1985 ///
1986 /// 'shuffleReduceFn' is a pointer to a function that reduces data
1987 /// of type 'ReduceData' across two OpenMP threads (lanes) in the
1988 /// same warp. It takes the following arguments as input:
1989 ///
1990 /// a. variable of type 'ReduceData' on the calling lane,
1991 /// b. its lane_id,
1992 /// c. an offset relative to the current lane_id to generate a
1993 /// remote_lane_id. The remote lane contains the second
1994 /// variable of type 'ReduceData' that is to be reduced.
1995 /// d. an algorithm version parameter determining which reduction
1996 /// algorithm to use.
1997 ///
1998 /// 'shuffleReduceFn' retrieves data from the remote lane using
1999 /// efficient GPU shuffle intrinsics and reduces, using the
2000 /// algorithm specified by the 4th parameter, the two operands
2001 /// element-wise. The result is written to the first operand.
2002 ///
2003 /// Different reduction algorithms are implemented in different
2004 /// runtime functions, all calling 'shuffleReduceFn' to perform
2005 /// the essential reduction step. Therefore, based on the 4th
2006 /// parameter, this function behaves slightly differently to
2007 /// cooperate with the runtime to ensure correctness under
2008 /// different circumstances.
2009 ///
2010 /// 'InterWarpCpyFn' is a pointer to a function that transfers
2011 /// reduced variables across warps. It tunnels, through CUDA
2012 /// shared memory, the thread-private data of type 'ReduceData'
2013 /// from lane 0 of each warp to a lane in the first warp.
2014 /// 4. Call the OpenMP runtime on the GPU to reduce across teams.
2015 /// The last team writes the global reduced value to memory.
2016 ///
2017 /// ret = __kmpc_nvptx_teams_reduce_nowait(...,
2018 /// reduceData, shuffleReduceFn, interWarpCpyFn,
2019 /// scratchpadCopyFn, loadAndReduceFn)
2020 ///
2021 /// 'scratchpadCopyFn' is a helper that stores reduced
2022 /// data from the team master to a scratchpad array in
2023 /// global memory.
2024 ///
2025 /// 'loadAndReduceFn' is a helper that loads data from
2026 /// the scratchpad array and reduces it with the input
2027 /// operand.
2028 ///
2029 /// These compiler generated functions hide address
2030 /// calculation and alignment information from the runtime.
2031 /// 5. if ret == 1:
2032 /// The team master of the last team stores the reduced
2033 /// result to the globals in memory.
2034 /// foo += reduceData.foo; bar *= reduceData.bar
2035 ///
2036 ///
2037 /// Warp Reduction Algorithms
2038 ///
2039 /// On the warp level, we have three algorithms implemented in the
2040 /// OpenMP runtime depending on the number of active lanes:
2041 ///
2042 /// Full Warp Reduction
2043 ///
2044 /// The reduce algorithm within a warp where all lanes are active
2045 /// is implemented in the runtime as follows:
2046 ///
2047 /// full_warp_reduce(void *reduce_data,
2048 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
2049 /// for (int offset = WARPSIZE/2; offset > 0; offset /= 2)
2050 /// ShuffleReduceFn(reduce_data, 0, offset, 0);
2051 /// }
2052 ///
2053 /// The algorithm completes in log(2, WARPSIZE) steps.
2054 ///
2055  /// 'ShuffleReduceFn' is used here with lane_id set to 0 because it is
2056  /// not used; we therefore save instructions by not retrieving lane_id
2057  /// from the corresponding special registers. The 4th parameter, which
2058 /// represents the version of the algorithm being used, is set to 0 to
2059 /// signify full warp reduction.
2060 ///
2061 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2062 ///
2063 /// #reduce_elem refers to an element in the local lane's data structure
2064 /// #remote_elem is retrieved from a remote lane
2065 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2066 /// reduce_elem = reduce_elem REDUCE_OP remote_elem;
2067 ///
2068 /// Contiguous Partial Warp Reduction
2069 ///
2070 /// This reduce algorithm is used within a warp where only the first
2071 /// 'n' (n <= WARPSIZE) lanes are active. It is typically used when the
2072 /// number of OpenMP threads in a parallel region is not a multiple of
2073 /// WARPSIZE. The algorithm is implemented in the runtime as follows:
2074 ///
2075 /// void
2076 /// contiguous_partial_reduce(void *reduce_data,
2077 /// kmp_ShuffleReductFctPtr ShuffleReduceFn,
2078 /// int size, int lane_id) {
2079 /// int curr_size;
2080 /// int offset;
2081 /// curr_size = size;
2082  /// offset = curr_size/2;
2083  /// while (offset > 0) {
2084 /// ShuffleReduceFn(reduce_data, lane_id, offset, 1);
2085 /// curr_size = (curr_size+1)/2;
2086 /// offset = curr_size/2;
2087 /// }
2088 /// }
2089 ///
2090 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2091 ///
2092 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2093 /// if (lane_id < offset)
2094 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
2095 /// else
2096 /// reduce_elem = remote_elem
2097 ///
2098 /// This algorithm assumes that the data to be reduced are located in a
2099 /// contiguous subset of lanes starting from the first. When there is
2100 /// an odd number of active lanes, the data in the last lane is not
2101  /// aggregated with any other lane's data but is instead copied over.
2102 ///
2103 /// Dispersed Partial Warp Reduction
2104 ///
2105 /// This algorithm is used within a warp when any discontiguous subset of
2106 /// lanes are active. It is used to implement the reduction operation
2107 /// across lanes in an OpenMP simd region or in a nested parallel region.
2108 ///
2109 /// void
2110 /// dispersed_partial_reduce(void *reduce_data,
2111 /// kmp_ShuffleReductFctPtr ShuffleReduceFn) {
2112 /// int size, remote_id;
2113 /// int logical_lane_id = number_of_active_lanes_before_me() * 2;
2114 /// do {
2115 /// remote_id = next_active_lane_id_right_after_me();
2116  /// # the above function returns 0 if no active lane
2117 /// # is present right after the current lane.
2118 /// size = number_of_active_lanes_in_this_warp();
2119 /// logical_lane_id /= 2;
2120 /// ShuffleReduceFn(reduce_data, logical_lane_id,
2121 /// remote_id-1-threadIdx.x, 2);
2122 /// } while (logical_lane_id % 2 == 0 && size > 1);
2123 /// }
2124 ///
2125 /// There is no assumption made about the initial state of the reduction.
2126 /// Any number of lanes (>=1) could be active at any position. The reduction
2127 /// result is returned in the first active lane.
2128 ///
2129 /// In this version, 'ShuffleReduceFn' behaves, per element, as follows:
2130 ///
2131 /// remote_elem = shuffle_down(reduce_elem, offset, WARPSIZE);
2132 /// if (lane_id % 2 == 0 && offset > 0)
2133 /// reduce_elem = reduce_elem REDUCE_OP remote_elem
2134 /// else
2135 /// reduce_elem = remote_elem
2136 ///
2137 ///
2138 /// Intra-Team Reduction
2139 ///
2140 /// This function, as implemented in the runtime call
2141 /// '__kmpc_nvptx_parallel_reduce_nowait', aggregates data across OpenMP
2142 /// threads in a team. It first reduces within a warp using the
2143 /// aforementioned algorithms. We then proceed to gather all such
2144 /// reduced values at the first warp.
2145 ///
2146 /// The runtime makes use of the function 'InterWarpCpyFn', which copies
2147  /// data from each of the "warp masters" (zeroth lane of each warp, where
2148 /// warp-reduced data is held) to the zeroth warp. This step reduces (in
2149 /// a mathematical sense) the problem of reduction across warp masters in
2150 /// a block to the problem of warp reduction.
2151 ///
2152 ///
2153 /// Inter-Team Reduction
2154 ///
2155 /// Once a team has reduced its data to a single value, it is stored in
2156 /// a global scratchpad array. Since each team has a distinct slot, this
2157 /// can be done without locking.
2158 ///
2159 /// The last team to write to the scratchpad array proceeds to reduce the
2160 /// scratchpad array. One or more workers in the last team use the helper
2161  /// 'loadAndReduceFn' to load and reduce values from the array, i.e.,
2162 /// the k'th worker reduces every k'th element.
2163 ///
2164 /// Finally, a call is made to '__kmpc_nvptx_parallel_reduce_nowait' to
2165 /// reduce across workers and compute a globally reduced value.
2166 ///
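
Before the implementation, a runnable illustration of the Full Warp Reduction schedule described above: an eight-lane toy warp reduced with shuffle-down semantics. Lanes act in lockstep, so each round reads the previous round's values; eight lanes are assumed purely to keep the trace short (a real warp has 32).

    #include <algorithm>
    #include <cstdio>

    int main() {
      const int W = 8;                        // toy warp; real WARPSIZE is 32
      int lane[W] = {1, 2, 3, 4, 5, 6, 7, 8};
      for (int offset = W / 2; offset > 0; offset /= 2) {
        int prev[W];
        std::copy(lane, lane + W, prev);      // all lanes shuffle simultaneously
        for (int id = 0; id < W; ++id) {
          int remote = prev[(id + offset) % W]; // remote_elem = shuffle_down(...)
          lane[id] += remote;                   // reduce_elem REDUCE_OP remote
        }
      }
      std::printf("lane 0 holds %d\n", lane[0]); // 36 after log2(8) = 3 rounds
      return 0;
    }
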
2167 void CGOpenMPRuntimeNVPTX::emitReduction(
2168  CodeGenFunction &CGF, SourceLocation Loc, ArrayRef<const Expr *> Privates,
2169  ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
2170  ArrayRef<const Expr *> ReductionOps, ReductionOptionsTy Options) {
2171  if (!CGF.HaveInsertPoint())
2172  return;
2173 
2174  bool ParallelReduction = isOpenMPParallelDirective(Options.ReductionKind);
2175  bool TeamsReduction = isOpenMPTeamsDirective(Options.ReductionKind);
2176  // FIXME: Add support for simd reduction.
2177  assert((TeamsReduction || ParallelReduction) &&
2178  "Invalid reduction selection in emitReduction.");
2179 
2180  auto &C = CGM.getContext();
2181 
2182  // 1. Build a list of reduction variables.
2183  // void *RedList[<n>] = {<ReductionVars>[0], ..., <ReductionVars>[<n>-1]};
2184  auto Size = RHSExprs.size();
2185  for (auto *E : Privates) {
2186  if (E->getType()->isVariablyModifiedType())
2187  // Reserve place for array size.
2188  ++Size;
2189  }
2190  llvm::APInt ArraySize(/*unsigned int numBits=*/32, Size);
2191  QualType ReductionArrayTy =
2192  C.getConstantArrayType(C.VoidPtrTy, ArraySize, ArrayType::Normal,
2193  /*IndexTypeQuals=*/0);
2194  Address ReductionList =
2195  CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
2196  auto IPriv = Privates.begin();
2197  unsigned Idx = 0;
2198  for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
2199  Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
2200  CGF.getPointerSize());
2201  CGF.Builder.CreateStore(
2202  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2203  CGF.EmitLValue(RHSExprs[I]).getPointer(), CGF.VoidPtrTy),
2204  Elem);
2205  if ((*IPriv)->getType()->isVariablyModifiedType()) {
2206  // Store array size.
2207  ++Idx;
2208  Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx,
2209  CGF.getPointerSize());
2210  llvm::Value *Size = CGF.Builder.CreateIntCast(
2211  CGF.getVLASize(
2212  CGF.getContext().getAsVariableArrayType((*IPriv)->getType()))
2213  .first,
2214  CGF.SizeTy, /*isSigned=*/false);
2215  CGF.Builder.CreateStore(CGF.Builder.CreateIntToPtr(Size, CGF.VoidPtrTy),
2216  Elem);
2217  }
2218  }
2219 
2220  // 2. Emit reduce_func().
2221  auto *ReductionFn = emitReductionFunction(
2222  CGM, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
2223  LHSExprs, RHSExprs, ReductionOps);
2224 
2225  // 4. Build res = __kmpc_reduce{_nowait}(<gtid>, <n>, sizeof(RedList),
2226  // RedList, shuffle_reduce_func, interwarp_copy_func);
2227  auto *ThreadId = getThreadID(CGF, Loc);
2228  auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
2229  auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2230  ReductionList.getPointer(), CGF.VoidPtrTy);
2231 
2232  auto *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
2233  CGM, Privates, ReductionArrayTy, ReductionFn);
2234  auto *InterWarpCopyFn =
2235  emitInterWarpCopyFunction(CGM, Privates, ReductionArrayTy);
2236 
2237  llvm::Value *Res = nullptr;
2238  if (ParallelReduction) {
2239  llvm::Value *Args[] = {ThreadId,
2240  CGF.Builder.getInt32(RHSExprs.size()),
2241  ReductionArrayTySize,
2242  RL,
2243  ShuffleAndReduceFn,
2244  InterWarpCopyFn};
2245 
2246  Res = CGF.EmitRuntimeCall(
2247  createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_parallel_reduce_nowait),
2248  Args);
2249  }
2250 
2251  if (TeamsReduction) {
2252  auto *ScratchPadCopyFn =
2253  emitCopyToScratchpad(CGM, Privates, ReductionArrayTy);
2254  auto *LoadAndReduceFn = emitReduceScratchpadFunction(
2255  CGM, Privates, ReductionArrayTy, ReductionFn);
2256 
2257  llvm::Value *Args[] = {ThreadId,
2258  CGF.Builder.getInt32(RHSExprs.size()),
2259  ReductionArrayTySize,
2260  RL,
2261  ShuffleAndReduceFn,
2262  InterWarpCopyFn,
2263  ScratchPadCopyFn,
2264  LoadAndReduceFn};
2265  Res = CGF.EmitRuntimeCall(
2266  createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_teams_reduce_nowait),
2267  Args);
2268  }
2269 
2270  // 5. Build switch(res)
2271  auto *DefaultBB = CGF.createBasicBlock(".omp.reduction.default");
2272  auto *SwInst = CGF.Builder.CreateSwitch(Res, DefaultBB, /*NumCases=*/1);
2273 
2274  // 6. Build case 1: where we have reduced values in the master
2275  // thread in each team.
2276  // __kmpc_end_reduce{_nowait}(<gtid>);
2277  // break;
2278  auto *Case1BB = CGF.createBasicBlock(".omp.reduction.case1");
2279  SwInst->addCase(CGF.Builder.getInt32(1), Case1BB);
2280  CGF.EmitBlock(Case1BB);
2281 
2282  // Add emission of __kmpc_end_reduce{_nowait}(<gtid>);
2283  llvm::Value *EndArgs[] = {ThreadId};
2284  auto &&CodeGen = [&Privates, &LHSExprs, &RHSExprs, &ReductionOps,
2285  this](CodeGenFunction &CGF, PrePostActionTy &Action) {
2286  auto IPriv = Privates.begin();
2287  auto ILHS = LHSExprs.begin();
2288  auto IRHS = RHSExprs.begin();
2289  for (auto *E : ReductionOps) {
2290  emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
2291  cast<DeclRefExpr>(*IRHS));
2292  ++IPriv;
2293  ++ILHS;
2294  ++IRHS;
2295  }
2296  };
2297  RegionCodeGenTy RCG(CodeGen);
2298  NVPTXActionTy Action(
2299  nullptr, llvm::None,
2300  createNVPTXRuntimeFunction(OMPRTL_NVPTX__kmpc_end_reduce_nowait),
2301  EndArgs);
2302  RCG.setAction(Action);
2303  RCG(CGF);
2304  CGF.EmitBranch(DefaultBB);
2305  CGF.EmitBlock(DefaultBB, /*IsFinished=*/true);
2306 }
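
For the foo/bar example from the design comment, step 1 above amounts to building the following structure on the stack. This is a plain C++ rendering with illustrative names, not the generated IR:

    // One void* slot per reduction variable, each pointing at the
    // thread-private copy.
    void buildRedList() {
      float foo_private = 0.0f;
      double bar_private = 1.0;
      void *red_list[2] = {&foo_private, &bar_private};
      (void)red_list; // passed (bitcast to void*, plus its size in bytes) as
                      // reduce_data to __kmpc_nvptx_parallel_reduce_nowait
    }
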
2307 
2308 const VarDecl *
2309 CGOpenMPRuntimeNVPTX::translateParameter(const FieldDecl *FD,
2310  const VarDecl *NativeParam) const {
2311  if (!NativeParam->getType()->isReferenceType())
2312  return NativeParam;
2313  QualType ArgType = NativeParam->getType();
2314  QualifierCollector QC;
2315  const Type *NonQualTy = QC.strip(ArgType);
2316  QualType PointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
2317  if (const auto *Attr = FD->getAttr<OMPCaptureKindAttr>()) {
2318  if (Attr->getCaptureKind() == OMPC_map) {
2319  PointeeTy = CGM.getContext().getAddrSpaceQualType(PointeeTy,
2320  LangAS::opencl_global);
2321  }
2322  }
2323  ArgType = CGM.getContext().getPointerType(PointeeTy);
2324  QC.addRestrict();
2325  enum { NVPTX_local_addr = 5 };
2326  QC.addAddressSpace(getLangASFromTargetAS(NVPTX_local_addr));
2327  ArgType = QC.apply(CGM.getContext(), ArgType);
2328  if (isa<ImplicitParamDecl>(NativeParam)) {
2329  return ImplicitParamDecl::Create(
2330  CGM.getContext(), /*DC=*/nullptr, NativeParam->getLocation(),
2331  NativeParam->getIdentifier(), ArgType, ImplicitParamDecl::Other);
2332  }
2333  return ParmVarDecl::Create(
2334  CGM.getContext(),
2335  const_cast<DeclContext *>(NativeParam->getDeclContext()),
2336  NativeParam->getLocStart(), NativeParam->getLocation(),
2337  NativeParam->getIdentifier(), ArgType,
2338  /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
2339 }
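
Illustrative effect of the translation, under the assumption of a reference-typed parameter captured by map (the declarations below are hypothetical, for exposition only):

    // Native parameter of the outlined function:
    void outlined_native(int &x);
    // After translateParameter: a restrict-qualified pointer, itself placed
    // in the NVPTX "local" address space (5); shown here without the
    // address-space qualifier, which has no portable C++ spelling:
    void outlined_translated(int *__restrict x);
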
2340 
2341 Address
2342 CGOpenMPRuntimeNVPTX::getParameterAddress(CodeGenFunction &CGF,
2343  const VarDecl *NativeParam,
2344  const VarDecl *TargetParam) const {
2345  assert(NativeParam != TargetParam &&
2346  NativeParam->getType()->isReferenceType() &&
2347  "Native arg must not be the same as target arg.");
2348  Address LocalAddr = CGF.GetAddrOfLocalVar(TargetParam);
2349  QualType NativeParamType = NativeParam->getType();
2350  QualifierCollector QC;
2351  const Type *NonQualTy = QC.strip(NativeParamType);
2352  QualType NativePointeeTy = cast<ReferenceType>(NonQualTy)->getPointeeType();
2353  unsigned NativePointeeAddrSpace =
2354  CGF.getContext().getTargetAddressSpace(NativePointeeTy);
2355  QualType TargetTy = TargetParam->getType();
2356  llvm::Value *TargetAddr = CGF.EmitLoadOfScalar(
2357  LocalAddr, /*Volatile=*/false, TargetTy, SourceLocation());
2358  // First cast to generic.
2359  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2360  TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
2361  /*AddrSpace=*/0));
2362  // Cast from generic to native address space.
2363  TargetAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2364  TargetAddr, TargetAddr->getType()->getPointerElementType()->getPointerTo(
2365  NativePointeeAddrSpace));
2366  Address NativeParamAddr = CGF.CreateMemTemp(NativeParamType);
2367  CGF.EmitStoreOfScalar(TargetAddr, NativeParamAddr, /*Volatile=*/false,
2368  NativeParamType);
2369  return NativeParamAddr;
2370 }
2371 
2372 void CGOpenMPRuntimeNVPTX::emitOutlinedFunctionCall(
2373  CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn,
2374  ArrayRef<llvm::Value *> Args) const {
2375  SmallVector<llvm::Value *, 4> TargetArgs;
2376  TargetArgs.reserve(Args.size());
2377  auto *FnType =
2378  cast<llvm::FunctionType>(OutlinedFn->getType()->getPointerElementType());
2379  for (unsigned I = 0, E = Args.size(); I < E; ++I) {
2380  if (FnType->isVarArg() && FnType->getNumParams() <= I) {
2381  TargetArgs.append(std::next(Args.begin(), I), Args.end());
2382  break;
2383  }
2384  llvm::Type *TargetType = FnType->getParamType(I);
2385  llvm::Value *NativeArg = Args[I];
2386  if (!TargetType->isPointerTy()) {
2387  TargetArgs.emplace_back(NativeArg);
2388  continue;
2389  }
2390  llvm::Value *TargetArg = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
2391  NativeArg, NativeArg->getType()->getPointerElementType()->getPointerTo(
2392  /*AddrSpace=*/0));
2393  TargetArgs.emplace_back(
2394  CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(TargetArg, TargetType));
2395  }
2396  CGOpenMPRuntime::emitOutlinedFunctionCall(CGF, Loc, OutlinedFn, TargetArgs);
2397 }
2398 
2399 /// Emit function which wraps the outlined parallel region
2400 /// and controls the arguments which are passed to this function.
2401 /// The wrapper ensures that the outlined function is called
2402 /// with the correct arguments when data is shared.
2403 llvm::Function *CGOpenMPRuntimeNVPTX::createDataSharingWrapper(
2404  llvm::Function *OutlinedParallelFn, const OMPExecutableDirective &D) {
2405  ASTContext &Ctx = CGM.getContext();
2406  const auto &CS = *cast<CapturedStmt>(D.getAssociatedStmt());
2407 
2408  // Create a function that takes as argument the source thread.
2409  FunctionArgList WrapperArgs;
2410  QualType Int16QTy =
2411  Ctx.getIntTypeForBitwidth(/*DestWidth=*/16, /*Signed=*/false);
2412  QualType Int32QTy =
2413  Ctx.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/false);
2414  QualType Int32PtrQTy = Ctx.getPointerType(Int32QTy);
2415  QualType VoidPtrPtrQTy = Ctx.getPointerType(Ctx.VoidPtrTy);
2416  ImplicitParamDecl ParallelLevelArg(Ctx, Int16QTy, ImplicitParamDecl::Other);
2417  ImplicitParamDecl WrapperArg(Ctx, Int32QTy, ImplicitParamDecl::Other);
2418  ImplicitParamDecl SharedArgsList(Ctx, VoidPtrPtrQTy,
2419  ImplicitParamDecl::Other);
2420  WrapperArgs.emplace_back(&ParallelLevelArg);
2421  WrapperArgs.emplace_back(&WrapperArg);
2422  WrapperArgs.emplace_back(&SharedArgsList);
2423 
2424  auto &CGFI =
2425  CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, WrapperArgs);
2426 
2427  auto *Fn = llvm::Function::Create(
2428  CGM.getTypes().GetFunctionType(CGFI), llvm::GlobalValue::InternalLinkage,
2429  OutlinedParallelFn->getName() + "_wrapper", &CGM.getModule());
2430  CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, CGFI);
2431  Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
2432 
2433  CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
2434  CGF.StartFunction(GlobalDecl(), Ctx.VoidTy, Fn, CGFI, WrapperArgs);
2435 
2436  const auto *RD = CS.getCapturedRecordDecl();
2437  auto CurField = RD->field_begin();
2438 
2439  // Get the array of arguments.
2440  SmallVector<llvm::Value *, 8> Args;
2441 
2442  // TODO: support SIMD and pass actual values
2443  Args.emplace_back(llvm::ConstantPointerNull::get(
2444  CGM.Int32Ty->getPointerTo()));
2445  Args.emplace_back(llvm::ConstantPointerNull::get(
2446  CGM.Int32Ty->getPointerTo()));
2447 
2448  CGBuilderTy &Bld = CGF.Builder;
2449  auto CI = CS.capture_begin();
2450 
2451  // Load the start of the array
2452  auto SharedArgs =
2453  CGF.EmitLoadOfPointer(CGF.GetAddrOfLocalVar(&SharedArgsList),
2454  VoidPtrPtrQTy->castAs<PointerType>());
2455 
2456  // For each captured variable
2457  for (unsigned I = 0; I < CS.capture_size(); ++I, ++CI, ++CurField) {
2458  // Name of captured variable
2459  StringRef Name;
2460  if (CI->capturesThis())
2461  Name = "this";
2462  else
2463  Name = CI->getCapturedVar()->getName();
2464 
2465  // We retrieve the CLANG type of the argument. We use it to create
2466  // an alloca which will give us the LLVM type.
2467  QualType ElemTy = CurField->getType();
2468  // If this is a capture by copy the element type has to be the pointer to
2469  // the data.
2470  if (CI->capturesVariableByCopy())
2471  ElemTy = Ctx.getPointerType(ElemTy);
2472 
2473  // Get shared address of the captured variable.
2474  Address ArgAddress = Bld.CreateConstInBoundsGEP(
2475  SharedArgs, I, CGF.getPointerSize());
2476  Address TypedArgAddress = Bld.CreateBitCast(
2477  ArgAddress, CGF.ConvertTypeForMem(Ctx.getPointerType(ElemTy)));
2478  llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedArgAddress,
2479  /*Volatile=*/false, Int32PtrQTy, SourceLocation());
2480  Args.emplace_back(Arg);
2481  }
2482 
2483  emitCall(CGF, OutlinedParallelFn, Args);
2484  CGF.FinishFunction();
2485  return Fn;
2486 }
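
In C terms, the wrapper built here has roughly the following shape, assuming two captured variables. All names are hypothetical, and the null gtid/btid arguments reflect the TODO above:

    // Sketch of the generated "<outlined>_wrapper" in C++ terms.
    extern void outlined_parallel_fn(int *gtid, int *btid, int *cap0,
                                     int *cap1);

    void outlined_parallel_fn_wrapper(short parallelLevel, int sourceThread,
                                      void **sharedArgs) {
      // SIMD lane bounds are not wired up yet (see TODO above): pass nulls.
      int *gtid = nullptr;
      int *btid = nullptr;
      // One pointer per captured variable, unpacked from the shared list.
      outlined_parallel_fn(gtid, btid, static_cast<int *>(sharedArgs[0]),
                           static_cast<int *>(sharedArgs[1]));
    }
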
const CGFunctionInfo & arrangeBuiltinFunctionDeclaration(QualType resultType, const FunctionArgList &args)
A builtin function is a freestanding function using the default C conventions.
Definition: CGCall.cpp:640
QualType getAddrSpaceQualType(QualType T, LangAS AddressSpace) const
Return the uniqued reference to the type for an address space qualified type with the specified type ...
Other implicit parameter.
Definition: Decl.h:1473
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition: Type.h:2285
CanQualType VoidPtrTy
Definition: ASTContext.h:1012
A (possibly-)qualified type.
Definition: Type.h:653
llvm::Type * ConvertTypeForMem(QualType T)
static llvm::Value * getNVPTXLaneID(CodeGenFunction &CGF)
Get the id of the current lane in the Warp.
bool HaveInsertPoint() const
HaveInsertPoint - True if an insertion point is defined.
void emitSingleReductionCombiner(CodeGenFunction &CGF, const Expr *ReductionOp, const Expr *PrivateRef, const DeclRefExpr *LHS, const DeclRefExpr *RHS)
Emits single reduction combiner.
static void getNVPTXBarrier(CodeGenFunction &CGF, int ID, llvm::Value *NumThreads)
Get barrier #ID to synchronize selected (multiple of warp size) threads in a CTA. ...
llvm::Value * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override
Emits inlined function for the specified OpenMP parallel.
static CGOpenMPRuntimeNVPTX::ExecutionMode getExecutionModeForDirective(CodeGenModule &CGM, const OMPExecutableDirective &D)
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam, const VarDecl *TargetParam) const override
Gets the address of the native argument basing on the address of the target-specific parameter...
llvm::Value * getTypeSize(QualType Ty)
Returns calculated size of the specified type.
llvm::Value * ScratchpadIndex
The base class of the type hierarchy.
Definition: Type.h:1353
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy, LValueBaseInfo *BaseInfo=nullptr, TBAAAccessInfo *TBAAInfo=nullptr)
Definition: CGExpr.cpp:2232
llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee, ArrayRef< llvm::Value *> Args, const Twine &Name="")
Emits a call or invoke instruction to the given function, depending on the current state of the EH st...
Definition: CGCall.cpp:3677
llvm::Value * emitReductionFunction(CodeGenModule &CGM, llvm::Type *ArgsType, ArrayRef< const Expr *> Privates, ArrayRef< const Expr *> LHSExprs, ArrayRef< const Expr *> RHSExprs, ArrayRef< const Expr *> ReductionOps)
Emits reduction function.
llvm::IntegerType * Int8Ty
i8, i16, i32, and i64
Address GetAddrOfLocalVar(const VarDecl *VD)
GetAddrOfLocalVar - Return the address of a local variable.
VarDecl - An instance of this class is created to represent a variable declaration or definition...
Definition: Decl.h:806
llvm::Value * getThreadID(CodeGenFunction &CGF, SourceLocation Loc)
Gets thread id value for the current thread.
LangAS getLangASFromTargetAS(unsigned TargetAS)
Definition: AddressSpaces.h:67
virtual void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn, ArrayRef< llvm::Value *> Args=llvm::None) const
Emits call of the outlined function with the provided arguments, translating these arguments to corre...
llvm::Value * getPointer() const
Definition: Address.h:38
static llvm::Value * emitInterWarpCopyFunction(CodeGenModule &CGM, ArrayRef< const Expr *> Privates, QualType ReductionArrayTy)
This function emits a helper that gathers Reduce lists from the first lane of every active warp to la...
IdentifierInfo * getIdentifier() const
getIdentifier - Get the identifier that names this declaration, if there is one.
Definition: Decl.h:265
CodeGenFunction - This class organizes the per-function state that is used while generating LLVM code...
llvm::Type * ConvertType(QualType T)
ConvertType - Convert type T into a llvm::Type.
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition: ASTContext.h:149
FieldDecl - An instance of this class is created by Sema::ActOnField to represent a member of a struc...
Definition: Decl.h:2457
static llvm::Value * getMasterThreadID(CodeGenFunction &CGF)
Get the thread id of the OMP master thread.
llvm::CallInst * EmitRuntimeCall(llvm::Value *callee, const Twine &name="")
bool isReferenceType() const
Definition: Type.h:5956
OpenMPDirectiveKind getDirectiveKind() const
Definition: StmtOpenMP.h:221
void InitTempAlloca(Address Alloca, llvm::Value *Value)
InitTempAlloca - Provide an initial value for the given alloca which will be observable at all locati...
Definition: CGExpr.cpp:114
void EmitStoreOfScalar(llvm::Value *Value, Address Addr, bool Volatile, QualType Ty, AlignmentSource Source=AlignmentSource::Type, bool isInit=false, bool isNontemporal=false)
EmitStoreOfScalar - Store a scalar value to an address, taking care to appropriately convert from the...
bool isOpenMPTeamsDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a teams-kind directive.
uint32_t Offset
Definition: CacheTokens.cpp:43
virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc) override
Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads)...
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams, const Expr *ThreadLimit, SourceLocation Loc) override
This function ought to emit, in the general case, a call to.
static llvm::Value * getNVPTXWarpID(CodeGenFunction &CGF)
Get the id of the warp in the block.
Scope - A scope is a transient data structure that is used while parsing the program.
Definition: Scope.h:39
llvm::BasicBlock * createBasicBlock(const Twine &name="", llvm::Function *parent=nullptr, llvm::BasicBlock *before=nullptr)
createBasicBlock - Create an LLVM basic block.
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn, ArrayRef< llvm::Value *> CapturedVars, const Expr *IfCond) override
Emits code for parallel or serial call of the OutlinedFn with variables captured in a record which ad...
llvm::Value * emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc, unsigned Flags=0)
Emits object of ident_t type with info for source location.
virtual llvm::Value * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP teams directive D.
llvm::AllocaInst * CreateTempAlloca(llvm::Type *Ty, const Twine &Name="tmp", llvm::Value *ArraySize=nullptr)
CreateTempAlloca - This creates an alloca and inserts it into the entry block if ArraySize is nullptr...
Definition: CGExpr.cpp:94
llvm::Constant * CreateRuntimeFunction(llvm::FunctionType *Ty, StringRef Name, llvm::AttributeList ExtraAttrs=llvm::AttributeList(), bool Local=false)
Create a new runtime function with the specified type and name.
static void syncCTAThreads(CodeGenFunction &CGF)
Synchronize all GPU threads in a block.
llvm::Value * EmitLoadOfScalar(Address Addr, bool Volatile, QualType Ty, SourceLocation Loc, AlignmentSource Source=AlignmentSource::Type, bool isNontemporal=false)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
static ImplicitParamDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, ImplicitParamKind ParamKind)
Create implicit parameter.
Definition: Decl.cpp:4187
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition: CharUnits.h:179
Address CreateDefaultAlignTempAlloca(llvm::Type *Ty, const Twine &Name="tmp")
CreateDefaultAlignedTempAlloca - This creates an alloca with the default ABI alignment of the given L...
Definition: CGExpr.cpp:107
Expr - This represents one expression.
Definition: Expr.h:106
virtual llvm::Value * emitParallelOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen)
Emits outlined function for the specified OpenMP parallel directive D.
static Address invalid()
Definition: Address.h:35
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited...
bool isOpenMPParallelDirective(OpenMPDirectiveKind DKind)
Checks if the specified directive is a parallel-kind directive.
const CGFunctionInfo & arrangeNullaryFunction()
A nullary function is a freestanding function of type &#39;void ()&#39;.
Definition: CGCall.cpp:682
const T * castAs() const
Member-template castAs<specific type>.
Definition: Type.h:6370
void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F, const CGFunctionInfo &FI)
Set the attributes on the LLVM function for the given decl and function info.
llvm::PointerType * getType() const
Return the type of the pointer value.
Definition: Address.h:44
DeclContext * getDeclContext()
Definition: DeclBase.h:425
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition: CharUnits.h:63
MachineConfiguration
GPU Configuration: This information can be derived from cuda registers, however, providing compile ti...
const LangOptions & getLangOpts() const
ASTContext & getContext() const
OpenMPProcBindClauseKind
OpenMP attributes for &#39;proc_bind&#39; clause.
Definition: OpenMPKinds.h:51
llvm::Value * ScratchpadWidth
GlobalDecl - represents a global declaration.
Definition: GlobalDecl.h:35
The l-value was considered opaque, so the alignment was determined from a type.
llvm::Value * emitTeamsOutlinedFunction(const OMPExecutableDirective &D, const VarDecl *ThreadIDVar, OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen) override
Emits inlined function for the specified OpenMP teams.
Address CreateBitCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:142
void emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc, llvm::Value *OutlinedFn, ArrayRef< llvm::Value *> Args=llvm::None) const override
Emits call of the outlined function with the provided arguments, translating these arguments to corre...
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D, SourceLocation Loc, llvm::Value *OutlinedFn, ArrayRef< llvm::Value *> CapturedVars) override
Emits code for teams call of the OutlinedFn with variables captured in a record which address is stor...
void emitOMPIfClause(CodeGenFunction &CGF, const Expr *Cond, const RegionCodeGenTy &ThenGen, const RegionCodeGenTy &ElseGen)
Emits code for OpenMP &#39;if&#39; clause using specified CodeGen function.
Encodes a location in the source.
This is a basic class for representing single OpenMP executable directive.
Definition: StmtOpenMP.h:33
OpenMPDirectiveKind
OpenMP directives.
Definition: OpenMPKinds.h:23
static llvm::Value * emitReduceScratchpadFunction(CodeGenModule &CGM, ArrayRef< const Expr *> Privates, QualType ReductionArrayTy, llvm::Value *ReduceFn)
This function emits a helper that loads data from the scratchpad array and (optionally) reduces it wi...
This file defines OpenMP nodes for declarative directives.
CanQualType VoidTy
Definition: ASTContext.h:996
An aligned address.
Definition: Address.h:25
void StartFunction(GlobalDecl GD, QualType RetTy, llvm::Function *Fn, const CGFunctionInfo &FnInfo, const FunctionArgList &Args, SourceLocation Loc=SourceLocation(), SourceLocation StartLoc=SourceLocation())
Emit code for the start of a function.
OpenMPRTLFunctionNVPTX
virtual void emitProcBindClause(CodeGenFunction &CGF, OpenMPProcBindClauseKind ProcBind, SourceLocation Loc)
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generat...
const VarDecl * translateParameter(const FieldDecl *FD, const VarDecl *NativeParam) const override
Translates the native parameter of outlined function if this is required for target.
void FinishFunction(SourceLocation EndLoc=SourceLocation())
FinishFunction - Complete IR generation of the current function.
void emitCall(CodeGenFunction &CGF, llvm::Value *Callee, ArrayRef< llvm::Value *> Args=llvm::None, SourceLocation Loc=SourceLocation()) const
Emits Callee function call with arguments Args with location Loc.
static llvm::Value * getThreadLimit(CodeGenFunction &CGF, bool IsInSpmdExecutionMode=false)
Get the value of the thread_limit clause in the teams directive.
FunctionArgList - Type for representing both the decl and type of parameters to a function. The decl must be either a ParmVarDecl or ImplicitParamDecl.
Definition: CGCall.h:276
void setAction(PrePostActionTy &Action) const
This class organizes the cross-function state that is used while generating LLVM code.
CGOpenMPRuntime & getOpenMPRuntime()
Return a reference to the configured OpenMP runtime.
static ParmVarDecl * Create(ASTContext &C, DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo, StorageClass S, Expr *DefArg)
Definition: Decl.cpp:2451
Dataflow Directional Tag Classes.
Class provides a way to call a simple version of codegen for an OpenMP region, or an advanced one with possible pre|post-actions in codegen.
static void getNVPTXCTABarrier(CodeGenFunction &CGF)
Get barrier to synchronize all threads in a block.
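A hedged sketch of what emitting such a barrier can look like: a call to the nvvm barrier0 intrinsic, which lowers to PTX 'bar.sync 0'. The helper name is ours; CGF stands in for the surrounding CodeGenFunction as elsewhere in this file.

static void emitCTABarrierSketch(clang::CodeGen::CodeGenFunction &CGF) {
  // llvm.nvvm.barrier0 synchronizes every thread in the CTA (thread block).
  CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier0));
}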
static llvm::Value * emitCopyToScratchpad(CodeGenModule &CGM, ArrayRef< const Expr *> Privates, QualType ReductionArrayTy)
This function emits a helper that stores reduced data from the team master to a scratchpad array in global memory.
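A host-side model of the scratchpad layout the two scratchpad helpers assume, matching their index/width parameters; illustrative only, since the generated code also handles arbitrary element types and alignment padding:

// Each team writes its partial result for private P at row P, column
// TeamIdx, where each row is Width entries wide.
void storeToScratchpadModel(int *Scratchpad, const int *TeamResult,
                            int NumPrivates, int TeamIdx, int Width) {
  for (int P = 0; P < NumPrivates; ++P)
    Scratchpad[P * Width + TeamIdx] = TeamResult[P];
}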
A qualifier set is used to build a set of qualifiers.
Definition: Type.h:5687
DeclContext - This is used only as base class of specific decl types that can act as declaration cont...
Definition: DeclBase.h:1252
A basic class for pre|post-action for advanced codegen sequence for OpenMP region.
llvm::LoadInst * CreateLoad(Address Addr, const llvm::Twine &Name="")
Definition: CGBuilder.h:70
static void emitReductionListCopy(CopyAction Action, CodeGenFunction &CGF, QualType ReductionArrayTy, ArrayRef< const Expr *> Privates, Address SrcBase, Address DestBase, CopyOptionsTy CopyOptions={nullptr, nullptr, nullptr})
Emit instructions to copy a Reduce list, which contains partially aggregated values, in the specified direction.
const Type * strip(QualType type)
Collect any qualifiers on the given type and return an unqualified type.
Definition: Type.h:5694
static void setPropertyExecutionMode(CodeGenModule &CGM, StringRef Name, CGOpenMPRuntimeNVPTX::ExecutionMode Mode)
SourceLocation getLocStart() const LLVM_READONLY
Definition: Decl.h:731
Address CreateConstInBoundsGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = T* ..., produce name = getelementptr inbounds addr, i64 index, where i64 is actually the target word size.
Definition: CGBuilder.h:211
llvm::StoreInst * CreateStore(llvm::Value *Val, Address Addr, bool IsVolatile=false)
Definition: CGBuilder.h:108
llvm::Module & getModule() const
QualType apply(const ASTContext &Context, QualType QT) const
Apply the collected qualifiers to the given type.
Definition: Type.cpp:3271
static llvm::Value * createRuntimeShuffleFunction(CodeGenFunction &CGF, QualType ElemTy, llvm::Value *Elem, llvm::Value *Offset)
This function creates calls to one of two shuffle functions to copy variables between lanes in a warp...
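A sketch of the selection this implies, assuming the __kmpc_shuffle_int32/__kmpc_shuffle_int64 entry points declared in this file; the helper name is ours:

static bool usesShuffleInt32(clang::CodeGen::CodeGenFunction &CGF,
                             clang::QualType ElemTy) {
  // Elements of at most 4 bytes go through the 32-bit shuffle; larger
  // ones are widened to 64 bits, shuffled, and truncated back.
  return CGF.getContext().getTypeSizeInChars(ElemTy).getQuantity() <= 4;
}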
static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads)
Synchronize worker threads in a parallel region.
llvm::Constant * createNVPTXRuntimeFunction(unsigned Function)
Returns specified OpenMP runtime function for the current OpenMP implementation.
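A hedged sketch of one case of that mapping, for void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized); the pattern (build an llvm::FunctionType, then ask CodeGenModule for a runtime function) is the point, not this particular entry:

static llvm::Constant *
declareKernelDeinitSketch(clang::CodeGen::CodeGenModule &CGM) {
  // void __kmpc_kernel_deinit(int16_t IsOMPRuntimeInitialized);
  llvm::Type *Params[] = {CGM.Int16Ty};
  auto *FnTy = llvm::FunctionType::get(CGM.VoidTy, Params, /*isVarArg=*/false);
  return CGM.CreateRuntimeFunction(FnTy, "__kmpc_kernel_deinit");
}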
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc, ArrayRef< const Expr *> Privates, ArrayRef< const Expr *> LHSExprs, ArrayRef< const Expr *> RHSExprs, ArrayRef< const Expr *> ReductionOps, ReductionOptionsTy Options) override
Emit code for the reduction clause.
This file defines OpenMP AST classes for executable directives and clauses.
Address CreateConstArrayGEP(Address Addr, uint64_t Index, CharUnits EltSize, const llvm::Twine &Name="")
Given addr = [n x T]* ..., produce name = getelementptr inbounds addr, i64 0, i64 index, where i64 is actually the target word size.
Definition: CGBuilder.h:195
void addRestrict()
Definition: Type.h:288
T * getAttr() const
Definition: DeclBase.h:531
Internal linkage, which indicates that the entity can be referred to from within the translation unit...
Definition: Linkage.h:33
void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false)
EmitBlock - Emit the given block.
Definition: CGStmt.cpp:436
ExecutionMode
Target codegen is specialized based on two programming models: the 'generic' fork-join model of OpenMP, and a more GPU-friendly 'spmd' model in which all threads of a team execute the target region's code.
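The distinction as a sketch (member names illustrative, not necessarily this header's spelling):

enum ExecutionModeSketch {
  Spmd,    // every thread of the team executes the target region directly
  Generic, // a master thread dispatches parallel work to worker threads
  Unknown, // orphaned construct; the mode is not known at codegen time
};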
void EmitBranch(llvm::BasicBlock *Block)
EmitBranch - Emit a branch to the specified basic block from the current insert block, taking care to avoid creation of branches from dummy blocks.
Definition: CGStmt.cpp:456
Privates[]
Gets the list of initial values for linear variables.
Definition: OpenMPClause.h:136
virtual void emitProcBindClause(CodeGenFunction &CGF, OpenMPProcBindClauseKind ProcBind, SourceLocation Loc) override
Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid, int proc_bind) to generate code for the 'proc_bind' clause.
LValue EmitLValue(const Expr *E)
EmitLValue - Emit code to compute a designator that specifies the location of the expression...
Definition: CGExpr.cpp:1170
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
std::pair< llvm::Value *, QualType > getVLASize(const VariableArrayType *vla)
getVLASize - Returns an LLVM value that corresponds to the size, in non-variably-sized elements, of a variable length array type, plus that largest non-variably-sized element.
static llvm::Value * emitShuffleAndReduceFunction(CodeGenModule &CGM, ArrayRef< const Expr *> Privates, QualType ReductionArrayTy, llvm::Value *ReduceFn)
Emit a helper that reduces data across two OpenMP threads (lanes) in the same warp.
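An illustrative host-side model of the warp-level pattern this helper emits; the real code moves the data with the shuffle calls above and applies the user's combiner rather than integer addition:

int warpReduceModel(int Lane[], int WarpSize) {
  // Each round folds the upper half of the active lanes into the lower
  // half, so lane 0 ends up holding the whole warp's reduction.
  for (int Offset = WarpSize / 2; Offset > 0; Offset /= 2)
    for (int Id = 0; Id < Offset; ++Id)
      Lane[Id] += Lane[Id + Offset]; // models a shuffle from lane Id+Offset
  return Lane[0];
}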
static llvm::Value * getNVPTXThreadID(CodeGenFunction &CGF)
Get the id of the current thread on the GPU.
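A hedged sketch of how such an id is obtained: read the PTX special register tid.x through an NVVM intrinsic (lowering to 'mov.u32 %r, %tid.x'); ntid.x and laneid have analogous intrinsics for the neighboring helpers. The function name is ours:

static llvm::Value *readThreadIdXSketch(clang::CodeGen::CodeGenFunction &CGF) {
  return CGF.EmitRuntimeCall(llvm::Intrinsic::getDeclaration(
      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_read_ptx_sreg_tid_x));
}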
llvm::Value * EmitScalarConversion(llvm::Value *Src, QualType SrcTy, QualType DstTy, SourceLocation Loc)
Emit a conversion from the specified type to the specified destination type, both of which are LLVM scalar types.
const VariableArrayType * getAsVariableArrayType(QualType T) const
Definition: ASTContext.h:2324
static llvm::Value * getNVPTXWarpSize(CodeGenFunction &CGF)
Get the GPU warp size.
llvm::Value * RemoteLaneOffset
void addAddressSpace(LangAS space)
Definition: Type.h:393
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getType() const
Definition: Decl.h:638
CanQualType BoolTy
Definition: ASTContext.h:997
unsigned getTargetAddressSpace(QualType T) const
Definition: ASTContext.h:2407
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty, const llvm::Twine &Name="")
Definition: CGBuilder.h:164
Address CreateMemTemp(QualType T, const Twine &Name="tmp", bool CastToDefaultAddrSpace=true)
CreateMemTemp - Create a temporary memory object of the given type, with appropriate alignment. Cast it to the default address space if CastToDefaultAddrSpace is true.
Definition: CGExpr.cpp:127
static llvm::Value * getNVPTXNumThreads(CodeGenFunction &CGF)
Get the maximum number of threads in a block of the GPU.
llvm::Value * getPointer() const
Definition: CGValue.h:320
Stmt * getAssociatedStmt() const
Returns statement associated with the directive.
Definition: StmtOpenMP.h:196
virtual void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads, SourceLocation Loc)
Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_threads) to generate code for the 'num_threads' clause.
Attr - This represents one attribute.
Definition: Attr.h:43
SourceLocation getLocation() const
Definition: DeclBase.h:416
QualType getIntTypeForBitwidth(unsigned DestWidth, unsigned Signed) const
getIntTypeForBitwidth - Gets an integer QualType according to the specified details: bitwidth, signed/unsigned.
static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef< Expr *> VL, ArrayRef< Expr *> PL, ArrayRef< Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate)
Creates clause with a list of variables VL and a linear step Step.
llvm::FunctionType * GetFunctionType(const CGFunctionInfo &Info)
GetFunctionType - Get the LLVM function type for the given function info.
Definition: CGCall.cpp:1524