ThreadSafetyTIL.cpp
//===- ThreadSafetyTIL.cpp ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT in the llvm repository for details.
//
//===----------------------------------------------------------------------===//

#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Basic/LLVM.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstddef>

using namespace clang;
using namespace threadSafety;
using namespace til;

StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
  switch (Op) {
    case UOP_Minus:    return "-";
    case UOP_BitNot:   return "~";
    case UOP_LogicNot: return "!";
  }
  return {};
}

StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
  switch (Op) {
    case BOP_Mul:      return "*";
    case BOP_Div:      return "/";
    case BOP_Rem:      return "%";
    case BOP_Add:      return "+";
    case BOP_Sub:      return "-";
    case BOP_Shl:      return "<<";
    case BOP_Shr:      return ">>";
    case BOP_BitAnd:   return "&";
    case BOP_BitXor:   return "^";
    case BOP_BitOr:    return "|";
    case BOP_Eq:       return "==";
    case BOP_Neq:      return "!=";
    case BOP_Lt:       return "<";
    case BOP_Leq:      return "<=";
    case BOP_Cmp:      return "<=>";
    case BOP_LogicAnd: return "&&";
    case BOP_LogicOr:  return "||";
  }
  return {};
}

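// Forces evaluation of the Future. Status is set to FS_evaluating while
// compute() runs, then to FS_done once the result has been cached in Result.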
SExpr* Future::force() {
  Status = FS_evaluating;
  Result = compute();
  Status = FS_done;
  return Result;
}

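// Adds Pred to the predecessor list and appends a null placeholder argument
// to every Phi node in Args, keeping each Phi's argument count in sync with
// the number of predecessors. Returns the index of the new predecessor.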
unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
  unsigned Idx = Predecessors.size();
  Predecessors.reserveCheck(1, Arena);
  Predecessors.push_back(Pred);
  for (auto *E : Args) {
    if (auto *Ph = dyn_cast<Phi>(E)) {
      Ph->values().reserveCheck(1, Arena);
      Ph->values().push_back(nullptr);
    }
  }
  return Idx;
}

void BasicBlock::reservePredecessors(unsigned NumPreds) {
  Predecessors.reserve(NumPreds, Arena);
  for (auto *E : Args) {
    if (auto *Ph = dyn_cast<Phi>(E)) {
      Ph->values().reserve(NumPreds, Arena);
    }
  }
}

// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
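// For example, if x is bound by `let x = y`, a reference to x resolves
// through the VK_Let alias to the definition of y; a Phi node whose status is
// PH_SingleVal resolves to its single value in the same way.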
const SExpr *til::getCanonicalVal(const SExpr *E) {
  while (true) {
    if (const auto *V = dyn_cast<Variable>(E)) {
      if (V->kind() == Variable::VK_Let) {
        E = V->definition();
        continue;
      }
    }
    if (const auto *Ph = dyn_cast<Phi>(E)) {
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    break;
  }
  return E;
}

// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
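// For example, a variable bound by `let x = 5` has a trivial definition, so
// references to x simplify to the literal 5; a variable bound to anything
// more complicated is returned unchanged.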
SExpr *til::simplifyToCanonicalVal(SExpr *E) {
  while (true) {
    if (auto *V = dyn_cast<Variable>(E)) {
      if (V->kind() != Variable::VK_Let)
        return V;
      // Eliminate redundant variables, e.g. x = y, or x = 5,
      // but keep anything more complicated.
      if (til::ThreadSafetyTIL::isTrivial(V->definition())) {
        E = V->definition();
        continue;
      }
      return V;
    }
    if (auto *Ph = dyn_cast<Phi>(E)) {
      if (Ph->status() == Phi::PH_Incomplete)
        simplifyIncompleteArg(Ph);
      // Eliminate redundant Phi nodes.
      if (Ph->status() == Phi::PH_SingleVal) {
        E = Ph->values()[0];
        continue;
      }
    }
    return E;
  }
}

// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// simplifyToCanonicalVal() will recursively call simplifyIncompleteArg().
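// For example, an incomplete Phi node Ph = phi(x, Ph, x) is redundant: its
// only non-recursive arguments canonicalize to the same value x, so Ph is
// marked PH_SingleVal. If any two arguments canonicalize to different values,
// the status remains PH_MultiVal.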
void til::simplifyIncompleteArg(til::Phi *Ph) {
  assert(Ph && Ph->status() == Phi::PH_Incomplete);

  // Eliminate infinite recursion -- assume that this node is not redundant.
  Ph->setStatus(Phi::PH_MultiVal);

  SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
  for (unsigned i = 1, n = Ph->values().size(); i < n; ++i) {
    SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
    if (Ei == Ph)
      continue;  // Recursive reference to itself. Don't count.
    if (Ei != E0) {
      return;    // Status is already set to MultiVal.
    }
  }
  Ph->setStatus(Phi::PH_SingleVal);
}

// Renumbers the arguments and instructions to have unique, sequential IDs.
unsigned BasicBlock::renumberInstrs(unsigned ID) {
  for (auto *Arg : Args)
    Arg->setID(this, ID++);
  for (auto *Instr : Instrs)
    Instr->setID(this, ID++);
  TermInstr->setID(this, ID++);
  return ID;
}

// Sorts the CFG's blocks using a reverse post-order depth-first traversal.
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array. Sorting should start from the entry
// block, and ID should be the total number of blocks.
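// For example, in the diamond CFG  A -> {B, C} -> D, calling
// A->topologicalSort(Blocks, 4) fills Blocks as [A, C, B, D] when A's
// successors are visited in the order (B, C); every block's BlockID is then
// smaller than the BlockIDs of its non-back-edge successors.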
unsigned BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks,
                                     unsigned ID) {
  if (Visited) return ID;
  Visited = true;
  for (auto *Block : successors())
    ID = Block->topologicalSort(Blocks, ID);
  // Set ID and update block array in place.
  // We may lose pointers to unreachable blocks.
  assert(ID > 0);
  BlockID = --ID;
  Blocks[BlockID] = this;
  return ID;
}

// Performs a reverse topological traversal, starting from the exit block and
// following back-edges. The dominator is serialized before any predecessors,
// which guarantees that all blocks are serialized after their dominator and
// before their post-dominator (because it's a reverse topological traversal).
// ID should be initially set to 0.
//
// This sort assumes that (1) dominators have been computed, (2) there are no
// critical edges, and (3) the entry block is reachable from the exit block
// and no blocks are accessible via traversal of back-edges from the exit that
// weren't accessible via forward edges from the entry.
unsigned BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock *> &Blocks,
                                          unsigned ID) {
  // Visited is assumed to have been set to true by topologicalSort. This
  // pass clears the flag, so !Visited means the block has already been
  // processed by this pass.
  if (!Visited) return ID;
  Visited = false;
  if (DominatorNode.Parent)
    ID = DominatorNode.Parent->topologicalFinalSort(Blocks, ID);
  for (auto *Pred : Predecessors)
    ID = Pred->topologicalFinalSort(Blocks, ID);
  assert(static_cast<size_t>(ID) < Blocks.size());
  BlockID = ID++;
  Blocks[BlockID] = this;
  return ID;
}

// Computes the immediate dominator of the current block. Assumes that all of
// its predecessors have already computed their dominators. This is achieved
// by visiting the nodes in topological order.
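// The candidate-walking loop below is essentially the intersection step of
// Cooper, Harvey, and Kennedy's "A Simple, Fast Dominance Algorithm": since
// BlockIDs are assigned in reverse post-order, repeatedly stepping whichever
// candidate has the larger ID up its dominator chain converges on the common
// dominator of all forward-edge predecessors.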
void BasicBlock::computeDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk backwards from each predecessor to find the common dominator node.
  for (auto *Pred : Predecessors) {
    // Skip back-edges.
    if (Pred->BlockID >= BlockID) continue;
    // If we don't yet have a candidate for dominator, take this one.
    if (Candidate == nullptr) {
      Candidate = Pred;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    auto *Alternate = Pred;
    while (Alternate != Candidate) {
      if (Candidate->BlockID > Alternate->BlockID)
        Candidate = Candidate->DominatorNode.Parent;
      else
        Alternate = Alternate->DominatorNode.Parent;
    }
  }
  DominatorNode.Parent = Candidate;
  DominatorNode.SizeOfSubTree = 1;
}

// Computes the immediate post-dominator of the current block. Assumes that
// all of its successors have already computed their post-dominators. This is
// achieved by visiting the nodes in reverse topological order.
void BasicBlock::computePostDominator() {
  BasicBlock *Candidate = nullptr;
  // Walk back from each successor to find the common post-dominator node.
  for (auto *Succ : successors()) {
    // Skip back-edges.
    if (Succ->BlockID <= BlockID) continue;
    // If we don't yet have a candidate for post-dominator, take this one.
    if (Candidate == nullptr) {
      Candidate = Succ;
      continue;
    }
    // Walk the alternate and current candidate back to find a common ancestor.
    auto *Alternate = Succ;
    while (Alternate != Candidate) {
      if (Candidate->BlockID < Alternate->BlockID)
        Candidate = Candidate->PostDominatorNode.Parent;
      else
        Alternate = Alternate->PostDominatorNode.Parent;
    }
  }
  PostDominatorNode.Parent = Candidate;
  PostDominatorNode.SizeOfSubTree = 1;
}

// Renumber instructions in all blocks.
void SCFG::renumberInstrs() {
  unsigned InstrID = 0;
  for (auto *Block : Blocks)
    InstrID = Block->renumberInstrs(InstrID);
}

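// The two helpers below are parameterized by a pointer-to-member
// (BasicBlock::TopologyNode BasicBlock::*TN) so that the same code can walk
// either tree: pass &BasicBlock::DominatorNode or
// &BasicBlock::PostDominatorNode, and B->*TN selects that node within B.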
static inline void computeNodeSize(BasicBlock *B,
                                   BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    // Initially set ID relative to the (as yet uncomputed) parent ID.
    N->NodeID = P->SizeOfSubTree;
    P->SizeOfSubTree += N->SizeOfSubTree;
  }
}

static inline void computeNodeID(BasicBlock *B,
                                 BasicBlock::TopologyNode BasicBlock::*TN) {
  BasicBlock::TopologyNode *N = &(B->*TN);
  if (N->Parent) {
    BasicBlock::TopologyNode *P = &(N->Parent->*TN);
    N->NodeID += P->NodeID;  // Fix NodeIDs relative to starting node.
  }
}

// Normalizes a CFG. Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators.
// 3) Topologically sorting the blocks into the "Blocks" array.
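// Note: a typical client builds the SCFG first (in the thread-safety analysis
// this is typically done via SExprBuilder) and calls computeNormalForm() once
// the CFG is complete. Afterwards, unreachable blocks are gone, each block
// follows its dominator and precedes its post-dominator in Blocks, and block
// and instruction IDs are sequential.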
void SCFG::computeNormalForm() {
  // Topologically sort the blocks starting from the entry block.
  unsigned NumUnreachableBlocks = Entry->topologicalSort(Blocks, Blocks.size());
  if (NumUnreachableBlocks > 0) {
    // If there were unreachable blocks, shift everything down and delete them.
    for (unsigned I = NumUnreachableBlocks, E = Blocks.size(); I < E; ++I) {
      unsigned NI = I - NumUnreachableBlocks;
      Blocks[NI] = Blocks[I];
      Blocks[NI]->BlockID = NI;
      // FIXME: clean up predecessor pointers to unreachable blocks?
    }
    Blocks.drop(NumUnreachableBlocks);
  }

  // Compute dominators.
  for (auto *Block : Blocks)
    Block->computeDominator();

  // Once dominators have been computed, the final sort may be performed.
  unsigned NumBlocks = Exit->topologicalFinalSort(Blocks, 0);
  assert(static_cast<size_t>(NumBlocks) == Blocks.size());
  (void) NumBlocks;

  // Renumber the instructions now that we have a final sort.
  renumberInstrs();

  // Compute post-dominators and compute the sizes of each node in the
  // dominator tree.
  for (auto *Block : Blocks.reverse()) {
    Block->computePostDominator();
    computeNodeSize(Block, &BasicBlock::DominatorNode);
  }
  // Compute the sizes of each node in the post-dominator tree and assign IDs
  // in the dominator tree.
  for (auto *Block : Blocks) {
    computeNodeID(Block, &BasicBlock::DominatorNode);
    computeNodeSize(Block, &BasicBlock::PostDominatorNode);
  }
  // Assign IDs in the post-dominator tree.
  for (auto *Block : Blocks.reverse()) {
    computeNodeID(Block, &BasicBlock::PostDominatorNode);
  }
}