//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
// conditions), based on an annotation system.
//
// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
// for more information.
//
//===----------------------------------------------------------------------===//
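
// For orientation, below is the flavor of annotated code this analysis
// checks.  The macros are hypothetical stand-ins for the attributes described
// in the documentation above (capability, guarded_by, acquire_capability,
// release_capability); the class and member names are made up:
//
//   class CAPABILITY("mutex") Mutex {
//   public:
//     void Lock() ACQUIRE();
//     void Unlock() RELEASE();
//   };
//
//   Mutex Mu;
//   int Data GUARDED_BY(Mu);
//
//   void Increment() {
//     Mu.Lock();    // the fact 'Mu' is added to the lockset
//     ++Data;       // OK: a capability protecting 'Data' is held
//     Mu.Unlock();  // the fact 'Mu' is removed; accessing 'Data' afterwards,
//   }               // or forgetting the Unlock(), would be diagnosed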

#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclGroup.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/OperationKinds.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
#include <iterator>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

using namespace clang;
using namespace threadSafety;

// Key method definition
ThreadSafetyHandler::~ThreadSafetyHandler() = default;

/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
                            const Expr *MutexExp, const NamedDecl *D,
                            const Expr *DeclExp, StringRef Kind) {
  SourceLocation Loc;
  if (DeclExp)
    Loc = DeclExp->getExprLoc();

  // FIXME: add a note about the attribute location in MutexExp or D
  if (Loc.isValid())
    Handler.handleInvalidLockExp(Loc);
}

namespace {

/// A set of CapabilityExpr objects, which are compiled from thread safety
/// attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
  /// Push CapE onto the list, but discard duplicates.
  void push_back_nodup(const CapabilityExpr &CapE) {
    if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
          return CapE.equals(CapE2);
        }))
      push_back(CapE);
  }
};

class FactManager;
class FactSet;

/// This is a helper class that stores a fact that is known at a
/// particular point in program execution.  Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};

using FactID = unsigned short;

/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  llvm::BumpPtrAllocator &Alloc;
  std::vector<const FactEntry *> Facts;

public:
  FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  template <typename T, typename... ArgTypes>
  T *createFact(ArgTypes &&...Args) {
    static_assert(std::is_trivially_destructible_v<T>);
    return T::create(Alloc, std::forward<ArgTypes>(Args)...);
  }

  FactID newFact(const FactEntry *Entry) {
    Facts.push_back(Entry);
    assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
           "FactID space exhausted");
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};

/// A FactSet is the set of facts that are known to be true at a
/// particular program point.  FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager.  A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup.  Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  FactID addLock(FactManager &FM, const FactEntry *Entry) {
    FactID F = FM.newFact(Entry);
    FactIDs.push_back(F);
    return F;
  }

  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  std::optional<FactID> replaceLock(FactManager &FM, iterator It,
                                    const FactEntry *Entry) {
    if (It == end())
      return std::nullopt;
    FactID F = FM.newFact(Entry);
    *It = F;
    return F;
  }

  std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
                                    const FactEntry *Entry) {
    return replaceLock(FM, findLockIter(FM, CapE), Entry);
  }

  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return llvm::find_if(*this,
                         [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I =
        llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
    return I != end();
  }
};

class ThreadSafetyAnalyzer;

} // namespace

namespace clang {
namespace threadSafety {

class BeforeSet {
private:
  using BeforeVect = SmallVector<const ValueDecl *, 4>;

  struct BeforeInfo {
    BeforeVect Vect;
    int Visited = 0;

    BeforeInfo() = default;
    BeforeInfo(BeforeInfo &&) = default;
  };

  using BeforeMap =
      llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
  using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;

public:
  BeforeSet() = default;

  BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
                              ThreadSafetyAnalyzer& Analyzer);

  BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
                                   ThreadSafetyAnalyzer &Analyzer);

  void checkBeforeAfter(const ValueDecl* Vd,
                        const FactSet& FSet,
                        ThreadSafetyAnalyzer& Analyzer,
                        SourceLocation Loc, StringRef CapKind);

private:
  BeforeMap BMap;
  CycleMap CycMap;
};

} // namespace threadSafety
} // namespace clang

namespace {

class LocalVariableMap;

using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG.  See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};

// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions.  It provides SSA-like functionality when traversing the
// CFG.  Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace DeclRefs to local
// variables with their SSA names.  Instead, we compute a Context for each
// point in the code, which maps local variables to the appropriate SSA name.
// This map changes with each assignment.
//
// The map is computed in a single pass over the CFG.  Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted.  A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Reference to another VarDefinition
    unsigned Ref = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned R, Context C)
        : Dec(D), Ref(R), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0u, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context.  Returns
  /// NULL if the expression is not statically known.  If successful, also
  /// modifies Ctx to hold the context of the returned Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].Ref;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S.  This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG.  It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].Ref;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned i, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, i, Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable.  The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};

} // namespace

// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
  return CFGBlockInfo(M.getEmptyContext());
}

namespace {

/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  LocalVariableMap* VMap;
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
};

} // namespace

// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
  bool modifiedCtx = false;
  const DeclGroupRef DGrp = S->getDeclGroup();
  for (const auto *D : DGrp) {
    if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
      const Expr *E = VD->getInit();

      // Add local variables with trivial type to the variable map
      QualType T = VD->getType();
      if (T.isTrivialType(VD->getASTContext())) {
        Ctx = VMap->addDefinition(VD, E, Ctx);
        modifiedCtx = true;
      }
    }
  }
  if (modifiedCtx)
    VMap->saveContext(S, Ctx);
}

// Update local variable definitions in variable map
void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
  if (!BO->isAssignmentOp())
    return;

  Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();

  // Update the variable map and current context.
  if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
    const ValueDecl *VDec = DRE->getDecl();
    if (Ctx.lookup(VDec)) {
      if (BO->getOpcode() == BO_Assign)
        Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
      else
        // FIXME -- handle compound assignment operators
        Ctx = VMap->clearDefinition(VDec, Ctx);
      VMap->saveContext(BO, Ctx);
    }
  }
}

// Computes the intersection of two contexts.  The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
LocalVariableMap::Context
LocalVariableMap::intersectContexts(Context C1, Context C2) {
  Context Result = C1;
  for (const auto &P : C1) {
    const NamedDecl *Dec = P.first;
    const unsigned *i2 = C2.lookup(Dec);
    if (!i2)                  // variable doesn't exist on second path
      Result = removeDefinition(Dec, Result);
    else if (*i2 != P.second) // variable exists, but has different definition
      Result = clearDefinition(Dec, Result);
  }
  return Result;
}

// For every variable in C, create a new variable that refers to the
// definition in C.  Return a new context that contains these new variables.
// (We use this for a naive implementation of SSA on loop back-edges.)
LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
  Context Result = getEmptyContext();
  for (const auto &P : C)
    Result = addReference(P.first, P.second, Result);
  return Result;
}

// This routine also takes the intersection of C1 and C2, but it does so by
// altering the VarDefinitions.  C1 must be the result of an earlier call to
// createReferenceContext.
void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
  for (const auto &P : C1) {
    unsigned i1 = P.second;
    VarDefinition *VDef = &VarDefinitions[i1];
    assert(VDef->isReference());

    const unsigned *i2 = C2.lookup(P.first);
    if (!i2 || (*i2 != i1))
      VDef->Ref = 0; // Mark this variable as undefined
  }
}

// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself.  At
// each point in the code, we calculate a Context, which holds the set of
// variable definitions which are visible at that point in execution.
// Visible variables are mapped to their definitions using an array that
// contains all definitions.
//
// At join points in the CFG, the set is computed as the intersection of
// the incoming sets along each edge, E.g.
//
//                       { Context                 | VarDefinitions }
//   int x = 0;          { x -> x1                 | x1 = 0 }
//   int y = 0;          { x -> x1, y -> y1        | y1 = 0, x1 = 0 }
//   if (b) x = 1;       { x -> x2, y -> y1        | x2 = 1, y1 = 0, ... }
//   else   x = 2;       { x -> x3, y -> y1        | x3 = 2, x2 = 1, ... }
//   ...                 { y -> y1  (x is unknown) | x3 = 2, x2 = 1, ... }
//
// This is essentially a simpler and more naive version of the standard SSA
// algorithm.  Those definitions that remain in the intersection are from
// blocks that strictly dominate the current block.  We do not bother to
// insert proper phi nodes, because they are not used in our analysis;
// instead, wherever a phi node would be required, we simply remove that
// definition from the context (E.g. x above).
//
// The initial traversal does not capture back-edges, so those need to be
// handled on a separate pass.  Whenever the first pass encounters an
// incoming back edge, it duplicates the context, creating new definitions
// that refer back to the originals.  (These correspond to places where SSA
// might have to insert a phi node.)  On the second pass, these definitions
// are set to NULL if the variable has changed on the back-edge (i.e. a phi
// node was actually required.)  E.g.
//
//                       { Context           | VarDefinitions }
//   int x = 0, y = 0;   { x -> x1, y -> y1  | y1 = 0, x1 = 0 }
//   while (b)           { x -> x2, y -> y1  | [1st:] x2=x1; [2nd:] x2=NULL; }
//     x = x+1;          { x -> x3, y -> y1  | x3 = x2 + 1, ... }
//   ...                 { y -> y1           | x3 = 2, x2 = 1, ... }
void LocalVariableMap::traverseCFG(CFG *CFGraph,
                                   const PostOrderCFGView *SortedGraph,
                                   std::vector<CFGBlockInfo> &BlockInfo) {
  PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);

  for (const auto *CurrBlock : *SortedGraph) {
    unsigned CurrBlockID = CurrBlock->getBlockID();
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];

    VisitedBlocks.insert(CurrBlock);

    // Calculate the entry context for the current block
    bool HasBackEdges = false;
    bool CtxInit = true;
    for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
         PE = CurrBlock->pred_end(); PI != PE; ++PI) {
      // If *PI -> CurrBlock is a back edge, skip it.
      if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
        HasBackEdges = true;
        continue;
      }

      unsigned PrevBlockID = (*PI)->getBlockID();
      CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];

      if (CtxInit) {
        CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
        CtxInit = false;
      }
      else {
        CurrBlockInfo->EntryContext =
            intersectContexts(CurrBlockInfo->EntryContext,
                              PrevBlockInfo->ExitContext);
      }
    }

    // Duplicate the context if we have back-edges, so we can call
    // intersectBackEdges later.
    if (HasBackEdges)
      CurrBlockInfo->EntryContext =
          createReferenceContext(CurrBlockInfo->EntryContext);

    // Create a starting context index for the current block
    saveContext(nullptr, CurrBlockInfo->EntryContext);
    CurrBlockInfo->EntryIndex = getContextIndex();

    // Visit all the statements in the basic block.
    VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
    for (const auto &BI : *CurrBlock) {
      switch (BI.getKind()) {
      case CFGElement::Statement: {
        CFGStmt CS = BI.castAs<CFGStmt>();
        VMapBuilder.Visit(CS.getStmt());
        break;
      }
      default:
        break;
      }
    }
    CurrBlockInfo->ExitContext = VMapBuilder.Ctx;

    // Mark variables on back edges as "unknown" if they've been changed.
    for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
         SE = CurrBlock->succ_end(); SI != SE; ++SI) {
      // if CurrBlock -> *SI is *not* a back edge
      if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
        continue;

      CFGBlock *FirstLoopBlock = *SI;
      Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
      Context LoopEnd = CurrBlockInfo->ExitContext;
      intersectBackEdge(LoopBegin, LoopEnd);
    }
  }

  // Put an extra entry at the end of the indexed context array
  unsigned exitID = CFGraph->getExit().getBlockID();
  saveContext(nullptr, BlockInfo[exitID].ExitContext);
}

/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement.  Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor.  Use its exit
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor.  Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}

namespace {

class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition).  Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;
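
  // For illustration, with a reentrant capability (hypothetical annotated
  // type; REENTRANT_CAPABILITY stands in for the reentrant_capability
  // attribute), the depth evolves as follows:
  //
  //   class CAPABILITY("mutex") REENTRANT_CAPABILITY RecursiveMutex { ... };
  //   RecursiveMutex Mu;
  //
  //   Mu.Lock();    // fact 'Mu' added, ReentrancyDepth == 0
  //   Mu.Lock();    // fact replaced via tryReenter, ReentrancyDepth == 1
  //   Mu.Unlock();  // leaveReentrant: depth back to 0, 'Mu' still held
  //   Mu.Unlock();  // 'Mu' removed from the lockset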
884
885 LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
886 SourceKind Src)
887 : FactEntry(Lockable, CE, LK, Loc, Src) {}
888
889public:
890 static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
891 const LockableFactEntry &Other) {
892 return new (Alloc) LockableFactEntry(Other);
893 }
894
895 static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
896 const CapabilityExpr &CE, LockKind LK,
897 SourceLocation Loc,
898 SourceKind Src = Acquired) {
899 return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
900 }
901
902 unsigned int getReentrancyDepth() const { return ReentrancyDepth; }
903
904 void
905 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
906 SourceLocation JoinLoc, LockErrorKind LEK,
907 ThreadSafetyHandler &Handler) const override {
908 if (!asserted() && !negative() && !isUniversal()) {
909 Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
910 LEK);
911 }
912 }
913
914 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
915 ThreadSafetyHandler &Handler) const override {
916 if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
917 // This capability has been reentrantly acquired.
918 FSet.replaceLock(FactMan, entry, RFact);
919 } else {
920 Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
921 entry.loc());
922 }
923 }
924
925 void handleUnlock(FactSet &FSet, FactManager &FactMan,
926 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
927 bool FullyRemove,
928 ThreadSafetyHandler &Handler) const override {
929 FSet.removeLock(FactMan, Cp);
930
931 if (const FactEntry *RFact = leaveReentrant(FactMan)) {
932 // This capability remains reentrantly acquired.
933 FSet.addLock(FactMan, RFact);
934 } else if (!Cp.negative()) {
935 FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
936 !Cp, LK_Exclusive, UnlockLoc));
937 }
938 }
939
940 // Return an updated FactEntry if we can acquire this capability reentrant,
941 // nullptr otherwise.
942 const FactEntry *tryReenter(FactManager &FactMan,
943 LockKind ReenterKind) const {
944 if (!reentrant())
945 return nullptr;
946 if (kind() != ReenterKind)
947 return nullptr;
948 auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
949 NewFact->ReentrancyDepth++;
950 return NewFact;
951 }
952
953 // Return an updated FactEntry if we are releasing a capability previously
954 // acquired reentrant, nullptr otherwise.
955 const FactEntry *leaveReentrant(FactManager &FactMan) const {
956 if (!ReentrancyDepth)
957 return nullptr;
958 assert(reentrant());
959 auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
960 NewFact->ReentrancyDepth--;
961 return NewFact;
962 }
963
964 static bool classof(const FactEntry *A) {
965 return A->getFactEntryKind() == Lockable;
966 }
967};

enum UnderlyingCapabilityKind {
  UCK_Acquired,          ///< Any kind of acquired capability.
  UCK_ReleasedShared,    ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};

struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};

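// For illustration, a scoped capability is an RAII object such as the
// following sketch (hypothetical names; SCOPED_CAPABILITY, ACQUIRE and
// RELEASE stand in for the attributes in the documentation):
//
//   class SCOPED_CAPABILITY MutexLocker {
//   public:
//     MutexLocker(Mutex *Mu) ACQUIRE(Mu) : Mut(Mu) { Mu->Lock(); }
//     ~MutexLocker() RELEASE() { Mut->Unlock(); }
//   private:
//     Mutex *Mut;
//   };
//
// A ScopedLockableFactEntry for 'MutexLocker Lock(&Mu);' manages the
// underlying capability 'Mu' as UCK_Acquired: destroying the locker releases
// 'Mu', and releasing or re-acquiring 'Mu' through the locker is handled by
// handleLock/handleUnlock below.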
class ScopedLockableFactEntry final
    : public FactEntry,
      private llvm::TrailingObjects<ScopedLockableFactEntry,
                                    UnderlyingCapability> {
  friend TrailingObjects;

private:
  const unsigned ManagedCapacity;
  unsigned ManagedSize = 0;

  ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
                          SourceKind Src, unsigned ManagedCapacity)
      : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
        ManagedCapacity(ManagedCapacity) {}

  void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
    assert(ManagedSize < ManagedCapacity);
    new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
    ++ManagedSize;
  }

  ArrayRef<UnderlyingCapability> getManaged() const {
    return getTrailingObjects(ManagedSize);
  }

public:
  static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                         const CapabilityExpr &CE,
                                         SourceLocation Loc, SourceKind Src,
                                         unsigned ManagedCapacity) {
    void *Storage =
        Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
                       alignof(ScopedLockableFactEntry));
    return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
  }

  CapExprSet getUnderlyingMutexes() const {
    CapExprSet UnderlyingMutexesSet;
    for (const UnderlyingCapability &UnderlyingMutex : getManaged())
      UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
    return UnderlyingMutexesSet;
  }

  /// \name Adding managed locks
  /// Capacity for managed locks must have been allocated via \ref create.
  /// There is no reallocation in case the capacity is exceeded!
  /// \{
  void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }

  void addExclusiveUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedExclusive);
  }

  void addSharedUnlock(const CapabilityExpr &M) {
    addManaged(M, UCK_ReleasedShared);
  }
  /// \}

  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
      return;

    for (const auto &UnderlyingMutex : getManaged()) {
      const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
      if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
          (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
        // If this scoped lock manages another mutex, and if the underlying
        // mutex is still/not held, then warn about the underlying mutex.
        Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
                                          UnderlyingMutex.Cap.toString(), loc(),
                                          JoinLoc, LEK);
      }
    }
  }

  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    for (const auto &UnderlyingMutex : getManaged()) {
      if (UnderlyingMutex.Kind == UCK_Acquired)
        lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
             &Handler);
      else
        unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
    }
  }

  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    assert(!Cp.negative() && "Managing object cannot be negative.");
    for (const auto &UnderlyingMutex : getManaged()) {
      // Remove/lock the underlying mutex if it exists/is still unlocked; warn
      // on double unlocking/locking if we're not destroying the scoped object.
      ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
      if (UnderlyingMutex.Kind == UCK_Acquired) {
        unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
      } else {
        LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
                            ? LK_Shared
                            : LK_Exclusive;
        lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
      }
    }
    if (FullyRemove)
      FSet.removeLock(FactMan, Cp);
  }

  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == ScopedLockable;
  }

private:
  void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
            LockKind kind, SourceLocation loc,
            ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
        // This capability has been reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
      } else if (Handler) {
        Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
      }
    } else {
      FSet.removeLock(FactMan, !Cp);
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
                                                                  Managed));
    }
  }

  void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
              SourceLocation loc, ThreadSafetyHandler *Handler) const {
    if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
      const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
      if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
        // This capability remains reentrantly acquired.
        FSet.replaceLock(FactMan, It, RFact);
        return;
      }

      FSet.replaceLock(
          FactMan, It,
          FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
    } else if (Handler) {
      SourceLocation PrevLoc;
      if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
        PrevLoc = Neg->loc();
      Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
    }
  }
};

/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
  friend class BuildLockset;
  friend class threadSafety::BeforeSet;

  llvm::BumpPtrAllocator Bpa;
  threadSafety::til::MemRegionRef Arena;
  threadSafety::SExprBuilder SxBuilder;

  ThreadSafetyHandler &Handler;
  const FunctionDecl *CurrentFunction;
  LocalVariableMap LocalVarMap;
  // Maps constructed objects to `this` placeholder prior to initialization.
  llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
  FactManager FactMan;
  std::vector<CFGBlockInfo> BlockInfo;

  BeforeSet *GlobalBeforeSet;

public:
  ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
      : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
        GlobalBeforeSet(Bset) {}

  bool inCurrentScope(const CapabilityExpr &CapE);

  void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
  void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
                  SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);

  template <typename AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D, til::SExpr *Self = nullptr);

  template <class AttrType>
  void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
                   const NamedDecl *D,
                   const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
                   Expr *BrE, bool Neg);

  const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
                                     bool &Negate);

  void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
                      const CFGBlock* PredBlock,
                      const CFGBlock *CurrBlock);

  bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
            LockErrorKind EntryLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind EntryLEK,
                        LockErrorKind ExitLEK);

  void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
                        SourceLocation JoinLoc, LockErrorKind LEK) {
    intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
  }

  void runAnalysis(AnalysisDeclContext &AC);

  void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
                          const Expr *Exp, AccessKind AK, Expr *MutexExp,
                          ProtectedOperationKind POK, til::LiteralPtr *Self,
                          SourceLocation Loc);
  void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
                       Expr *MutexExp, til::LiteralPtr *Self,
                       SourceLocation Loc);

  void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK);
  void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK);
};

} // namespace

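// For illustration, lock-ordering annotations look like this (hypothetical
// member names; ACQUIRED_AFTER/ACQUIRED_BEFORE stand in for the attributes
// in the documentation):
//
//   Mutex Mu1;
//   Mutex Mu2 ACQUIRED_AFTER(Mu1);  // equivalent: Mu1 ACQUIRED_BEFORE(Mu2)
//
//   void f() {
//     Mu2.Lock();
//     Mu1.Lock();  // warning: 'Mu1' must be acquired before 'Mu2'
//   }
//
// BeforeSet records, for each mutex, the set of mutexes that it must be
// acquired before, and checkBeforeAfter below walks that graph transitively,
// warning on violations and on cycles in the declared ordering.
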
/// Process acquired_before and acquired_after attributes on Vd.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
            Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}

BeforeSet::BeforeInfo *
BeforeSet::getBeforeInfoForDecl(const ValueDecl *Vd,
                                ThreadSafetyAnalyzer &Analyzer) {
  auto It = BMap.find(Vd);
  BeforeInfo *Info = nullptr;
  if (It == BMap.end())
    Info = insertAttrExprs(Vd, Analyzer);
  else
    Info = It->second.get();
  assert(Info && "BMap contained nullptr?");
  return Info;
}

/// Warn if any mutexes in FSet are in the acquired_before set of Vd.
void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
                                 const FactSet& FSet,
                                 ThreadSafetyAnalyzer& Analyzer,
                                 SourceLocation Loc, StringRef CapKind) {
  SmallVector<BeforeInfo*, 8> InfoVect;

  // Do a depth-first traversal of Vd.
  // Return true if there are cycles.
  std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
    if (!Vd)
      return false;

    BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);

    if (Info->Visited == 1)
      return true;

    if (Info->Visited == 2)
      return false;

    if (Info->Vect.empty())
      return false;

    InfoVect.push_back(Info);
    Info->Visited = 1;
    for (const auto *Vdb : Info->Vect) {
      // Exclude mutexes in our immediate before set.
      if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
        StringRef L1 = StartVd->getName();
        StringRef L2 = Vdb->getName();
        Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
      }
      // Transitively search other before sets, and warn on cycles.
      if (traverse(Vdb)) {
        if (CycMap.try_emplace(Vd, true).second) {
          StringRef L1 = Vd->getName();
          Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
        }
      }
    }
    Info->Visited = 2;
    return false;
  };

  traverse(StartVd);

  for (auto *Info : InfoVect)
    Info->Visited = 0;
}

/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
  if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
    return getValueDecl(CE->getSubExpr());

  if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
    return DR->getDecl();

  if (const auto *ME = dyn_cast<MemberExpr>(Exp))
    return ME->getMemberDecl();

  return nullptr;
}

bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
  const threadSafety::til::SExpr *SExp = CapE.sexpr();
  assert(SExp && "Null expressions should be ignored");

  if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
    const ValueDecl *VD = LP->clangDecl();
    // Variables defined in a function are always inaccessible.
    if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
      return false;
    // For now we consider static class members to be inaccessible.
    if (isa<CXXRecordDecl>(VD->getDeclContext()))
      return false;
    // Global variables are always in scope.
    return true;
  }

  // Members are in scope from methods of the same class.
  if (const auto *P = dyn_cast<til::Project>(SExp)) {
    if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
      return false;
    const ValueDecl *VD = P->clangDecl();
    return VD->getDeclContext() == CurrentFunction->getDeclContext();
  }

  return false;
}

/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      if (inCurrentScope(*Entry) && !Entry->asserted() && !Entry->reentrant())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (!Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, Entry);
  }
}

/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}

/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       til::SExpr *Self) {
  if (Attr->args_size() == 0) {
    // The mutex held is the "this" object.
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      return;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
    return;
  }

  for (const auto *Arg : Attr->args()) {
    CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
    if (Cp.isInvalid()) {
      warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
      continue;
    }
    //else
    if (!Cp.shouldIgnore())
      Mtxs.push_back_nodup(Cp);
  }
}

/// Extract the list of mutexIDs from a trylock attribute.  If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
                                       const Expr *Exp, const NamedDecl *D,
                                       const CFGBlock *PredBlock,
                                       const CFGBlock *CurrBlock,
                                       Expr *BrE, bool Neg) {
  // Find out which branch has the lock
  bool branch = false;
  if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
    branch = BLE->getValue();
  else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
    branch = ILE->getValue().getBoolValue();

  int branchnum = branch ? 0 : 1;
  if (Neg)
    branchnum = !branchnum;

  // If we've taken the trylock branch, then add the lock
  int i = 0;
  for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
       SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
    if (*SI == CurrBlock && i == branchnum)
      getMutexIDs(Mtxs, Attr, Exp, D);
  }
}

static bool getStaticBooleanValue(Expr *E, bool &TCond) {
  if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
    TCond = false;
    return true;
  } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
    TCond = BLE->getValue();
    return true;
  } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
    TCond = ILE->getValue().getBoolValue();
    return true;
  } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
    return getStaticBooleanValue(CE->getSubExpr(), TCond);
  return false;
}

// If Cond can be traced back to a function call, return the call expression.
// Negate should be passed in as false; it will be set to true if the
// function call is negated, e.g. if (!mu.tryLock(...))
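//
// For illustration, given a hypothetical trylock method declared as
// 'bool Mutex::TryLock() TRY_ACQUIRE(true)', all of the following conditions
// trace back to the same call, with Negate set as indicated:
//
//   if (mu.TryLock()) ...                // Negate == false
//   if (!mu.TryLock()) ...               // Negate == true
//   if (mu.TryLock() == false) ...       // Negate == true
//   bool ok = mu.TryLock(); if (ok) ...  // traced through the variable map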
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}

/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock.  The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when its result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
                                                          LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
                                                          LK_Shared, Loc));
}

namespace {

/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
/// FIXME: In future, we may be able to not inherit from a visitor.
class BuildLockset : public ConstStmtVisitor<BuildLockset> {
  friend class ThreadSafetyAnalyzer;

  ThreadSafetyAnalyzer *Analyzer;
  FactSet FSet;
  // The fact set for the function on exit.
  const FactSet &FunctionExitFSet;
  LocalVariableMap::Context LVarCtx;
  unsigned CtxIndex;

  // helper functions

  void checkAccess(const Expr *Exp, AccessKind AK,
                   ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkAccess(FSet, Exp, AK, POK);
  }
  void checkPtAccess(const Expr *Exp, AccessKind AK,
                     ProtectedOperationKind POK = POK_VarAccess) {
    Analyzer->checkPtAccess(FSet, Exp, AK, POK);
  }

  void handleCall(const Expr *Exp, const NamedDecl *D,
                  til::LiteralPtr *Self = nullptr,
                  SourceLocation Loc = SourceLocation());
  void examineArguments(const FunctionDecl *FD,
                        CallExpr::const_arg_iterator ArgBegin,
                        CallExpr::const_arg_iterator ArgEnd,
                        bool SkipFirstParam = false);

public:
  BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
               const FactSet &FunctionExitFSet)
      : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
        FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
        CtxIndex(Info.EntryIndex) {}

  void VisitUnaryOperator(const UnaryOperator *UO);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCastExpr(const CastExpr *CE);
  void VisitCallExpr(const CallExpr *Exp);
  void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
  void VisitDeclStmt(const DeclStmt *S);
  void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
  void VisitReturnStmt(const ReturnStmt *S);
};

} // namespace
1680
1681/// Warn if the LSet does not contain a lock sufficient to protect access
1682/// of at least the passed in AccessKind.
1683void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
1684 const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
1685 Expr *MutexExp, ProtectedOperationKind POK, til::LiteralPtr *Self,
1686 SourceLocation Loc) {
1687 LockKind LK = getLockKindFromAccessKind(AK);
1688 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1689 if (Cp.isInvalid()) {
1690 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1691 return;
1692 } else if (Cp.shouldIgnore()) {
1693 return;
1694 }
1695
1696 if (Cp.negative()) {
1697 // Negative capabilities act like excluded locks: the lock must not be held.
1698 const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
1699 if (LDat) {
1700 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1701 (!Cp).toString(), Loc);
1702 return;
1703 }
1704
1705 // If this does not refer to a negative capability in the same class,
1706 // then stop here.
1707 if (!inCurrentScope(Cp))
1708 return;
1709
1710 // Otherwise the negative requirement must be propagated to the caller.
1711 LDat = FSet.findLock(FactMan, Cp);
1712 if (!LDat) {
1713 Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1714 }
1715 return;
1716 }
1717
1718 const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
1719 bool NoError = true;
1720 if (!LDat) {
1721 // No exact match found. Look for a partial match.
1722 LDat = FSet.findPartialMatch(FactMan, Cp);
1723 if (LDat) {
1724 // Warn that there's no precise match.
1725 std::string PartMatchStr = LDat->toString();
1726 StringRef PartMatchName(PartMatchStr);
1727 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
1728 &PartMatchName);
1729 } else {
1730 // Warn that there's no match at all.
1731 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1732 }
1733 NoError = false;
1734 }
1735 // Make sure the mutex we found is the right kind.
1736 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1737 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1738 }
1739}
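// For example (hypothetical): with 'int a GUARDED_BY(mu);', accessing 'a'
// while 'mu' is absent from FSet reaches the findLockUniv miss above and
// calls handleMutexNotHeld; holding 'mu' only as LK_Shared during a write
// instead trips the final isAtLeast(LK) check.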
1740
1741/// Warn if the LSet contains the given lock.
1742void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1743 const NamedDecl *D, const Expr *Exp,
1744 Expr *MutexExp,
1745 til::LiteralPtr *Self,
1746 SourceLocation Loc) {
1747 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1748 if (Cp.isInvalid()) {
1749 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1750 return;
1751 } else if (Cp.shouldIgnore()) {
1752 return;
1753 }
1754
1755 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1756 if (LDat) {
1757 Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
1758 Cp.toString(), Loc);
1759 }
1760}
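// For example (hypothetical): calling a function annotated with
// LOCKS_EXCLUDED(mu) while 'mu' is still in the lockset reports
// handleFunExcludesLock above.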
1761
1762/// Checks guarded_by and pt_guarded_by attributes.
1763/// Whenever we identify an access (read or write) to a DeclRefExpr that is
1764/// marked with guarded_by, we must ensure the appropriate mutexes are held.
1765/// Similarly, we check if the access is to an expression that dereferences
1766/// a pointer marked with pt_guarded_by.
1767void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
1768 AccessKind AK,
1769 ProtectedOperationKind POK) {
1770 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1771
1772 SourceLocation Loc = Exp->getExprLoc();
1773
1774 // Local variables of reference type cannot be re-assigned;
1775 // map them to their initializer.
1776 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1777 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1778 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1779 if (const auto *E = VD->getInit()) {
1780 // Guard against self-initialization. e.g., int &i = i;
1781 if (E == Exp)
1782 break;
1783 Exp = E->IgnoreImplicit()->IgnoreParenCasts();
1784 continue;
1785 }
1786 }
1787 break;
1788 }
1789
1790 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1791 // For dereferences
1792 if (UO->getOpcode() == UO_Deref)
1793 checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
1794 return;
1795 }
1796
1797 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
1798 switch (BO->getOpcode()) {
1799 case BO_PtrMemD: // .*
1800 return checkAccess(FSet, BO->getLHS(), AK, POK);
1801 case BO_PtrMemI: // ->*
1802 return checkPtAccess(FSet, BO->getLHS(), AK, POK);
1803 default:
1804 return;
1805 }
1806 }
1807
1808 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1809 checkPtAccess(FSet, AE->getLHS(), AK, POK);
1810 return;
1811 }
1812
1813 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1814 if (ME->isArrow())
1815 checkPtAccess(FSet, ME->getBase(), AK, POK);
1816 else
1817 checkAccess(FSet, ME->getBase(), AK, POK);
1818 }
1819
1820 const ValueDecl *D = getValueDecl(Exp);
1821 if (!D || !D->hasAttrs())
1822 return;
1823
1824 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
1825 Handler.handleNoMutexHeld(D, POK, AK, Loc);
1826 }
1827
1828 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1829 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
1830}
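// Illustrative summary of the dispatch above (hypothetical declarations):
//   int a GUARDED_BY(mu);       // 'a = 0;' is checked directly here
//   int *p PT_GUARDED_BY(mu);   // '*p = 0;' goes through checkPtAccess
//   int arr[10] GUARDED_BY(mu); // 'arr[i]' decays and is re-checked here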
1831
1832/// Checks pt_guarded_by and pt_guarded_var attributes.
1833/// POK is the same operationKind that was passed to checkAccess.
1834void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
1835 AccessKind AK,
1836 ProtectedOperationKind POK) {
1837 // Strip off paren- and cast-expressions, checking if we encounter any other
1838 // operator that should be delegated to checkAccess() instead.
1839 while (true) {
1840 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1841 Exp = PE->getSubExpr();
1842 continue;
1843 }
1844 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1845 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1846 // If it's an actual array, and not a pointer, then its elements
1847 // are protected by GUARDED_BY, not PT_GUARDED_BY.
1848 checkAccess(FSet, CE->getSubExpr(), AK, POK);
1849 return;
1850 }
1851 Exp = CE->getSubExpr();
1852 continue;
1853 }
1854 break;
1855 }
1856
1857 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1858 if (UO->getOpcode() == UO_AddrOf) {
1859 // The pointer is the address of a variable, so dereferencing it yields
1860 // that variable again, which is not itself a pointer.
1861 checkAccess(FSet, UO->getSubExpr(), AK, POK);
1862 return;
1863 }
1864 }
1865
1866 // Pass by reference/pointer warnings are under a different flag.
1867 ProtectedOperationKind PtPOK = POK_VarDereference;
1868 switch (POK) {
1869 case POK_PassByRef:
1870 PtPOK = POK_PtPassByRef;
1871 break;
1872 case POK_ReturnByRef:
1873 PtPOK = POK_PtReturnByRef;
1874 break;
1875 case POK_PassPointer:
1876 PtPOK = POK_PtPassPointer;
1877 break;
1878 case POK_ReturnPointer:
1879 PtPOK = POK_PtReturnPointer;
1880 break;
1881 default:
1882 break;
1883 }
1884
1885 const ValueDecl *D = getValueDecl(Exp);
1886 if (!D || !D->hasAttrs())
1887 return;
1888
1889 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
1890 Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
1891
1892 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1893 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
1894 Exp->getExprLoc());
1895}
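// For example (hypothetical): with 'int *p PT_GUARDED_BY(mu);', the write
// '*p = 0;' must hold 'mu'; reassigning 'p' itself is a plain access and is
// handled by checkAccess instead.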
1896
1897/// Process a function call, method call, constructor call,
1898/// or destructor call. This involves looking at the attributes on the
1899/// corresponding function/method/constructor/destructor, issuing warnings,
1900/// and updating the locksets accordingly.
1901///
1902/// FIXME: For classes annotated with one of the guarded annotations, we need
1903/// to treat const method calls as reads and non-const method calls as writes,
1904/// and check that the appropriate locks are held. Non-const method calls with
1905 /// the same signature as const method calls can also be treated as reads.
1906///
1907/// \param Exp The call expression.
1908/// \param D The callee declaration.
1909/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
1910/// of an implicitly called cleanup function.
1911/// \param Loc If \p Exp = nullptr, the location.
1912void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
1913 til::LiteralPtr *Self, SourceLocation Loc) {
1914 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
1915 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
1916 CapExprSet ScopedReqsAndExcludes;
1917
1918 // Figure out if we're constructing an object of a scoped lockable class.
1919 CapabilityExpr Scp;
1920 if (Exp) {
1921 assert(!Self);
1922 const auto *TagT = Exp->getType()->getAs<TagType>();
1923 if (D->hasAttrs() && TagT && Exp->isPRValue()) {
1924 til::LiteralPtr *Placeholder =
1925 Analyzer->SxBuilder.createVariable(nullptr);
1926 [[maybe_unused]] auto inserted =
1927 Analyzer->ConstructedObjects.insert({Exp, Placeholder});
1928 assert(inserted.second && "Are we visiting the same expression again?");
1929 if (isa<CXXConstructExpr>(Exp))
1930 Self = Placeholder;
1931 if (TagT->getOriginalDecl()
1932 ->getMostRecentDecl()
1933 ->hasAttr<ScopedLockableAttr>())
1934 Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
1935 }
1936
1937 assert(Loc.isInvalid());
1938 Loc = Exp->getExprLoc();
1939 }
1940
1941 for (const Attr *At : D->attrs()) {
1942 switch (At->getKind()) {
1943 // When we encounter a lock function, we need to add the lock to our
1944 // lockset.
1945 case attr::AcquireCapability: {
1946 const auto *A = cast<AcquireCapabilityAttr>(At);
1947 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
1948 : ExclusiveLocksToAdd,
1949 A, Exp, D, Self);
1950 break;
1951 }
1952
1953 // An assert will add a lock to the lockset, but will not generate
1954 // a warning if it is already there, and will not generate a warning
1955 // if it is not removed.
1956 case attr::AssertCapability: {
1957 const auto *A = cast<AssertCapabilityAttr>(At);
1958 CapExprSet AssertLocks;
1959 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
1960 for (const auto &AssertLock : AssertLocks)
1961 Analyzer->addLock(
1962 FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
1963 AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
1964 Loc, FactEntry::Asserted));
1965 break;
1966 }
1967
1968 // When we encounter an unlock function, we need to remove unlocked
1969 // mutexes from the lockset, and flag a warning if they are not there.
1970 case attr::ReleaseCapability: {
1971 const auto *A = cast<ReleaseCapabilityAttr>(At);
1972 if (A->isGeneric())
1973 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
1974 else if (A->isShared())
1975 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
1976 else
1977 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
1978 break;
1979 }
1980
1981 case attr::RequiresCapability: {
1982 const auto *A = cast<RequiresCapabilityAttr>(At);
1983 for (auto *Arg : A->args()) {
1984 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
1985 A->isShared() ? AK_Read : AK_Written,
1986 Arg, POK_FunctionCall, Self, Loc);
1987 // Used when adopting a lock into a scoped capability.
1988 if (!Scp.shouldIgnore())
1989 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
1990 }
1991 break;
1992 }
1993
1994 case attr::LocksExcluded: {
1995 const auto *A = cast<LocksExcludedAttr>(At);
1996 for (auto *Arg : A->args()) {
1997 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
1998 // Used when deferring a lock via a scoped capability.
1999 if (!Scp.shouldIgnore())
2000 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2001 }
2002 break;
2003 }
2004
2005 // Ignore attributes unrelated to thread-safety
2006 default:
2007 break;
2008 }
2009 }
2010
2011 std::optional<CallExpr::const_arg_range> Args;
2012 if (Exp) {
2013 if (const auto *CE = dyn_cast<CallExpr>(Exp))
2014 Args = CE->arguments();
2015 else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
2016 Args = CE->arguments();
2017 else
2018 llvm_unreachable("Unknown call kind");
2019 }
2020 const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
2021 if (CalledFunction && Args.has_value()) {
2022 for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
2023 CapExprSet DeclaredLocks;
2024 for (const Attr *At : Param->attrs()) {
2025 switch (At->getKind()) {
2026 case attr::AcquireCapability: {
2027 const auto *A = cast<AcquireCapabilityAttr>(At);
2028 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2029 : ExclusiveLocksToAdd,
2030 A, Exp, D, Self);
2031 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2032 break;
2033 }
2034
2035 case attr::ReleaseCapability: {
2036 const auto *A = cast<ReleaseCapabilityAttr>(At);
2037 if (A->isGeneric())
2038 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2039 else if (A->isShared())
2040 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2041 else
2042 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2043 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2044 break;
2045 }
2046
2047 case attr::RequiresCapability: {
2048 const auto *A = cast<RequiresCapabilityAttr>(At);
2049 for (auto *Arg : A->args())
2050 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2051 A->isShared() ? AK_Read : AK_Written,
2052 Arg, POK_FunctionCall, Self, Loc);
2053 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2054 break;
2055 }
2056
2057 case attr::LocksExcluded: {
2058 const auto *A = cast<LocksExcludedAttr>(At);
2059 for (auto *Arg : A->args())
2060 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2061 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2062 break;
2063 }
2064
2065 default:
2066 break;
2067 }
2068 }
2069 if (DeclaredLocks.empty())
2070 continue;
2071 CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
2072 StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
2073 if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
2074 Cp.isInvalid() && CBTE) {
2075 if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
2076 Object != Analyzer->ConstructedObjects.end())
2077 Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
2078 /*Reentrant=*/false);
2079 }
2080 const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
2081 if (!Fact) {
2082 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
2083 Cp.toString(), LK_Exclusive,
2084 Exp->getExprLoc());
2085 continue;
2086 }
2087 const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
2088 for (const auto &[a, b] :
2089 zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
2090 if (!a.has_value()) {
2091 Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
2092 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2093 b.value().getKind(), b.value().toString());
2094 } else if (!b.has_value()) {
2095 Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
2096 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2097 a.value().getKind(), a.value().toString());
2098 } else if (!a.value().equals(b.value())) {
2099 Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
2100 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2101 a.value().getKind(), a.value().toString(), b.value().toString());
2102 break;
2103 }
2104 }
2105 }
2106 }
2107 // Remove locks first to allow lock upgrading/downgrading.
2108 // FIXME -- should only fully remove if the attribute refers to 'this'.
2109 bool Dtor = isa<CXXDestructorDecl>(D);
2110 for (const auto &M : ExclusiveLocksToRemove)
2111 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
2112 for (const auto &M : SharedLocksToRemove)
2113 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
2114 for (const auto &M : GenericLocksToRemove)
2115 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
2116
2117 // Add locks.
2118 FactEntry::SourceKind Source =
2119 !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
2120 for (const auto &M : ExclusiveLocksToAdd)
2121 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2122 M, LK_Exclusive, Loc, Source));
2123 for (const auto &M : SharedLocksToAdd)
2124 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2125 M, LK_Shared, Loc, Source));
2126
2127 if (!Scp.shouldIgnore()) {
2128 // Add the managing object as a dummy mutex, mapped to the underlying mutex.
2129 auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
2130 Scp, Loc, FactEntry::Acquired,
2131 ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
2132 ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
2133 SharedLocksToRemove.size());
2134 for (const auto &M : ExclusiveLocksToAdd)
2135 ScopedEntry->addLock(M);
2136 for (const auto &M : SharedLocksToAdd)
2137 ScopedEntry->addLock(M);
2138 for (const auto &M : ScopedReqsAndExcludes)
2139 ScopedEntry->addLock(M);
2140 for (const auto &M : ExclusiveLocksToRemove)
2141 ScopedEntry->addExclusiveUnlock(M);
2142 for (const auto &M : SharedLocksToRemove)
2143 ScopedEntry->addSharedUnlock(M);
2144 Analyzer->addLock(FSet, ScopedEntry);
2145 }
2146}
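// Illustrative sketch (hypothetical) of the scoped-lockable path above:
//   class SCOPED_LOCKABLE MutexLock {
//     MutexLock(Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
//     ~MutexLock() UNLOCK_FUNCTION();
//   };
// Constructing 'MutexLock lock(&mu);' adds 'mu' as a Managed fact and adds a
// ScopedLockableFactEntry for the guard object that maps back to 'mu'.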
2147
2148/// For unary operations which read and write a variable, we need to
2149/// check whether we hold any required mutexes. Reads are checked in
2150/// VisitCastExpr.
2151void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2152 switch (UO->getOpcode()) {
2153 case UO_PostDec:
2154 case UO_PostInc:
2155 case UO_PreDec:
2156 case UO_PreInc:
2157 checkAccess(UO->getSubExpr(), AK_Written);
2158 break;
2159 default:
2160 break;
2161 }
2162}
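// E.g. (hypothetical): '++a;' on 'int a GUARDED_BY(mu);' is checked as a
// write here; the read half of the increment surfaces in VisitCastExpr.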
2163
2164/// For binary operations which assign to a variable (writes), we need to check
2165/// whether we hold any required mutexes.
2166/// FIXME: Deal with non-primitive types.
2167void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2168 if (!BO->isAssignmentOp())
2169 return;
2170
2171 // adjust the context
2172 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
2173
2174 checkAccess(BO->getLHS(), AK_Written);
2175}
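// E.g. (hypothetical): 'a = b;' checks the write to 'a' here, while the
// read of 'b' arrives separately as an LValueToRValue cast.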
2176
2177/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2178/// need to ensure we hold any required mutexes.
2179/// FIXME: Deal with non-primitive types.
2180void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2181 if (CE->getCastKind() != CK_LValueToRValue)
2182 return;
2183 checkAccess(CE->getSubExpr(), AK_Read);
2184}
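// E.g. (hypothetical): the load of 'a' in 'int x = a;' reaches this visitor
// as an LValueToRValue cast and is checked with AK_Read.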
2185
2186 void BuildLockset::examineArguments(const FunctionDecl *FD,
2187 CallExpr::const_arg_iterator ArgBegin,
2188 CallExpr::const_arg_iterator ArgEnd,
2189 bool SkipFirstParam) {
2190 // Currently we can't do anything if we don't know the function declaration.
2191 if (!FD)
2192 return;
2193
2194 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2195 // only turns off checking within the body of a function, but we also
2196 // use it to turn off checking in arguments to the function. This
2197 // could result in some false negatives, but the alternative is to
2198 // create yet another attribute.
2199 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2200 return;
2201
2202 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2203 auto Param = Params.begin();
2204 if (SkipFirstParam)
2205 ++Param;
2206
2207 // There can be default arguments, so we stop when one iterator is at end().
2208 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2209 ++Param, ++Arg) {
2210 QualType Qt = (*Param)->getType();
2211 if (Qt->isReferenceType())
2212 checkAccess(*Arg, AK_Read, POK_PassByRef);
2213 else if (Qt->isPointerType())
2214 checkPtAccess(*Arg, AK_Read, POK_PassPointer);
2215 }
2216}
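// E.g. (hypothetical): calling 'void sink(int &r);' as 'sink(a);' with
// 'int a GUARDED_BY(mu);' is checked as POK_PassByRef; an 'int *' parameter
// would be checked as POK_PassPointer.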
2217
2218void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2219 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
2220 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
2221 // ME can be null when calling a method pointer
2222 const CXXMethodDecl *MD = CE->getMethodDecl();
2223
2224 if (ME && MD) {
2225 if (ME->isArrow()) {
2226 // Should perhaps be AK_Written if !MD->isConst().
2227 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
2228 } else {
2229 // Should perhaps be AK_Written if !MD->isConst().
2230 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
2231 }
2232 }
2233
2234 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
2235 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
2236 OverloadedOperatorKind OEop = OE->getOperator();
2237 switch (OEop) {
2238 case OO_Equal:
2239 case OO_PlusEqual:
2240 case OO_MinusEqual:
2241 case OO_StarEqual:
2242 case OO_SlashEqual:
2243 case OO_PercentEqual:
2244 case OO_CaretEqual:
2245 case OO_AmpEqual:
2246 case OO_PipeEqual:
2247 case OO_LessLessEqual:
2248 case OO_GreaterGreaterEqual:
2249 checkAccess(OE->getArg(1), AK_Read);
2250 [[fallthrough]];
2251 case OO_PlusPlus:
2252 case OO_MinusMinus:
2253 checkAccess(OE->getArg(0), AK_Written);
2254 break;
2255 case OO_Star:
2256 case OO_ArrowStar:
2257 case OO_Arrow:
2258 case OO_Subscript:
2259 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
2260 // Grrr. operator* can be multiplication...
2261 checkPtAccess(OE->getArg(0), AK_Read);
2262 }
2263 [[fallthrough]];
2264 default: {
2265 // TODO: get rid of this, and rely on pass-by-ref instead.
2266 const Expr *Obj = OE->getArg(0);
2267 checkAccess(Obj, AK_Read);
2268 // Check the remaining arguments. For method operators, the first
2269 // argument is the implicit self argument, and doesn't appear in the
2270 // FunctionDecl, but for non-methods it does.
2271 const FunctionDecl *FD = OE->getDirectCallee();
2272 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
2273 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
2274 break;
2275 }
2276 }
2277 } else {
2278 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
2279 }
2280
2281 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2282 if (!D)
2283 return;
2284 handleCall(Exp, D);
2285}
2286
2287void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2288 const CXXConstructorDecl *D = Exp->getConstructor();
2289 if (D && D->isCopyConstructor()) {
2290 const Expr* Source = Exp->getArg(0);
2291 checkAccess(Source, AK_Read);
2292 } else {
2293 examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2294 }
2295 if (D && D->hasAttrs())
2296 handleCall(Exp, D);
2297}
2298
2299static const Expr *UnpackConstruction(const Expr *E) {
2300 if (auto *CE = dyn_cast<CastExpr>(E))
2301 if (CE->getCastKind() == CK_NoOp)
2302 E = CE->getSubExpr()->IgnoreParens();
2303 if (auto *CE = dyn_cast<CastExpr>(E))
2304 if (CE->getCastKind() == CK_ConstructorConversion ||
2305 CE->getCastKind() == CK_UserDefinedConversion)
2306 E = CE->getSubExpr();
2307 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2308 E = BTE->getSubExpr();
2309 return E;
2310}
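// E.g. (hypothetical): in 'MutexLock lock = MutexLock(&mu);' the initializer
// is wrapped in conversion casts and a CXXBindTemporaryExpr; stripping them
// lets VisitDeclStmt below find the CXXConstructExpr that was recorded in
// ConstructedObjects.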
2311
2312void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2313 // adjust the context
2314 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
2315
2316 for (auto *D : S->getDeclGroup()) {
2317 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2318 const Expr *E = VD->getInit();
2319 if (!E)
2320 continue;
2321 E = E->IgnoreParens();
2322
2323 // handle constructors that involve temporaries
2324 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2325 E = EWC->getSubExpr()->IgnoreParens();
2326 E = UnpackConstruction(E);
2327
2328 if (auto Object = Analyzer->ConstructedObjects.find(E);
2329 Object != Analyzer->ConstructedObjects.end()) {
2330 Object->second->setClangDecl(VD);
2331 Analyzer->ConstructedObjects.erase(Object);
2332 }
2333 }
2334 }
2335}
2336
2337void BuildLockset::VisitMaterializeTemporaryExpr(
2338 const MaterializeTemporaryExpr *Exp) {
2339 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2340 if (auto Object = Analyzer->ConstructedObjects.find(
2341 UnpackConstruction(Exp->getSubExpr()));
2342 Object != Analyzer->ConstructedObjects.end()) {
2343 Object->second->setClangDecl(ExtD);
2344 Analyzer->ConstructedObjects.erase(Object);
2345 }
2346 }
2347}
2348
2349void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
2350 if (Analyzer->CurrentFunction == nullptr)
2351 return;
2352 const Expr *RetVal = S->getRetValue();
2353 if (!RetVal)
2354 return;
2355
2356 // If returning by reference or pointer, check that the function requires the
2357 // appropriate capabilities.
2358 const QualType ReturnType =
2359 Analyzer->CurrentFunction->getReturnType().getCanonicalType();
2360 if (ReturnType->isLValueReferenceType()) {
2361 Analyzer->checkAccess(
2362 FunctionExitFSet, RetVal,
2363 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2364 POK_ReturnByRef);
2365 } else if (ReturnType->isPointerType()) {
2366 Analyzer->checkPtAccess(
2367 FunctionExitFSet, RetVal,
2368 ReturnType->getPointeeType().isConstQualified() ? AK_Read : AK_Written,
2369 POK_ReturnPointer);
2370 }
2371}
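// E.g. (hypothetical): 'int &get() EXCLUSIVE_LOCKS_REQUIRED(mu) { return a; }'
// passes this check because 'mu' is in the expected exit set; without the
// annotation, returning the guarded 'a' by reference warns as POK_ReturnByRef.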
2372
2373/// Given two facts merging on a join point, possibly warn and decide whether to
2374/// keep or replace.
2375///
2376/// \return false if we should keep \p A, true if we should take \p B.
2377bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
2378 SourceLocation JoinLoc,
2379 LockErrorKind EntryLEK) {
2380 // Whether we can replace \p A by \p B.
2381 const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
2382 unsigned int ReentrancyDepthA = 0;
2383 unsigned int ReentrancyDepthB = 0;
2384
2385 if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
2386 ReentrancyDepthA = LFE->getReentrancyDepth();
2387 if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
2388 ReentrancyDepthB = LFE->getReentrancyDepth();
2389
2390 if (ReentrancyDepthA != ReentrancyDepthB) {
2391 Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
2392 JoinLoc, EntryLEK,
2393 /*ReentrancyMismatch=*/true);
2394 // Pick the FactEntry with the greater reentrancy depth as the "good"
2395 // fact to reduce potential later warnings.
2396 return CanModify && ReentrancyDepthA < ReentrancyDepthB;
2397 } else if (A.kind() != B.kind()) {
2398 // For managed capabilities, the destructor should unlock in the right mode
2399 // anyway. For asserted capabilities no unlocking is needed.
2400 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
2401 // The shared capability subsumes the exclusive capability, if possible.
2402 bool ShouldTakeB = B.kind() == LK_Shared;
2403 if (CanModify || !ShouldTakeB)
2404 return ShouldTakeB;
2405 }
2406 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
2407 A.loc());
2408 // Take the exclusive capability to reduce further warnings.
2409 return CanModify && B.kind() == LK_Exclusive;
2410 } else {
2411 // The non-asserted capability is the one we want to track.
2412 return CanModify && A.asserted() && !B.asserted();
2413 }
2414}
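// E.g. (hypothetical): joining a branch holding 'mu' shared with one holding
// it exclusive reports handleExclusiveAndShared and keeps the exclusive fact;
// differing reentrancy depths on a reentrant capability are reported as a
// reentrancy mismatch instead.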
2415
2416/// Compute the intersection of two locksets and issue warnings for any
2417/// locks in the symmetric difference.
2418///
2419/// This function is used at a merge point in the CFG when comparing the lockset
2420/// of each branch being merged. For example, given the following sequence:
2421 /// A; if () then B; else C; D; we need to check that the locksets after B and C
2422/// are the same. In the event of a difference, we use the intersection of these
2423/// two locksets at the start of D.
2424///
2425/// \param EntrySet A lockset for entry into a (possibly new) block.
2426/// \param ExitSet The lockset on exiting a preceding block.
2427/// \param JoinLoc The location of the join point for error reporting
2428/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2429/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
2430void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2431 const FactSet &ExitSet,
2432 SourceLocation JoinLoc,
2433 LockErrorKind EntryLEK,
2434 LockErrorKind ExitLEK) {
2435 FactSet EntrySetOrig = EntrySet;
2436
2437 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2438 for (const auto &Fact : ExitSet) {
2439 const FactEntry &ExitFact = FactMan[Fact];
2440
2441 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
2442 if (EntryIt != EntrySet.end()) {
2443 if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
2444 *EntryIt = Fact;
2445 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
2446 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
2447 EntryLEK, Handler);
2448 }
2449 }
2450
2451 // Find locks in EntrySet that are not in ExitSet, and remove them.
2452 for (const auto &Fact : EntrySetOrig) {
2453 const FactEntry *EntryFact = &FactMan[Fact];
2454 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
2455
2456 if (!ExitFact) {
2457 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
2458 ExitLEK == LEK_NotLockedAtEndOfFunction)
2459 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
2460 ExitLEK, Handler);
2461 if (ExitLEK == LEK_LockedSomePredecessors)
2462 EntrySet.removeLock(FactMan, *EntryFact);
2463 }
2464 }
2465}
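// E.g. (hypothetical): after 'if (c) mu.Lock();', 'mu' is held on only one
// predecessor of the join, so the loop above reports
// LEK_LockedSomePredecessors and removes 'mu' from the entry lockset.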
2466
2467// Return true if block B never continues to its successors.
2468static bool neverReturns(const CFGBlock *B) {
2469 if (B->hasNoReturnElement())
2470 return true;
2471 if (B->empty())
2472 return false;
2473
2474 CFGElement Last = B->back();
2475 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2476 if (isa<CXXThrowExpr>(S->getStmt()))
2477 return true;
2478 }
2479 return false;
2480}
2481
2482/// Check a function's CFG for thread-safety violations.
2483///
2484/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2485/// at the end of each block, and issue warnings for thread safety violations.
2486/// Each block in the CFG is traversed exactly once.
2487void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2488 // TODO: this whole function needs to be rewritten as a visitor for CFGWalker.
2489 // For now, we just use the walker to set things up.
2490 threadSafety::CFGWalker walker;
2491 if (!walker.init(AC))
2492 return;
2493
2494 // AC.dumpCFG(true);
2495 // threadSafety::printSCFG(walker);
2496
2497 CFG *CFGraph = walker.getGraph();
2498 const NamedDecl *D = walker.getDecl();
2499 CurrentFunction = dyn_cast<FunctionDecl>(D);
2500
2501 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2502 return;
2503
2504 // FIXME: Do something a bit more intelligent inside constructor and
2505 // destructor code. Constructors and destructors must assume unique access
2506 // to 'this', so checks on member variable access are disabled, but we should
2507 // still enable checks on other objects.
2508 if (isa<CXXConstructorDecl>(D))
2509 return; // Don't check inside constructors.
2510 if (isa<CXXDestructorDecl>(D))
2511 return; // Don't check inside destructors.
2512
2513 Handler.enterFunction(CurrentFunction);
2514
2515 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2516 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2517
2518 // We need to explore the CFG via a "topological" ordering.
2519 // That way, we will be guaranteed to have information about required
2520 // predecessor locksets when exploring a new block.
2521 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2522 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2523
2524 CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
2525 CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
2526
2527 // Mark entry block as reachable
2528 Initial.Reachable = true;
2529
2530 // Compute SSA names for local variables
2531 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2532
2533 // Fill in source locations for all CFGBlocks.
2534 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2535
2536 CapExprSet ExclusiveLocksAcquired;
2537 CapExprSet SharedLocksAcquired;
2538 CapExprSet LocksReleased;
2539
2540 // Add locks from exclusive_locks_required and shared_locks_required
2541 // to initial lockset. Also turn off checking for lock and unlock functions.
2542 // FIXME: is there a more intelligent way to check lock/unlock functions?
2543 if (!SortedGraph->empty()) {
2544 assert(*SortedGraph->begin() == &CFGraph->getEntry());
2545 FactSet &InitialLockset = Initial.EntrySet;
2546
2547 CapExprSet ExclusiveLocksToAdd;
2548 CapExprSet SharedLocksToAdd;
2549
2550 SourceLocation Loc = D->getLocation();
2551 for (const auto *Attr : D->attrs()) {
2552 Loc = Attr->getLocation();
2553 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2554 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2555 nullptr, D);
2556 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2557 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2558 // We must ignore such methods.
2559 if (A->args_size() == 0)
2560 return;
2561 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2562 nullptr, D);
2563 getMutexIDs(LocksReleased, A, nullptr, D);
2564 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2565 if (A->args_size() == 0)
2566 return;
2567 getMutexIDs(A->isShared() ? SharedLocksAcquired
2568 : ExclusiveLocksAcquired,
2569 A, nullptr, D);
2570 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2571 // Don't try to check trylock functions for now.
2572 return;
2573 }
2574 }
2575 ArrayRef<ParmVarDecl *> Params;
2576 if (CurrentFunction)
2577 Params = CurrentFunction->getCanonicalDecl()->parameters();
2578 else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
2579 Params = CurrentMethod->getCanonicalDecl()->parameters();
2580 else
2581 llvm_unreachable("Unknown function kind");
2582 for (const ParmVarDecl *Param : Params) {
2583 CapExprSet UnderlyingLocks;
2584 for (const auto *Attr : Param->attrs()) {
2585 Loc = Attr->getLocation();
2586 if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2587 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2588 nullptr, Param);
2589 getMutexIDs(LocksReleased, A, nullptr, Param);
2590 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2591 } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2592 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2593 nullptr, Param);
2594 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2595 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2596 getMutexIDs(A->isShared() ? SharedLocksAcquired
2597 : ExclusiveLocksAcquired,
2598 A, nullptr, Param);
2599 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2600 } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
2601 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2602 }
2603 }
2604 if (UnderlyingLocks.empty())
2605 continue;
2606 CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
2607 /*Neg=*/false, /*Reentrant=*/false);
2608 auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
2609 Cp, Param->getLocation(), FactEntry::Declared,
2610 UnderlyingLocks.size());
2611 for (const CapabilityExpr &M : UnderlyingLocks)
2612 ScopedEntry->addLock(M);
2613 addLock(InitialLockset, ScopedEntry, true);
2614 }
2615
2616 // FIXME -- Loc can be wrong here.
2617 for (const auto &Mu : ExclusiveLocksToAdd) {
2618 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2619 Mu, LK_Exclusive, Loc, FactEntry::Declared);
2620 addLock(InitialLockset, Entry, true);
2621 }
2622 for (const auto &Mu : SharedLocksToAdd) {
2623 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2624 Mu, LK_Shared, Loc, FactEntry::Declared);
2625 addLock(InitialLockset, Entry, true);
2626 }
2627 }
2628
2629 // Compute the expected exit set.
2630 // By default, we expect all locks held on entry to be held on exit.
2631 FactSet ExpectedFunctionExitSet = Initial.EntrySet;
2632
2633 // Adjust the expected exit set by adding or removing locks, as declared
2634 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2635 // issue the appropriate warning.
2636 // FIXME: the location here is not quite right.
2637 for (const auto &Lock : ExclusiveLocksAcquired)
2638 ExpectedFunctionExitSet.addLock(
2639 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
2640 D->getLocation()));
2641 for (const auto &Lock : SharedLocksAcquired)
2642 ExpectedFunctionExitSet.addLock(
2643 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
2644 D->getLocation()));
2645 for (const auto &Lock : LocksReleased)
2646 ExpectedFunctionExitSet.removeLock(FactMan, Lock);
2647
2648 for (const auto *CurrBlock : *SortedGraph) {
2649 unsigned CurrBlockID = CurrBlock->getBlockID();
2650 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2651
2652 // Use the default initial lockset in case there are no predecessors.
2653 VisitedBlocks.insert(CurrBlock);
2654
2655 // Iterate through the predecessor blocks and warn if the lockset for all
2656 // predecessors is not the same. We take the entry lockset of the current
2657 // block to be the intersection of all previous locksets.
2658 // FIXME: By keeping the intersection, we may output more errors in future
2659 // for a lock which is not in the intersection, but was in the union. We
2660 // may want to also keep the union in future. As an example, let's say
2661 // the intersection contains Mutex L, and the union contains L and M.
2662 // Later we unlock M. At this point, we would output an error because we
2663 // never locked M; although the real error is probably that we forgot to
2664 // lock M on all code paths. Conversely, let's say that later we lock M.
2665 // In this case, we should compare against the intersection instead of the
2666 // union because the real error is probably that we forgot to unlock M on
2667 // all code paths.
2668 bool LocksetInitialized = false;
2669 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2670 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2671 // if *PI -> CurrBlock is a back edge
2672 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2673 continue;
2674
2675 unsigned PrevBlockID = (*PI)->getBlockID();
2676 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2677
2678 // Ignore edges from blocks that can't return.
2679 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2680 continue;
2681
2682 // Okay, we can reach this block from the entry.
2683 CurrBlockInfo->Reachable = true;
2684
2685 FactSet PrevLockset;
2686 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2687
2688 if (!LocksetInitialized) {
2689 CurrBlockInfo->EntrySet = PrevLockset;
2690 LocksetInitialized = true;
2691 } else {
2692 // Surprisingly 'continue' doesn't always produce back edges, because
2693 // the CFG has empty "transition" blocks where they meet with the end
2694 // of the regular loop body. We still want to diagnose them as loop back edges.
2695 intersectAndWarn(
2696 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
2697 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
2698 ? LEK_LockedSomeLoopIterations
2699 : LEK_LockedSomePredecessors);
2700 }
2701 }
2702
2703 // Skip rest of block if it's not reachable.
2704 if (!CurrBlockInfo->Reachable)
2705 continue;
2706
2707 BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);
2708
2709 // Visit all the statements in the basic block.
2710 for (const auto &BI : *CurrBlock) {
2711 switch (BI.getKind()) {
2712 case CFGElement::Statement: {
2713 CFGStmt CS = BI.castAs<CFGStmt>();
2714 LocksetBuilder.Visit(CS.getStmt());
2715 break;
2716 }
2717 // Ignore BaseDtor and MemberDtor for now.
2718 case CFGElement::AutomaticObjectDtor: {
2719 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2720 const auto *DD = AD.getDestructorDecl(AC.getASTContext());
2721 if (!DD->hasAttrs())
2722 break;
2723
2724 LocksetBuilder.handleCall(nullptr, DD,
2725 SxBuilder.createVariable(AD.getVarDecl()),
2726 AD.getTriggerStmt()->getEndLoc());
2727 break;
2728 }
2729
2730 case CFGElement::CleanupFunction: {
2731 const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
2732 LocksetBuilder.handleCall(/*Exp=*/nullptr, CF.getFunctionDecl(),
2733 SxBuilder.createVariable(CF.getVarDecl()),
2734 CF.getVarDecl()->getLocation());
2735 break;
2736 }
2737
2738 case CFGElement::TemporaryDtor: {
2739 auto TD = BI.castAs<CFGTemporaryDtor>();
2740
2741 // Clean up constructed object even if there are no attributes to
2742 // keep the number of objects in limbo as small as possible.
2743 if (auto Object = ConstructedObjects.find(
2744 TD.getBindTemporaryExpr()->getSubExpr());
2745 Object != ConstructedObjects.end()) {
2746 const auto *DD = TD.getDestructorDecl(AC.getASTContext());
2747 if (DD->hasAttrs())
2748 // TODO: the location here isn't quite correct.
2749 LocksetBuilder.handleCall(nullptr, DD, Object->second,
2750 TD.getBindTemporaryExpr()->getEndLoc());
2751 ConstructedObjects.erase(Object);
2752 }
2753 break;
2754 }
2755 default:
2756 break;
2757 }
2758 }
2759 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2760
2761 // For every back edge from CurrBlock (the end of the loop) to another block
2762 // (FirstLoopBlock) we need to check that the Lockset of CurrBlock is equal to
2763 // the one held at the beginning of FirstLoopBlock. We can look up the
2764 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
2765 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2766 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2767 // if CurrBlock -> *SI is *not* a back edge
2768 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2769 continue;
2770
2771 CFGBlock *FirstLoopBlock = *SI;
2772 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2773 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2774 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
2775 LEK_LockedSomeLoopIterations);
2776 }
2777 }
2778
2779 // Skip the final check if the exit block is unreachable.
2780 if (!Final.Reachable)
2781 return;
2782
2783 // FIXME: Should we call this function for all blocks which exit the function?
2784 intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
2785 LEK_NotLockedAtEndOfFunction, LEK_LockedAtEndOfFunction);
2786
2787 Handler.leaveFunction(CurrentFunction);
2788}
2789
2790/// Check a function's CFG for thread-safety violations.
2791///
2792/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2793/// at the end of each block, and issue warnings for thread safety violations.
2794/// Each block in the CFG is traversed exactly once.
2795 void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
2796 ThreadSafetyHandler &Handler,
2797 BeforeSet **BSet) {
2798 if (!*BSet)
2799 *BSet = new BeforeSet;
2800 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2801 Analyzer.runAnalysis(AC);
2802}
2803
2804 void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
2805
2806/// Helper function that returns a LockKind required for the given level
2807/// of access.
2808 LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
2809 switch (AK) {
2810 case AK_Read :
2811 return LK_Shared;
2812 case AK_Written :
2813 return LK_Exclusive;
2814 }
2815 llvm_unreachable("Unknown AccessKind");
2816}