clang 22.0.0git
ThreadSafety.cpp
Go to the documentation of this file.
1//===- ThreadSafety.cpp ---------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
// An intra-procedural analysis for thread safety (e.g. deadlocks and race
10// conditions), based off of an annotation system.
11//
12// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
13// for more information.
14//
15//===----------------------------------------------------------------------===//
16
18#include "clang/AST/Attr.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclGroup.h"
22#include "clang/AST/Expr.h"
23#include "clang/AST/ExprCXX.h"
25#include "clang/AST/Stmt.h"
27#include "clang/AST/Type.h"
33#include "clang/Analysis/CFG.h"
35#include "clang/Basic/LLVM.h"
39#include "llvm/ADT/DenseMap.h"
40#include "llvm/ADT/ImmutableMap.h"
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/ScopeExit.h"
43#include "llvm/ADT/SmallVector.h"
44#include "llvm/ADT/StringRef.h"
45#include "llvm/Support/Allocator.h"
46#include "llvm/Support/Casting.h"
47#include "llvm/Support/ErrorHandling.h"
48#include "llvm/Support/TrailingObjects.h"
49#include "llvm/Support/raw_ostream.h"
50#include <cassert>
51#include <functional>
52#include <iterator>
53#include <memory>
54#include <optional>
55#include <string>
56#include <utility>
57#include <vector>
58
59using namespace clang;
60using namespace threadSafety;
61
62// Key method definition
64
65/// Issue a warning about an invalid lock expression
67 const Expr *MutexExp, const NamedDecl *D,
68 const Expr *DeclExp, StringRef Kind) {
70 if (DeclExp)
71 Loc = DeclExp->getExprLoc();
72
73 // FIXME: add a note about the attribute location in MutexExp or D
74 if (Loc.isValid())
75 Handler.handleInvalidLockExp(Loc);
76}
77
78namespace {
79
80/// A set of CapabilityExpr objects, which are compiled from thread safety
81/// attributes on a function.
82class CapExprSet : public SmallVector<CapabilityExpr, 4> {
83public:
84 /// Push M onto list, but discard duplicates.
85 void push_back_nodup(const CapabilityExpr &CapE) {
86 if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
87 return CapE.equals(CapE2);
88 }))
89 push_back(CapE);
90 }
91};
92
93class FactManager;
94class FactSet;
95
/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  /// Discriminates the concrete subclass of this entry (LLVM-style RTTI).
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed, ///< The fact has been acquired through a scoped capability.
  };

private:
  // The three enums below are stored as 8-bit bit-fields to keep the
  // entry compact.
  /// Which concrete subclass this is; see getFactEntryKind().
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  // Non-virtual on purpose: facts are allocated in a BumpPtrAllocator
  // (see FactManager::createFact) and are never deleted through a base
  // pointer.
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  /// Called when this fact is dropped while intersecting locksets at a
  /// CFG join point; lets the subclass report a diagnostic via \p Handler.
  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  /// Called when the same capability is acquired again while this fact is
  /// already in the lockset.
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  /// Called when the capability \p Cp is released at \p UnlockLoc.
  // NOTE(review): FullyRemove appears to control whether the fact is removed
  // entirely rather than partially released -- confirm against subclasses.
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
157
158using FactID = unsigned short;
159
160/// FactManager manages the memory for all facts that are created during
161/// the analysis of a single routine.
162class FactManager {
163private:
164 llvm::BumpPtrAllocator &Alloc;
165 std::vector<const FactEntry *> Facts;
166
167public:
168 FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
169
170 template <typename T, typename... ArgTypes>
171 T *createFact(ArgTypes &&...Args) {
172 static_assert(std::is_trivially_destructible_v<T>);
173 return T::create(Alloc, std::forward<ArgTypes>(Args)...);
174 }
175
176 FactID newFact(const FactEntry *Entry) {
177 Facts.push_back(Entry);
178 assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
179 "FactID space exhausted");
180 return static_cast<unsigned short>(Facts.size() - 1);
181 }
182
183 const FactEntry &operator[](FactID F) const { return *Facts[F]; }
184};
185
186/// A FactSet is the set of facts that are known to be true at a
187/// particular program point. FactSets must be small, because they are
188/// frequently copied, and are thus implemented as a set of indices into a
189/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
190/// locks, so we can get away with doing a linear search for lookup. Note
191/// that a hashtable or map is inappropriate in this case, because lookups
192/// may involve partial pattern matches, rather than exact matches.
193class FactSet {
194private:
195 using FactVec = SmallVector<FactID, 4>;
196
197 FactVec FactIDs;
198
199public:
200 using iterator = FactVec::iterator;
201 using const_iterator = FactVec::const_iterator;
202
203 iterator begin() { return FactIDs.begin(); }
204 const_iterator begin() const { return FactIDs.begin(); }
205
206 iterator end() { return FactIDs.end(); }
207 const_iterator end() const { return FactIDs.end(); }
208
209 bool isEmpty() const { return FactIDs.size() == 0; }
210
211 // Return true if the set contains only negative facts
212 bool isEmpty(FactManager &FactMan) const {
213 for (const auto FID : *this) {
214 if (!FactMan[FID].negative())
215 return false;
216 }
217 return true;
218 }
219
220 void addLockByID(FactID ID) { FactIDs.push_back(ID); }
221
222 FactID addLock(FactManager &FM, const FactEntry *Entry) {
223 FactID F = FM.newFact(Entry);
224 FactIDs.push_back(F);
225 return F;
226 }
227
228 bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
229 unsigned n = FactIDs.size();
230 if (n == 0)
231 return false;
232
233 for (unsigned i = 0; i < n-1; ++i) {
234 if (FM[FactIDs[i]].matches(CapE)) {
235 FactIDs[i] = FactIDs[n-1];
236 FactIDs.pop_back();
237 return true;
238 }
239 }
240 if (FM[FactIDs[n-1]].matches(CapE)) {
241 FactIDs.pop_back();
242 return true;
243 }
244 return false;
245 }
246
247 std::optional<FactID> replaceLock(FactManager &FM, iterator It,
248 const FactEntry *Entry) {
249 if (It == end())
250 return std::nullopt;
251 FactID F = FM.newFact(Entry);
252 *It = F;
253 return F;
254 }
255
256 std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
257 const FactEntry *Entry) {
258 return replaceLock(FM, findLockIter(FM, CapE), Entry);
259 }
260
261 iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
262 return llvm::find_if(*this,
263 [&](FactID ID) { return FM[ID].matches(CapE); });
264 }
265
266 const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
267 auto I =
268 llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
269 return I != end() ? &FM[*I] : nullptr;
270 }
271
272 const FactEntry *findLockUniv(FactManager &FM,
273 const CapabilityExpr &CapE) const {
274 auto I = llvm::find_if(
275 *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
276 return I != end() ? &FM[*I] : nullptr;
277 }
278
279 const FactEntry *findPartialMatch(FactManager &FM,
280 const CapabilityExpr &CapE) const {
281 auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
282 return FM[ID].partiallyMatches(CapE);
283 });
284 return I != end() ? &FM[*I] : nullptr;
285 }
286
287 bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
288 auto I = llvm::find_if(
289 *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
290 return I != end();
291 }
292};
293
294class ThreadSafetyAnalyzer;
295
296} // namespace
297
298namespace clang {
299namespace threadSafety {
300
302private:
303 using BeforeVect = SmallVector<const ValueDecl *, 4>;
304
305 struct BeforeInfo {
306 BeforeVect Vect;
307 int Visited = 0;
308
309 BeforeInfo() = default;
310 BeforeInfo(BeforeInfo &&) = default;
311 };
312
313 using BeforeMap =
314 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
315 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
316
317public:
318 BeforeSet() = default;
319
320 BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
321 ThreadSafetyAnalyzer& Analyzer);
322
323 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
324 ThreadSafetyAnalyzer &Analyzer);
325
326 void checkBeforeAfter(const ValueDecl* Vd,
327 const FactSet& FSet,
328 ThreadSafetyAnalyzer& Analyzer,
329 SourceLocation Loc, StringRef CapKind);
330
331private:
332 BeforeMap BMap;
333 CycleMap CycMap;
334};
335
336} // namespace threadSafety
337} // namespace clang
338
339namespace {
340
341class LocalVariableMap;
342
// Maps a local variable to the integer "SSA name" of its current definition.
using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;

/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };

/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  // Select the entry or exit lockset for the requested side.
  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  // Select the entry or exit source location for the requested side.
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  // Both contexts start out as the analysis' empty context; construction
  // goes through getEmptyBlockInfo() below.
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
391
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable
// DeclRefs with their SSA-names. Instead, we compute a Context for each point
// in the code, which maps local variables to the appropriate SSA-name. This
// map changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR null if this is a reference to
    // another VarDefinition (see isReference()).
    const Expr *Exp = nullptr;

    // Direct reference to another VarDefinition
    unsigned DirectRef = 0;

    // Reference to underlying canonical non-reference VarDefinition.
    unsigned CanonicalRef = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    bool isReference() const { return !Exp; }

    // Reset both references to index 0, the "undefined" placeholder.
    void invalidateRef() { DirectRef = CanonicalRef = 0; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef,
                  Context C)
        : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;
  std::vector<VarDefinition> VarDefinitions;
  // (Stmt, Context) pairs recorded in traversal order so later analyses can
  // replay the context at each assignment/DeclStmt; see getNextContext().
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0, 0, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    // Chase direct references until we reach a definition that has an
    // expression; index 0 (the undefined placeholder) terminates the chain.
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].DirectRef;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  // Print the name of definition i ("Undefined" for the placeholder).
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].DirectRef;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Resolve any definition ID down to its non-reference base ID.
  unsigned getCanonicalDefinitionID(unsigned ID) const {
    while (ID > 0 && VarDefinitions[ID].isReference())
      ID = VarDefinitions[ID].CanonicalRef;
    return ID;
  }

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    // Note: the definition records the *previous* context Ctx, in which
    // its initializer expression should be interpreted.
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(
        VarDefinition(D, Ref, getCanonicalDefinitionID(Ref), Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};
619
620} // namespace
621
622// This has to be defined after LocalVariableMap.
623CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
624 return CFGBlockInfo(M.getEmptyContext());
625}
626
627namespace {
628
/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  // The map being populated; not owned.
  LocalVariableMap* VMap;
  // Current variable-to-definition context; updated as each statement in a
  // basic block is visited.
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};
642
643} // namespace
644
645// Add new local variables to the variable map
646void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
647 bool modifiedCtx = false;
648 const DeclGroupRef DGrp = S->getDeclGroup();
649 for (const auto *D : DGrp) {
650 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
651 const Expr *E = VD->getInit();
652
653 // Add local variables with trivial type to the variable map
654 QualType T = VD->getType();
655 if (T.isTrivialType(VD->getASTContext())) {
656 Ctx = VMap->addDefinition(VD, E, Ctx);
657 modifiedCtx = true;
658 }
659 }
660 }
661 if (modifiedCtx)
662 VMap->saveContext(S, Ctx);
663}
664
665// Update local variable definitions in variable map
666void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
667 if (!BO->isAssignmentOp())
668 return;
669
670 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
671
672 // Update the variable map and current context.
673 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
674 const ValueDecl *VDec = DRE->getDecl();
675 if (Ctx.lookup(VDec)) {
676 if (BO->getOpcode() == BO_Assign)
677 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
678 else
679 // FIXME -- handle compound assignment operators
680 Ctx = VMap->clearDefinition(VDec, Ctx);
681 VMap->saveContext(BO, Ctx);
682 }
683 }
684}
685
686// Invalidates local variable definitions if variable escaped.
687void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
688 const FunctionDecl *FD = CE->getDirectCallee();
689 if (!FD)
690 return;
691
692 // Heuristic for likely-benign functions that pass by mutable reference. This
693 // is needed to avoid a slew of false positives due to mutable reference
694 // passing where the captured reference is usually passed on by-value.
695 if (const IdentifierInfo *II = FD->getIdentifier()) {
696 // Any kind of std::bind-like functions.
697 if (II->isStr("bind") || II->isStr("bind_front"))
698 return;
699 }
700
701 // Invalidate local variable definitions that are passed by non-const
702 // reference or non-const pointer.
703 for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
704 if (Idx >= FD->getNumParams())
705 break;
706
707 const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
708 const ParmVarDecl *PVD = FD->getParamDecl(Idx);
709 QualType ParamType = PVD->getType();
710
711 // Potential reassignment if passed by non-const reference / pointer.
712 const ValueDecl *VDec = nullptr;
713 if (ParamType->isReferenceType() &&
714 !ParamType->getPointeeType().isConstQualified()) {
715 if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
716 VDec = DRE->getDecl();
717 } else if (ParamType->isPointerType() &&
718 !ParamType->getPointeeType().isConstQualified()) {
719 Arg = Arg->IgnoreParenCasts();
720 if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
721 if (UO->getOpcode() == UO_AddrOf) {
722 const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
723 if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
724 VDec = DRE->getDecl();
725 }
726 }
727 }
728
729 if (VDec && Ctx.lookup(VDec)) {
730 Ctx = VMap->clearDefinition(VDec, Ctx);
731 VMap->saveContext(CE, Ctx);
732 }
733 }
734}
735
736// Computes the intersection of two contexts. The intersection is the
737// set of variables which have the same definition in both contexts;
738// variables with different definitions are discarded.
739LocalVariableMap::Context
740LocalVariableMap::intersectContexts(Context C1, Context C2) {
741 Context Result = C1;
742 for (const auto &P : C1) {
743 const NamedDecl *Dec = P.first;
744 const unsigned *I2 = C2.lookup(Dec);
745 if (!I2) {
746 // The variable doesn't exist on second path.
747 Result = removeDefinition(Dec, Result);
748 } else if (getCanonicalDefinitionID(P.second) !=
749 getCanonicalDefinitionID(*I2)) {
750 // If canonical definitions mismatch the underlying definitions are
751 // different, invalidate.
752 Result = clearDefinition(Dec, Result);
753 }
754 }
755 return Result;
756}
757
758// For every variable in C, create a new variable that refers to the
759// definition in C. Return a new context that contains these new variables.
760// (We use this for a naive implementation of SSA on loop back-edges.)
761LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
762 Context Result = getEmptyContext();
763 for (const auto &P : C)
764 Result = addReference(P.first, P.second, Result);
765 return Result;
766}
767
768// This routine also takes the intersection of C1 and C2, but it does so by
769// altering the VarDefinitions. C1 must be the result of an earlier call to
770// createReferenceContext.
771void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
772 for (const auto &P : C1) {
773 const unsigned I1 = P.second;
774 VarDefinition *VDef = &VarDefinitions[I1];
775 assert(VDef->isReference());
776
777 const unsigned *I2 = C2.lookup(P.first);
778 if (!I2) {
779 // Variable does not exist at the end of the loop, invalidate.
780 VDef->invalidateRef();
781 continue;
782 }
783
784 // Compare the canonical IDs. This correctly handles chains of references
785 // and determines if the variable is truly loop-invariant.
786 if (VDef->CanonicalRef != getCanonicalDefinitionID(*I2))
787 VDef->invalidateRef(); // Mark this variable as undefined
788 }
789}
790
791// Traverse the CFG in topological order, so all predecessors of a block
792// (excluding back-edges) are visited before the block itself. At
793// each point in the code, we calculate a Context, which holds the set of
794// variable definitions which are visible at that point in execution.
795// Visible variables are mapped to their definitions using an array that
796// contains all definitions.
797//
798// At join points in the CFG, the set is computed as the intersection of
799// the incoming sets along each edge, E.g.
800//
801// { Context | VarDefinitions }
802// int x = 0; { x -> x1 | x1 = 0 }
803// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
804// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
805// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
806// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
807//
808// This is essentially a simpler and more naive version of the standard SSA
809// algorithm. Those definitions that remain in the intersection are from blocks
810// that strictly dominate the current block. We do not bother to insert proper
811// phi nodes, because they are not used in our analysis; instead, wherever
812// a phi node would be required, we simply remove that definition from the
813// context (E.g. x above).
814//
815// The initial traversal does not capture back-edges, so those need to be
816// handled on a separate pass. Whenever the first pass encounters an
817// incoming back edge, it duplicates the context, creating new definitions
818// that refer back to the originals. (These correspond to places where SSA
819// might have to insert a phi node.) On the second pass, these definitions are
820// set to NULL if the variable has changed on the back-edge (i.e. a phi
821// node was actually required.) E.g.
822//
823// { Context | VarDefinitions }
824// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
825// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
826// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
827// ... { y -> y1 | x3 = 2, x2 = 1, ... }
828void LocalVariableMap::traverseCFG(CFG *CFGraph,
829 const PostOrderCFGView *SortedGraph,
830 std::vector<CFGBlockInfo> &BlockInfo) {
831 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
832
833 for (const auto *CurrBlock : *SortedGraph) {
834 unsigned CurrBlockID = CurrBlock->getBlockID();
835 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
836
837 VisitedBlocks.insert(CurrBlock);
838
839 // Calculate the entry context for the current block
840 bool HasBackEdges = false;
841 bool CtxInit = true;
842 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
843 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
844 // if *PI -> CurrBlock is a back edge, so skip it
845 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
846 HasBackEdges = true;
847 continue;
848 }
849
850 unsigned PrevBlockID = (*PI)->getBlockID();
851 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
852
853 if (CtxInit) {
854 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
855 CtxInit = false;
856 }
857 else {
858 CurrBlockInfo->EntryContext =
859 intersectContexts(CurrBlockInfo->EntryContext,
860 PrevBlockInfo->ExitContext);
861 }
862 }
863
864 // Duplicate the context if we have back-edges, so we can call
865 // intersectBackEdges later.
866 if (HasBackEdges)
867 CurrBlockInfo->EntryContext =
868 createReferenceContext(CurrBlockInfo->EntryContext);
869
870 // Create a starting context index for the current block
871 saveContext(nullptr, CurrBlockInfo->EntryContext);
872 CurrBlockInfo->EntryIndex = getContextIndex();
873
874 // Visit all the statements in the basic block.
875 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
876 for (const auto &BI : *CurrBlock) {
877 switch (BI.getKind()) {
879 CFGStmt CS = BI.castAs<CFGStmt>();
880 VMapBuilder.Visit(CS.getStmt());
881 break;
882 }
883 default:
884 break;
885 }
886 }
887 CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
888
889 // Mark variables on back edges as "unknown" if they've been changed.
890 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
891 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
892 // if CurrBlock -> *SI is *not* a back edge
893 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
894 continue;
895
896 CFGBlock *FirstLoopBlock = *SI;
897 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
898 Context LoopEnd = CurrBlockInfo->ExitContext;
899 intersectBackEdge(LoopBegin, LoopEnd);
900 }
901 }
902
903 // Put an extra entry at the end of the indexed context array
904 unsigned exitID = CFGraph->getExit().getBlockID();
905 saveContext(nullptr, BlockInfo[exitID].ExitContext);
906}
907
/// Find the appropriate source locations to use when producing diagnostics for
/// each block in the CFG.
static void findBlockLocations(CFG *CFGraph,
                               const PostOrderCFGView *SortedGraph,
                               std::vector<CFGBlockInfo> &BlockInfo) {
  for (const auto *CurrBlock : *SortedGraph) {
    CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];

    // Find the source location of the last statement in the block, if the
    // block is not empty.
    if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
      // A terminator (if/while/...) provides both locations; EntryLoc may
      // be overwritten with the first statement's location below.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
    } else {
      // No terminator: scan backwards for the last statement element.
      for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
           BE = CurrBlock->rend(); BI != BE; ++BI) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
          CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    }

    if (CurrBlockInfo->ExitLoc.isValid()) {
      // This block contains at least one statement. Find the source location
      // of the first statement in the block.
      for (const auto &BI : *CurrBlock) {
        // FIXME: Handle other CFGElement kinds.
        if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
          CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
          break;
        }
      }
    } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
               CurrBlock != &CFGraph->getExit()) {
      // The block is empty, and has a single predecessor. Use its exit
      // location. (The traversal order visits predecessors first, so the
      // predecessor's ExitLoc has already been computed.)
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
    } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
      // The block is empty, and has a single successor. Use its entry
      // location.
      CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
          BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
    }
  }
}
955
956namespace {
957
/// A fact entry for an ordinary (non-scoped) capability, e.g. a mutex that
/// has been acquired directly. Tracks a reentrancy depth so that reentrant
/// capabilities can be acquired and released multiple times.
class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;

  // Constructor is private; instances are created via the create() factory
  // functions below so they can be placed in a BumpPtrAllocator.
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  /// Copy an existing entry into \p Alloc. Used by tryReenter/leaveReentrant
  /// to produce an updated fact with a different reentrancy depth.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  /// Create a fresh entry for capability \p CE held with kind \p LK,
  /// acquired at \p Loc.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  /// Warn if this fact is still held when removed at a join point, unless it
  /// was merely asserted, is a negative capability, or is universal.
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  /// Handle acquiring this capability while it is already held: either bump
  /// the reentrancy depth, or report a double-lock error.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FactMan, entry, RFact);
    } else {
      Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                               entry.loc());
    }
  }

  /// Handle releasing this capability: pop one reentrancy level if present,
  /// otherwise record the corresponding negative capability.
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FactMan, RFact);
    } else if (!Cp.negative()) {
      // Fully released: add the negative capability so later acquisitions
      // can verify the lock is known not to be held.
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability reentrant,
  // nullptr otherwise.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrant, nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  // LLVM-style RTTI support.
  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};
1048
/// Describes how a scoped lockable object manages one of its underlying
/// capabilities.
enum UnderlyingCapabilityKind {
  UCK_Acquired,          ///< Any kind of acquired capability.
  UCK_ReleasedShared,    ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};

/// A capability managed by a scoped lockable object, together with the way
/// the scoped object manages it.
struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};
1059
1060class ScopedLockableFactEntry final
1061 : public FactEntry,
1062 private llvm::TrailingObjects<ScopedLockableFactEntry,
1063 UnderlyingCapability> {
1064 friend TrailingObjects;
1065
1066private:
1067 const unsigned ManagedCapacity;
1068 unsigned ManagedSize = 0;
1069
1070 ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
1071 SourceKind Src, unsigned ManagedCapacity)
1072 : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
1073 ManagedCapacity(ManagedCapacity) {}
1074
1075 void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
1076 assert(ManagedSize < ManagedCapacity);
1077 new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
1078 ++ManagedSize;
1079 }
1080
1081 ArrayRef<UnderlyingCapability> getManaged() const {
1082 return getTrailingObjects(ManagedSize);
1083 }
1084
1085public:
1086 static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
1087 const CapabilityExpr &CE,
1088 SourceLocation Loc, SourceKind Src,
1089 unsigned ManagedCapacity) {
1090 void *Storage =
1091 Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
1092 alignof(ScopedLockableFactEntry));
1093 return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
1094 }
1095
1096 CapExprSet getUnderlyingMutexes() const {
1097 CapExprSet UnderlyingMutexesSet;
1098 for (const UnderlyingCapability &UnderlyingMutex : getManaged())
1099 UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
1100 return UnderlyingMutexesSet;
1101 }
1102
1103 /// \name Adding managed locks
1104 /// Capacity for managed locks must have been allocated via \ref create.
1105 /// There is no reallocation in case the capacity is exceeded!
1106 /// \{
1107 void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }
1108
1109 void addExclusiveUnlock(const CapabilityExpr &M) {
1110 addManaged(M, UCK_ReleasedExclusive);
1111 }
1112
1113 void addSharedUnlock(const CapabilityExpr &M) {
1114 addManaged(M, UCK_ReleasedShared);
1115 }
1116 /// \}
1117
1118 void
1119 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
1120 SourceLocation JoinLoc, LockErrorKind LEK,
1121 ThreadSafetyHandler &Handler) const override {
1123 return;
1124
1125 for (const auto &UnderlyingMutex : getManaged()) {
1126 const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
1127 if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
1128 (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
1129 // If this scoped lock manages another mutex, and if the underlying
1130 // mutex is still/not held, then warn about the underlying mutex.
1131 Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
1132 UnderlyingMutex.Cap.toString(), loc(),
1133 JoinLoc, LEK);
1134 }
1135 }
1136 }
1137
1138 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
1139 ThreadSafetyHandler &Handler) const override {
1140 for (const auto &UnderlyingMutex : getManaged()) {
1141 if (UnderlyingMutex.Kind == UCK_Acquired)
1142 lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
1143 &Handler);
1144 else
1145 unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
1146 }
1147 }
1148
1149 void handleUnlock(FactSet &FSet, FactManager &FactMan,
1150 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
1151 bool FullyRemove,
1152 ThreadSafetyHandler &Handler) const override {
1153 assert(!Cp.negative() && "Managing object cannot be negative.");
1154 for (const auto &UnderlyingMutex : getManaged()) {
1155 // Remove/lock the underlying mutex if it exists/is still unlocked; warn
1156 // on double unlocking/locking if we're not destroying the scoped object.
1157 ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
1158 if (UnderlyingMutex.Kind == UCK_Acquired) {
1159 unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
1160 } else {
1161 LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
1162 ? LK_Shared
1163 : LK_Exclusive;
1164 lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
1165 }
1166 }
1167 if (FullyRemove)
1168 FSet.removeLock(FactMan, Cp);
1169 }
1170
1171 static bool classof(const FactEntry *A) {
1172 return A->getFactEntryKind() == ScopedLockable;
1173 }
1174
1175private:
1176 void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
1177 LockKind kind, SourceLocation loc,
1178 ThreadSafetyHandler *Handler) const {
1179 if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
1180 const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
1181 if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
1182 // This capability has been reentrantly acquired.
1183 FSet.replaceLock(FactMan, It, RFact);
1184 } else if (Handler) {
1185 Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
1186 }
1187 } else {
1188 FSet.removeLock(FactMan, !Cp);
1189 FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
1190 Managed));
1191 }
1192 }
1193
1194 void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
1195 SourceLocation loc, ThreadSafetyHandler *Handler) const {
1196 if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
1197 const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
1198 if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
1199 // This capability remains reentrantly acquired.
1200 FSet.replaceLock(FactMan, It, RFact);
1201 return;
1202 }
1203
1204 FSet.replaceLock(
1205 FactMan, It,
1206 FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
1207 } else if (Handler) {
1208 SourceLocation PrevLoc;
1209 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
1210 PrevLoc = Neg->loc();
1211 Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
1212 }
1213 }
1214};
1215
1216/// Class which implements the core thread safety analysis routines.
1217class ThreadSafetyAnalyzer {
1218 friend class BuildLockset;
1219 friend class threadSafety::BeforeSet;
1220
1221 llvm::BumpPtrAllocator Bpa;
1222 threadSafety::til::MemRegionRef Arena;
1223 threadSafety::SExprBuilder SxBuilder;
1224
1225 ThreadSafetyHandler &Handler;
1226 const FunctionDecl *CurrentFunction;
1227 LocalVariableMap LocalVarMap;
1228 // Maps constructed objects to `this` placeholder prior to initialization.
1229 llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
1230 FactManager FactMan;
1231 std::vector<CFGBlockInfo> BlockInfo;
1232
1233 BeforeSet *GlobalBeforeSet;
1234
1235public:
1236 ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
1237 : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
1238 GlobalBeforeSet(Bset) {}
1239
1240 bool inCurrentScope(const CapabilityExpr &CapE);
1241
1242 void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
1243 void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
1244 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
1245
1246 template <typename AttrType>
1247 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1248 const NamedDecl *D, til::SExpr *Self = nullptr);
1249
1250 template <class AttrType>
1251 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1252 const NamedDecl *D,
1253 const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
1254 Expr *BrE, bool Neg);
1255
1256 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
1257 bool &Negate);
1258
1259 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
1260 const CFGBlock* PredBlock,
1261 const CFGBlock *CurrBlock);
1262
1263 bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
1264 LockErrorKind EntryLEK);
1265
1266 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1267 SourceLocation JoinLoc, LockErrorKind EntryLEK,
1268 LockErrorKind ExitLEK);
1269
1270 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1271 SourceLocation JoinLoc, LockErrorKind LEK) {
1272 intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
1273 }
1274
1275 void runAnalysis(AnalysisDeclContext &AC);
1276
1277 void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
1278 const Expr *Exp, AccessKind AK, Expr *MutexExp,
1279 ProtectedOperationKind POK, til::SExpr *Self,
1280 SourceLocation Loc);
1281 void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
1282 Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);
1283
1284 void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
1286 void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
1288};
1289
1290} // namespace
1291
/// Process acquired_before and acquired_after attributes on Vd.
/// Builds (and returns) the BeforeInfo entry for Vd: acquired_before
/// arguments are appended to Vd's own Vect, while acquired_after arguments
/// are rewritten as "arg acquired_before Vd" on the argument's entry.
BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
    ThreadSafetyAnalyzer& Analyzer) {
  // Create a new entry for Vd.
  BeforeInfo *Info = nullptr;
  {
    // Keep InfoPtr in its own scope in case BMap is modified later and the
    // reference becomes invalid.
    std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
    if (!InfoPtr)
      InfoPtr.reset(new BeforeInfo());
    Info = InfoPtr.get();
  }

  for (const auto *At : Vd->attrs()) {
    switch (At->getKind()) {
    case attr::AcquiredBefore: {
      const auto *A = cast<AcquiredBeforeAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
          Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *Cpvd = Cp.valueDecl()) {
          Info->Vect.push_back(Cpvd);
          // Ensure the referenced decl has an entry of its own, so cycle
          // detection can traverse it later.
          const auto It = BMap.find(Cpvd);
          if (It == BMap.end())
            insertAttrExprs(Cpvd, Analyzer);
        }
      }
      break;
    }
    case attr::AcquiredAfter: {
      const auto *A = cast<AcquiredAfterAttr>(At);

      // Read exprs from the attribute, and add them to BeforeVect.
      for (const auto *Arg : A->args()) {
        CapabilityExpr Cp =
          Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
        if (const ValueDecl *ArgVd = Cp.valueDecl()) {
          // Get entry for mutex listed in attribute
          BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
          // "Vd acquired_after Arg" is equivalent to
          // "Arg acquired_before Vd".
          ArgInfo->Vect.push_back(Vd);
        }
      }
      break;
    }
    default:
      break;
    }
  }

  return Info;
}
1346
1347BeforeSet::BeforeInfo *
1349 ThreadSafetyAnalyzer &Analyzer) {
1350 auto It = BMap.find(Vd);
1351 BeforeInfo *Info = nullptr;
1352 if (It == BMap.end())
1353 Info = insertAttrExprs(Vd, Analyzer);
1354 else
1355 Info = It->second.get();
1356 assert(Info && "BMap contained nullptr?");
1357 return Info;
1358}
1359
1360/// Return true if any mutexes in FSet are in the acquired_before set of Vd.
1362 const FactSet& FSet,
1363 ThreadSafetyAnalyzer& Analyzer,
1364 SourceLocation Loc, StringRef CapKind) {
1366
1367 // Do a depth-first traversal of Vd.
1368 // Return true if there are cycles.
1369 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
1370 if (!Vd)
1371 return false;
1372
1373 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
1374
1375 if (Info->Visited == 1)
1376 return true;
1377
1378 if (Info->Visited == 2)
1379 return false;
1380
1381 if (Info->Vect.empty())
1382 return false;
1383
1384 InfoVect.push_back(Info);
1385 Info->Visited = 1;
1386 for (const auto *Vdb : Info->Vect) {
1387 // Exclude mutexes in our immediate before set.
1388 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
1389 StringRef L1 = StartVd->getName();
1390 StringRef L2 = Vdb->getName();
1391 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
1392 }
1393 // Transitively search other before sets, and warn on cycles.
1394 if (traverse(Vdb)) {
1395 if (CycMap.try_emplace(Vd, true).second) {
1396 StringRef L1 = Vd->getName();
1397 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
1398 }
1399 }
1400 }
1401 Info->Visited = 2;
1402 return false;
1403 };
1404
1405 traverse(StartVd);
1406
1407 for (auto *Info : InfoVect)
1408 Info->Visited = 0;
1409}
1410
1411/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1412static const ValueDecl *getValueDecl(const Expr *Exp) {
1413 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
1414 return getValueDecl(CE->getSubExpr());
1415
1416 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
1417 return DR->getDecl();
1418
1419 if (const auto *ME = dyn_cast<MemberExpr>(Exp))
1420 return ME->getMemberDecl();
1421
1422 return nullptr;
1423}
1424
1425bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1426 const threadSafety::til::SExpr *SExp = CapE.sexpr();
1427 assert(SExp && "Null expressions should be ignored");
1428
1429 if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
1430 const ValueDecl *VD = LP->clangDecl();
1431 // Variables defined in a function are always inaccessible.
1432 if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
1433 return false;
1434 // For now we consider static class members to be inaccessible.
1436 return false;
1437 // Global variables are always in scope.
1438 return true;
1439 }
1440
1441 // Members are in scope from methods of the same class.
1442 if (const auto *P = dyn_cast<til::Project>(SExp)) {
1443 if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
1444 return false;
1445 const ValueDecl *VD = P->clangDecl();
1446 return VD->getDeclContext() == CurrentFunction->getDeclContext();
1447 }
1448
1449 return false;
1450}
1451
/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
                                   bool ReqAttr) {
  if (Entry->shouldIgnore())
    return;

  if (!ReqAttr && !Entry->negative()) {
    // look for the negative capability, and remove it from the fact set.
    CapabilityExpr NegC = !*Entry;
    const FactEntry *Nen = FSet.findLock(FactMan, NegC);
    if (Nen) {
      FSet.removeLock(FactMan, NegC);
    }
    else {
      // The negative capability was not held: if the capability is in
      // scope here (and not asserted/reentrant), the caller may already
      // hold it, so warn.
      if (inCurrentScope(*Entry) && !Entry->asserted() && !Entry->reentrant())
        Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
                                      NegC.toString(), Entry->loc());
    }
  }

  // Check before/after constraints
  if (!Entry->asserted() && !Entry->declared()) {
    GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
                                      Entry->loc(), Entry->getKind());
  }

  // If the lock is already held, delegate to the existing fact (which may
  // diagnose a double-lock or bump a reentrancy count); otherwise add it.
  if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
    if (!Entry->asserted())
      Cp->handleLock(FSet, FactMan, *Entry, Handler);
  } else {
    FSet.addLock(FactMan, Entry);
  }
}
1486
/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
                                      SourceLocation UnlockLoc,
                                      bool FullyRemove, LockKind ReceivedKind) {
  if (Cp.shouldIgnore())
    return;

  const FactEntry *LDat = FSet.findLock(FactMan, Cp);
  if (!LDat) {
    // The lock is not held. If the negative capability is held, use its
    // location to point at where the lock was previously released.
    SourceLocation PrevLoc;
    if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
      PrevLoc = Neg->loc();
    Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
                                  PrevLoc);
    return;
  }

  // Generic lock removal doesn't care about lock kind mismatches, but
  // otherwise diagnose when the lock kinds are mismatched.
  if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
    Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
                                      ReceivedKind, LDat->loc(), UnlockLoc);
  }

  // Delegate the actual removal to the fact, which handles scoped locks and
  // reentrancy appropriately.
  LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}
1514
1515/// Extract the list of mutexIDs from the attribute on an expression,
1516/// and push them onto Mtxs, discarding any duplicates.
1517template <typename AttrType>
1518void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1519 const Expr *Exp, const NamedDecl *D,
1520 til::SExpr *Self) {
1521 if (Attr->args_size() == 0) {
1522 // The mutex held is the "this" object.
1523 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
1524 if (Cp.isInvalid()) {
1525 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1526 return;
1527 }
1528 //else
1529 if (!Cp.shouldIgnore())
1530 Mtxs.push_back_nodup(Cp);
1531 return;
1532 }
1533
1534 for (const auto *Arg : Attr->args()) {
1535 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
1536 if (Cp.isInvalid()) {
1537 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1538 continue;
1539 }
1540 //else
1541 if (!Cp.shouldIgnore())
1542 Mtxs.push_back_nodup(Cp);
1543 }
1544}
1545
1546/// Extract the list of mutexIDs from a trylock attribute. If the
1547/// trylock applies to the given edge, then push them onto Mtxs, discarding
1548/// any duplicates.
1549template <class AttrType>
1550void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1551 const Expr *Exp, const NamedDecl *D,
1552 const CFGBlock *PredBlock,
1553 const CFGBlock *CurrBlock,
1554 Expr *BrE, bool Neg) {
1555 // Find out which branch has the lock
1556 bool branch = false;
1557 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1558 branch = BLE->getValue();
1559 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1560 branch = ILE->getValue().getBoolValue();
1561
1562 int branchnum = branch ? 0 : 1;
1563 if (Neg)
1564 branchnum = !branchnum;
1565
1566 // If we've taken the trylock branch, then add the lock
1567 int i = 0;
1568 for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1569 SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1570 if (*SI == CurrBlock && i == branchnum)
1571 getMutexIDs(Mtxs, Attr, Exp, D);
1572 }
1573}
1574
1575static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1577 TCond = false;
1578 return true;
1579 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1580 TCond = BLE->getValue();
1581 return true;
1582 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1583 TCond = ILE->getValue().getBoolValue();
1584 return true;
1585 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
1586 return getStaticBooleanValue(CE->getSubExpr(), TCond);
1587 return false;
1588}
1589
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    // __builtin_expect is transparent: look at its first argument.
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    // A local variable: follow its definition in the local variable map,
    // e.g. bool b = mu.tryLock(); if (b) ...
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    // Logical not flips which branch holds the lock.
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    // Comparisons against a constant boolean, e.g. tryLock() == true or
    // tryLock() != 0; inequality and a false constant each flip Negate.
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    // cond ? true : false (or the inverse) reduces to cond itself.
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}
1659
/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when its result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  if (Handler.issueBetaWarnings()) {
    // Temporarily set the lookup context for SExprBuilder.
    SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return LocalVarMap.lookupExpr(D, Ctx);
        });
  }
  // Ensure the lookup context is cleared on every exit path below.
  auto Cleanup = llvm::make_scope_exit(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  // Walk the branch condition back to a trylock call, if there is one.
  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
                                                          LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
                                                          LK_Shared, Loc));
}
1714
1715namespace {
1716
1717/// We use this class to visit different types of expressions in
1718/// CFGBlocks, and build up the lockset.
1719/// An expression may cause us to add or remove locks from the lockset, or else
1720/// output error messages related to missing locks.
1721/// FIXME: In future, we may be able to not inherit from a visitor.
1722class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1723 friend class ThreadSafetyAnalyzer;
1724
1725 ThreadSafetyAnalyzer *Analyzer;
1726 FactSet FSet;
1727 // The fact set for the function on exit.
1728 const FactSet &FunctionExitFSet;
1729 LocalVariableMap::Context LVarCtx;
1730 unsigned CtxIndex;
1731
1732 // To update and adjust the context.
1733 void updateLocalVarMapCtx(const Stmt *S) {
1734 if (S)
1735 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
1736 if (!Analyzer->Handler.issueBetaWarnings())
1737 return;
1738 // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
1739 Analyzer->SxBuilder.setLookupLocalVarExpr(
1740 [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
1741 return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
1742 });
1743 }
1744
1745 // helper functions
1746
1747 void checkAccess(const Expr *Exp, AccessKind AK,
1749 Analyzer->checkAccess(FSet, Exp, AK, POK);
1750 }
1751 void checkPtAccess(const Expr *Exp, AccessKind AK,
1753 Analyzer->checkPtAccess(FSet, Exp, AK, POK);
1754 }
1755
1756 void handleCall(const Expr *Exp, const NamedDecl *D,
1757 til::SExpr *Self = nullptr,
1758 SourceLocation Loc = SourceLocation());
1759 void examineArguments(const FunctionDecl *FD,
1762 bool SkipFirstParam = false);
1763
1764public:
1765 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
1766 const FactSet &FunctionExitFSet)
1767 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1768 FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
1769 CtxIndex(Info.EntryIndex) {
1770 updateLocalVarMapCtx(nullptr);
1771 }
1772
1773 ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
1774
1775 void VisitUnaryOperator(const UnaryOperator *UO);
1776 void VisitBinaryOperator(const BinaryOperator *BO);
1777 void VisitCastExpr(const CastExpr *CE);
1778 void VisitCallExpr(const CallExpr *Exp);
1779 void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1780 void VisitDeclStmt(const DeclStmt *S);
1781 void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
1782 void VisitReturnStmt(const ReturnStmt *S);
1783};
1784
1785} // namespace
1786
1787/// Warn if the LSet does not contain a lock sufficient to protect access
1788/// of at least the passed in AccessKind.
1789void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
1790 const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
1791 Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
1792 SourceLocation Loc) {
1794 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1795 if (Cp.isInvalid()) {
1796 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1797 return;
1798 } else if (Cp.shouldIgnore()) {
1799 return;
1800 }
1801
1802 if (Cp.negative()) {
1803 // Negative capabilities act like locks excluded
1804 const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
1805 if (LDat) {
1807 (!Cp).toString(), Loc);
1808 return;
1809 }
1810
1811 // If this does not refer to a negative capability in the same class,
1812 // then stop here.
1813 if (!inCurrentScope(Cp))
1814 return;
1815
1816 // Otherwise the negative requirement must be propagated to the caller.
1817 LDat = FSet.findLock(FactMan, Cp);
1818 if (!LDat) {
1819 Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1820 }
1821 return;
1822 }
1823
1824 const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
1825 bool NoError = true;
1826 if (!LDat) {
1827 // No exact match found. Look for a partial match.
1828 LDat = FSet.findPartialMatch(FactMan, Cp);
1829 if (LDat) {
1830 // Warn that there's no precise match.
1831 std::string PartMatchStr = LDat->toString();
1832 StringRef PartMatchName(PartMatchStr);
1833 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
1834 &PartMatchName);
1835 } else {
1836 // Warn that there's no match at all.
1837 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1838 }
1839 NoError = false;
1840 }
1841 // Make sure the mutex we found is the right kind.
1842 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1843 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1844 }
1845}
1846
1847/// Warn if the LSet contains the given lock.
1848void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1849 const NamedDecl *D, const Expr *Exp,
1850 Expr *MutexExp, til::SExpr *Self,
1851 SourceLocation Loc) {
1852 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1853 if (Cp.isInvalid()) {
1854 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1855 return;
1856 } else if (Cp.shouldIgnore()) {
1857 return;
1858 }
1859
1860 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1861 if (LDat) {
1863 Cp.toString(), Loc);
1864 }
1865}
1866
1867/// Checks guarded_by and pt_guarded_by attributes.
1868/// Whenever we identify an access (read or write) to a DeclRefExpr that is
1869/// marked with guarded_by, we must ensure the appropriate mutexes are held.
1870/// Similarly, we check if the access is to an expression that dereferences
1871/// a pointer marked with pt_guarded_by.
1872void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
1873 AccessKind AK,
1875 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1876
1877 SourceLocation Loc = Exp->getExprLoc();
1878
1879 // Local variables of reference type cannot be re-assigned;
1880 // map them to their initializer.
1881 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1882 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1883 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1884 if (const auto *E = VD->getInit()) {
1885 // Guard against self-initialization. e.g., int &i = i;
1886 if (E == Exp)
1887 break;
1888 Exp = E->IgnoreImplicit()->IgnoreParenCasts();
1889 continue;
1890 }
1891 }
1892 break;
1893 }
1894
1895 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1896 // For dereferences
1897 if (UO->getOpcode() == UO_Deref)
1898 checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
1899 return;
1900 }
1901
1902 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
1903 switch (BO->getOpcode()) {
1904 case BO_PtrMemD: // .*
1905 return checkAccess(FSet, BO->getLHS(), AK, POK);
1906 case BO_PtrMemI: // ->*
1907 return checkPtAccess(FSet, BO->getLHS(), AK, POK);
1908 default:
1909 return;
1910 }
1911 }
1912
1913 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1914 checkPtAccess(FSet, AE->getLHS(), AK, POK);
1915 return;
1916 }
1917
1918 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1919 if (ME->isArrow())
1920 checkPtAccess(FSet, ME->getBase(), AK, POK);
1921 else
1922 checkAccess(FSet, ME->getBase(), AK, POK);
1923 }
1924
1925 const ValueDecl *D = getValueDecl(Exp);
1926 if (!D || !D->hasAttrs())
1927 return;
1928
1929 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
1930 Handler.handleNoMutexHeld(D, POK, AK, Loc);
1931 }
1932
1933 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1934 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
1935}
1936
1937/// Checks pt_guarded_by and pt_guarded_var attributes.
1938/// POK is the same operationKind that was passed to checkAccess.
1939void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
1940 AccessKind AK,
1942 // Strip off paren- and cast-expressions, checking if we encounter any other
1943 // operator that should be delegated to checkAccess() instead.
1944 while (true) {
1945 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1946 Exp = PE->getSubExpr();
1947 continue;
1948 }
1949 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1950 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1951 // If it's an actual array, and not a pointer, then it's elements
1952 // are protected by GUARDED_BY, not PT_GUARDED_BY;
1953 checkAccess(FSet, CE->getSubExpr(), AK, POK);
1954 return;
1955 }
1956 Exp = CE->getSubExpr();
1957 continue;
1958 }
1959 break;
1960 }
1961
1962 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1963 if (UO->getOpcode() == UO_AddrOf) {
1964 // Pointer access via pointer taken of variable, so the dereferenced
1965 // variable is not actually a pointer.
1966 checkAccess(FSet, UO->getSubExpr(), AK, POK);
1967 return;
1968 }
1969 }
1970
1971 // Pass by reference/pointer warnings are under a different flag.
1973 switch (POK) {
1974 case POK_PassByRef:
1975 PtPOK = POK_PtPassByRef;
1976 break;
1977 case POK_ReturnByRef:
1978 PtPOK = POK_PtReturnByRef;
1979 break;
1980 case POK_PassPointer:
1981 PtPOK = POK_PtPassPointer;
1982 break;
1983 case POK_ReturnPointer:
1984 PtPOK = POK_PtReturnPointer;
1985 break;
1986 default:
1987 break;
1988 }
1989
1990 const ValueDecl *D = getValueDecl(Exp);
1991 if (!D || !D->hasAttrs())
1992 return;
1993
1994 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
1995 Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
1996
1997 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1998 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
1999 Exp->getExprLoc());
2000}
2001
/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
///
/// FIXME: For classes annotated with one of the guarded annotations, we need
/// to treat const method calls as reads and non-const method calls as writes,
/// and check that the appropriate locks are held. Non-const method calls with
/// the same signature as const method calls can be also treated as reads.
///
/// \param Exp The call expression.
/// \param D The callee declaration.
/// \param Self If \p Exp = nullptr, the implicit this argument or the argument
/// of an implicitly called cleanup function.
/// \param Loc If \p Exp = nullptr, the location.
void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
                              til::SExpr *Self, SourceLocation Loc) {
  // Capabilities to acquire/release, accumulated from the callee's attributes
  // and (below) from attributes on the callee's parameters.
  CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
  CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
  CapExprSet ScopedReqsAndExcludes;

  // Figure out if we're constructing an object of scoped lockable class
  CapabilityExpr Scp;
  if (Exp) {
    assert(!Self);
    const auto *TagT = Exp->getType()->getAs<TagType>();
    if (D->hasAttrs() && TagT && Exp->isPRValue()) {
      // The constructed object has no name yet; track it via a placeholder
      // that is later bound to a declaration (see VisitDeclStmt /
      // VisitMaterializeTemporaryExpr).
      til::LiteralPtr *Placeholder =
          Analyzer->SxBuilder.createThisPlaceholder();
      [[maybe_unused]] auto inserted =
          Analyzer->ConstructedObjects.insert({Exp, Placeholder});
      assert(inserted.second && "Are we visiting the same expression again?");
      if (isa<CXXConstructExpr>(Exp))
        Self = Placeholder;
      if (TagT->getDecl()->getMostRecentDecl()->hasAttr<ScopedLockableAttr>())
        Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
    }

    assert(Loc.isInvalid());
    Loc = Exp->getExprLoc();
  }

  for(const Attr *At : D->attrs()) {
    switch (At->getKind()) {
    // When we encounter a lock function, we need to add the lock to our
    // lockset.
    case attr::AcquireCapability: {
      const auto *A = cast<AcquireCapabilityAttr>(At);
      Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                          : ExclusiveLocksToAdd,
                            A, Exp, D, Self);
      break;
    }

    // An assert will add a lock to the lockset, but will not generate
    // a warning if it is already there, and will not generate a warning
    // if it is not removed.
    case attr::AssertCapability: {
      const auto *A = cast<AssertCapabilityAttr>(At);
      CapExprSet AssertLocks;
      Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
      for (const auto &AssertLock : AssertLocks)
        Analyzer->addLock(
            FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
                      AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
                      Loc, FactEntry::Asserted));
      break;
    }

    // When we encounter an unlock function, we need to remove unlocked
    // mutexes from the lockset, and flag a warning if they are not there.
    case attr::ReleaseCapability: {
      const auto *A = cast<ReleaseCapabilityAttr>(At);
      if (A->isGeneric())
        Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
      else if (A->isShared())
        Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
      else
        Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
      break;
    }

    case attr::RequiresCapability: {
      const auto *A = cast<RequiresCapabilityAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                     A->isShared() ? AK_Read : AK_Written,
                                     Arg, POK_FunctionCall, Self, Loc);
        // use for adopting a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    case attr::LocksExcluded: {
      const auto *A = cast<LocksExcludedAttr>(At);
      for (auto *Arg : A->args()) {
        Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
        // use for deferring a lock
        if (!Scp.shouldIgnore())
          Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
      }
      break;
    }

    // Ignore attributes unrelated to thread-safety
    default:
      break;
    }
  }

  // Collect the call's arguments so we can match them against annotated
  // parameters of the callee below.
  std::optional<CallExpr::const_arg_range> Args;
  if (Exp) {
    if (const auto *CE = dyn_cast<CallExpr>(Exp))
      Args = CE->arguments();
    else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
      Args = CE->arguments();
    else
      llvm_unreachable("Unknown call kind");
  }
  const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
  if (CalledFunction && Args.has_value()) {
    // Handle capability attributes on the callee's parameters: passing a
    // scoped capability object to such a parameter declares which locks the
    // callee expects it to manage.
    for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
      CapExprSet DeclaredLocks;
      for (const Attr *At : Param->attrs()) {
        switch (At->getKind()) {
        case attr::AcquireCapability: {
          const auto *A = cast<AcquireCapabilityAttr>(At);
          Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
                                              : ExclusiveLocksToAdd,
                                A, Exp, D, Self);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::ReleaseCapability: {
          const auto *A = cast<ReleaseCapabilityAttr>(At);
          if (A->isGeneric())
            Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
          else if (A->isShared())
            Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
          else
            Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::RequiresCapability: {
          const auto *A = cast<RequiresCapabilityAttr>(At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
                                         A->isShared() ? AK_Read : AK_Written,
                                         Arg, POK_FunctionCall, Self, Loc);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        case attr::LocksExcluded: {
          const auto *A = cast<LocksExcludedAttr>(At);
          for (auto *Arg : A->args())
            Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
          Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
          break;
        }

        default:
          break;
        }
      }
      if (DeclaredLocks.empty())
        continue;
      // Translate the argument; if it is a temporary scoped-capability
      // object, fall back to the placeholder recorded at construction time.
      CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
                        StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
      if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
          Cp.isInvalid() && CBTE) {
        if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
            Object != Analyzer->ConstructedObjects.end())
          Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
                              /*Reentrant=*/false);
      }
      const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
      if (!Fact) {
        Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
                                             Cp.toString(), LK_Exclusive,
                                             Exp->getExprLoc());
        continue;
      }
      // Compare the locks the parameter declares against the locks the
      // passed scope object actually manages, warning on any mismatch.
      const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
      for (const auto &[a, b] :
           zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
        if (!a.has_value()) {
          Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              b.value().getKind(), b.value().toString());
        } else if (!b.has_value()) {
          Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              a.value().getKind(), a.value().toString());
        } else if (!a.value().equals(b.value())) {
          Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
              Exp->getExprLoc(), D->getLocation(), Scope->toString(),
              a.value().getKind(), a.value().toString(), b.value().toString());
          break;
        }
      }
    }
  }
  // Remove locks first to allow lock upgrading/downgrading.
  // FIXME -- should only fully remove if the attribute refers to 'this'.
  bool Dtor = isa<CXXDestructorDecl>(D);
  for (const auto &M : ExclusiveLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
  for (const auto &M : SharedLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
  for (const auto &M : GenericLocksToRemove)
    Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);

  // Add locks.
  // Locks acquired via a scope object are released by that object, hence
  // "Managed"; otherwise the caller is responsible for releasing them.
  FactEntry::SourceKind Source =
      !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
  for (const auto &M : ExclusiveLocksToAdd)
    Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
                                M, LK_Exclusive, Loc, Source));
  for (const auto &M : SharedLocksToAdd)
    Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
                                M, LK_Shared, Loc, Source));

  if (!Scp.shouldIgnore()) {
    // Add the managing object as a dummy mutex, mapped to the underlying mutex.
    auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
        Scp, Loc, FactEntry::Acquired,
        ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
            ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
            SharedLocksToRemove.size());
    for (const auto &M : ExclusiveLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : SharedLocksToAdd)
      ScopedEntry->addLock(M);
    for (const auto &M : ScopedReqsAndExcludes)
      ScopedEntry->addLock(M);
    for (const auto &M : ExclusiveLocksToRemove)
      ScopedEntry->addExclusiveUnlock(M);
    for (const auto &M : SharedLocksToRemove)
      ScopedEntry->addSharedUnlock(M);
    Analyzer->addLock(FSet, ScopedEntry);
  }
}
2250
2251/// For unary operations which read and write a variable, we need to
2252/// check whether we hold any required mutexes. Reads are checked in
2253/// VisitCastExpr.
2254void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2255 switch (UO->getOpcode()) {
2256 case UO_PostDec:
2257 case UO_PostInc:
2258 case UO_PreDec:
2259 case UO_PreInc:
2260 checkAccess(UO->getSubExpr(), AK_Written);
2261 break;
2262 default:
2263 break;
2264 }
2265}
2266
2267/// For binary operations which assign to a variable (writes), we need to check
2268/// whether we hold any required mutexes.
2269/// FIXME: Deal with non-primitive types.
2270void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2271 if (!BO->isAssignmentOp())
2272 return;
2273
2274 updateLocalVarMapCtx(BO);
2275 checkAccess(BO->getLHS(), AK_Written);
2276}
2277
2278/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2279/// need to ensure we hold any required mutexes.
2280/// FIXME: Deal with non-primitive types.
2281void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2282 if (CE->getCastKind() != CK_LValueToRValue)
2283 return;
2284 checkAccess(CE->getSubExpr(), AK_Read);
2285}
2286
2287void BuildLockset::examineArguments(const FunctionDecl *FD,
2290 bool SkipFirstParam) {
2291 // Currently we can't do anything if we don't know the function declaration.
2292 if (!FD)
2293 return;
2294
2295 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2296 // only turns off checking within the body of a function, but we also
2297 // use it to turn off checking in arguments to the function. This
2298 // could result in some false negatives, but the alternative is to
2299 // create yet another attribute.
2300 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2301 return;
2302
2303 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2304 auto Param = Params.begin();
2305 if (SkipFirstParam)
2306 ++Param;
2307
2308 // There can be default arguments, so we stop when one iterator is at end().
2309 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2310 ++Param, ++Arg) {
2311 QualType Qt = (*Param)->getType();
2312 if (Qt->isReferenceType())
2313 checkAccess(*Arg, AK_Read, POK_PassByRef);
2314 else if (Qt->isPointerType())
2315 checkPtAccess(*Arg, AK_Read, POK_PassPointer);
2316 }
2317}
2318
/// Check a call expression: verify accesses to the implicit object argument
/// and to by-reference/by-pointer arguments, then process the callee's
/// thread-safety attributes via handleCall().
void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
  updateLocalVarMapCtx(Exp);

  if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
    const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
    // ME can be null when calling a method pointer
    const CXXMethodDecl *MD = CE->getMethodDecl();

    if (ME && MD) {
      if (ME->isArrow()) {
        // Should perhaps be AK_Written if !MD->isConst().
        checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
      } else {
        // Should perhaps be AK_Written if !MD->isConst().
        checkAccess(CE->getImplicitObjectArgument(), AK_Read);
      }
    }

    examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
  } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
    OverloadedOperatorKind OEop = OE->getOperator();
    switch (OEop) {
    // Compound assignments read their RHS (arg 1), then fall through to the
    // write check on arg 0 shared with increment/decrement.
    case OO_Equal:
    case OO_PlusEqual:
    case OO_MinusEqual:
    case OO_StarEqual:
    case OO_SlashEqual:
    case OO_PercentEqual:
    case OO_CaretEqual:
    case OO_AmpEqual:
    case OO_PipeEqual:
    case OO_LessLessEqual:
    case OO_GreaterGreaterEqual:
      checkAccess(OE->getArg(1), AK_Read);
      [[fallthrough]];
    case OO_PlusPlus:
    case OO_MinusMinus:
      checkAccess(OE->getArg(0), AK_Written);
      break;
    // Dereference-like operators check arg 0 as a pointer access, then fall
    // through to the generic argument handling in the default case.
    case OO_Star:
    case OO_ArrowStar:
    case OO_Arrow:
    case OO_Subscript:
      if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
        // Grrr. operator* can be multiplication...
        checkPtAccess(OE->getArg(0), AK_Read);
      }
      [[fallthrough]];
    default: {
      // TODO: get rid of this, and rely on pass-by-ref instead.
      const Expr *Obj = OE->getArg(0);
      checkAccess(Obj, AK_Read);
      // Check the remaining arguments. For method operators, the first
      // argument is the implicit self argument, and doesn't appear in the
      // FunctionDecl, but for non-methods it does.
      const FunctionDecl *FD = OE->getDirectCallee();
      // NOTE(review): getDirectCallee() may return null; the isa<> below
      // assumes FD is non-null for operator calls — confirm.
      examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
                       /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
      break;
    }
    }
  } else {
    examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
  }

  // Finally, process acquire/release/requires/excludes attributes on the
  // callee itself.
  auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!D)
    return;
  handleCall(Exp, D);
}
2389
2390void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2391 const CXXConstructorDecl *D = Exp->getConstructor();
2392 if (D && D->isCopyConstructor()) {
2393 const Expr* Source = Exp->getArg(0);
2394 checkAccess(Source, AK_Read);
2395 } else {
2396 examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2397 }
2398 if (D && D->hasAttrs())
2399 handleCall(Exp, D);
2400}
2401
2402static const Expr *UnpackConstruction(const Expr *E) {
2403 if (auto *CE = dyn_cast<CastExpr>(E))
2404 if (CE->getCastKind() == CK_NoOp)
2405 E = CE->getSubExpr()->IgnoreParens();
2406 if (auto *CE = dyn_cast<CastExpr>(E))
2407 if (CE->getCastKind() == CK_ConstructorConversion ||
2408 CE->getCastKind() == CK_UserDefinedConversion)
2409 E = CE->getSubExpr();
2410 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2411 E = BTE->getSubExpr();
2412 return E;
2413}
2414
2415void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2416 updateLocalVarMapCtx(S);
2417
2418 for (auto *D : S->getDeclGroup()) {
2419 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2420 const Expr *E = VD->getInit();
2421 if (!E)
2422 continue;
2423 E = E->IgnoreParens();
2424
2425 // handle constructors that involve temporaries
2426 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2427 E = EWC->getSubExpr()->IgnoreParens();
2428 E = UnpackConstruction(E);
2429
2430 if (auto Object = Analyzer->ConstructedObjects.find(E);
2431 Object != Analyzer->ConstructedObjects.end()) {
2432 Object->second->setClangDecl(VD);
2433 Analyzer->ConstructedObjects.erase(Object);
2434 }
2435 }
2436 }
2437}
2438
2439void BuildLockset::VisitMaterializeTemporaryExpr(
2440 const MaterializeTemporaryExpr *Exp) {
2441 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2442 if (auto Object = Analyzer->ConstructedObjects.find(
2444 Object != Analyzer->ConstructedObjects.end()) {
2445 Object->second->setClangDecl(ExtD);
2446 Analyzer->ConstructedObjects.erase(Object);
2447 }
2448 }
2449}
2450
2451void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
2452 if (Analyzer->CurrentFunction == nullptr)
2453 return;
2454 const Expr *RetVal = S->getRetValue();
2455 if (!RetVal)
2456 return;
2457
2458 // If returning by reference or pointer, check that the function requires the
2459 // appropriate capabilities.
2460 const QualType ReturnType =
2461 Analyzer->CurrentFunction->getReturnType().getCanonicalType();
2462 if (ReturnType->isLValueReferenceType()) {
2463 Analyzer->checkAccess(
2464 FunctionExitFSet, RetVal,
2467 } else if (ReturnType->isPointerType()) {
2468 Analyzer->checkPtAccess(
2469 FunctionExitFSet, RetVal,
2472 }
2473}
2474
2475/// Given two facts merging on a join point, possibly warn and decide whether to
2476/// keep or replace.
2477///
2478/// \return false if we should keep \p A, true if we should take \p B.
2479bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
2480 SourceLocation JoinLoc,
2481 LockErrorKind EntryLEK) {
2482 // Whether we can replace \p A by \p B.
2483 const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
2484 unsigned int ReentrancyDepthA = 0;
2485 unsigned int ReentrancyDepthB = 0;
2486
2487 if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
2488 ReentrancyDepthA = LFE->getReentrancyDepth();
2489 if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
2490 ReentrancyDepthB = LFE->getReentrancyDepth();
2491
2492 if (ReentrancyDepthA != ReentrancyDepthB) {
2493 Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
2494 JoinLoc, EntryLEK,
2495 /*ReentrancyMismatch=*/true);
2496 // Pick the FactEntry with the greater reentrancy depth as the "good"
2497 // fact to reduce potential later warnings.
2498 return CanModify && ReentrancyDepthA < ReentrancyDepthB;
2499 } else if (A.kind() != B.kind()) {
2500 // For managed capabilities, the destructor should unlock in the right mode
2501 // anyway. For asserted capabilities no unlocking is needed.
2502 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
2503 // The shared capability subsumes the exclusive capability, if possible.
2504 bool ShouldTakeB = B.kind() == LK_Shared;
2505 if (CanModify || !ShouldTakeB)
2506 return ShouldTakeB;
2507 }
2508 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
2509 A.loc());
2510 // Take the exclusive capability to reduce further warnings.
2511 return CanModify && B.kind() == LK_Exclusive;
2512 } else {
2513 // The non-asserted capability is the one we want to track.
2514 return CanModify && A.asserted() && !B.asserted();
2515 }
2516}
2517
2518/// Compute the intersection of two locksets and issue warnings for any
2519/// locks in the symmetric difference.
2520///
2521/// This function is used at a merge point in the CFG when comparing the lockset
2522/// of each branch being merged. For example, given the following sequence:
2523/// A; if () then B; else C; D; we need to check that the lockset after B and C
2524/// are the same. In the event of a difference, we use the intersection of these
2525/// two locksets at the start of D.
2526///
2527/// \param EntrySet A lockset for entry into a (possibly new) block.
2528/// \param ExitSet The lockset on exiting a preceding block.
2529/// \param JoinLoc The location of the join point for error reporting
2530/// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2531/// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
2532void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2533 const FactSet &ExitSet,
2534 SourceLocation JoinLoc,
2535 LockErrorKind EntryLEK,
2536 LockErrorKind ExitLEK) {
2537 FactSet EntrySetOrig = EntrySet;
2538
2539 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2540 for (const auto &Fact : ExitSet) {
2541 const FactEntry &ExitFact = FactMan[Fact];
2542
2543 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
2544 if (EntryIt != EntrySet.end()) {
2545 if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
2546 *EntryIt = Fact;
2547 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
2548 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
2549 EntryLEK, Handler);
2550 }
2551 }
2552
2553 // Find locks in EntrySet that are not in ExitSet, and remove them.
2554 for (const auto &Fact : EntrySetOrig) {
2555 const FactEntry *EntryFact = &FactMan[Fact];
2556 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
2557
2558 if (!ExitFact) {
2559 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
2561 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
2562 ExitLEK, Handler);
2563 if (ExitLEK == LEK_LockedSomePredecessors)
2564 EntrySet.removeLock(FactMan, *EntryFact);
2565 }
2566 }
2567}
2568
2569// Return true if block B never continues to its successors.
2570static bool neverReturns(const CFGBlock *B) {
2571 if (B->hasNoReturnElement())
2572 return true;
2573 if (B->empty())
2574 return false;
2575
2576 CFGElement Last = B->back();
2577 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2578 if (isa<CXXThrowExpr>(S->getStmt()))
2579 return true;
2580 }
2581 return false;
2582}
2583
2584/// Check a function's CFG for thread-safety violations.
2585///
2586/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2587/// at the end of each block, and issue warnings for thread safety violations.
2588/// Each block in the CFG is traversed exactly once.
2589void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2590 // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
2591 // For now, we just use the walker to set things up.
2592 threadSafety::CFGWalker walker;
2593 if (!walker.init(AC))
2594 return;
2595
2596 // AC.dumpCFG(true);
2597 // threadSafety::printSCFG(walker);
2598
2599 CFG *CFGraph = walker.getGraph();
2600 const NamedDecl *D = walker.getDecl();
2601 CurrentFunction = dyn_cast<FunctionDecl>(D);
2602
2603 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2604 return;
2605
2606 // FIXME: Do something a bit more intelligent inside constructor and
2607 // destructor code. Constructors and destructors must assume unique access
2608 // to 'this', so checks on member variable access is disabled, but we should
2609 // still enable checks on other objects.
2611 return; // Don't check inside constructors.
2613 return; // Don't check inside destructors.
2614
2615 Handler.enterFunction(CurrentFunction);
2616
2617 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2618 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2619
2620 // We need to explore the CFG via a "topological" ordering.
2621 // That way, we will be guaranteed to have information about required
2622 // predecessor locksets when exploring a new block.
2623 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2624 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2625
2626 CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
2627 CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
2628
2629 // Mark entry block as reachable
2630 Initial.Reachable = true;
2631
2632 // Compute SSA names for local variables
2633 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2634
2635 // Fill in source locations for all CFGBlocks.
2636 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2637
2638 CapExprSet ExclusiveLocksAcquired;
2639 CapExprSet SharedLocksAcquired;
2640 CapExprSet LocksReleased;
2641
2642 // Add locks from exclusive_locks_required and shared_locks_required
2643 // to initial lockset. Also turn off checking for lock and unlock functions.
2644 // FIXME: is there a more intelligent way to check lock/unlock functions?
2645 if (!SortedGraph->empty()) {
2646 assert(*SortedGraph->begin() == &CFGraph->getEntry());
2647 FactSet &InitialLockset = Initial.EntrySet;
2648
2649 CapExprSet ExclusiveLocksToAdd;
2650 CapExprSet SharedLocksToAdd;
2651
2652 SourceLocation Loc = D->getLocation();
2653 for (const auto *Attr : D->attrs()) {
2654 Loc = Attr->getLocation();
2655 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2656 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2657 nullptr, D);
2658 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2659 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2660 // We must ignore such methods.
2661 if (A->args_size() == 0)
2662 return;
2663 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2664 nullptr, D);
2665 getMutexIDs(LocksReleased, A, nullptr, D);
2666 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2667 if (A->args_size() == 0)
2668 return;
2669 getMutexIDs(A->isShared() ? SharedLocksAcquired
2670 : ExclusiveLocksAcquired,
2671 A, nullptr, D);
2672 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2673 // Don't try to check trylock functions for now.
2674 return;
2675 }
2676 }
2677 ArrayRef<ParmVarDecl *> Params;
2678 if (CurrentFunction)
2679 Params = CurrentFunction->getCanonicalDecl()->parameters();
2680 else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
2681 Params = CurrentMethod->getCanonicalDecl()->parameters();
2682 else
2683 llvm_unreachable("Unknown function kind");
2684 for (const ParmVarDecl *Param : Params) {
2685 CapExprSet UnderlyingLocks;
2686 for (const auto *Attr : Param->attrs()) {
2687 Loc = Attr->getLocation();
2688 if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2689 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2690 nullptr, Param);
2691 getMutexIDs(LocksReleased, A, nullptr, Param);
2692 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2693 } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2694 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2695 nullptr, Param);
2696 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2697 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2698 getMutexIDs(A->isShared() ? SharedLocksAcquired
2699 : ExclusiveLocksAcquired,
2700 A, nullptr, Param);
2701 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2702 } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
2703 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2704 }
2705 }
2706 if (UnderlyingLocks.empty())
2707 continue;
2708 CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
2709 StringRef(),
2710 /*Neg=*/false, /*Reentrant=*/false);
2711 auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
2712 Cp, Param->getLocation(), FactEntry::Declared,
2713 UnderlyingLocks.size());
2714 for (const CapabilityExpr &M : UnderlyingLocks)
2715 ScopedEntry->addLock(M);
2716 addLock(InitialLockset, ScopedEntry, true);
2717 }
2718
2719 // FIXME -- Loc can be wrong here.
2720 for (const auto &Mu : ExclusiveLocksToAdd) {
2721 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2722 Mu, LK_Exclusive, Loc, FactEntry::Declared);
2723 addLock(InitialLockset, Entry, true);
2724 }
2725 for (const auto &Mu : SharedLocksToAdd) {
2726 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2727 Mu, LK_Shared, Loc, FactEntry::Declared);
2728 addLock(InitialLockset, Entry, true);
2729 }
2730 }
2731
2732 // Compute the expected exit set.
2733 // By default, we expect all locks held on entry to be held on exit.
2734 FactSet ExpectedFunctionExitSet = Initial.EntrySet;
2735
2736 // Adjust the expected exit set by adding or removing locks, as declared
2737 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2738 // issue the appropriate warning.
2739 // FIXME: the location here is not quite right.
2740 for (const auto &Lock : ExclusiveLocksAcquired)
2741 ExpectedFunctionExitSet.addLock(
2742 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
2743 D->getLocation()));
2744 for (const auto &Lock : SharedLocksAcquired)
2745 ExpectedFunctionExitSet.addLock(
2746 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
2747 D->getLocation()));
2748 for (const auto &Lock : LocksReleased)
2749 ExpectedFunctionExitSet.removeLock(FactMan, Lock);
2750
2751 for (const auto *CurrBlock : *SortedGraph) {
2752 unsigned CurrBlockID = CurrBlock->getBlockID();
2753 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2754
2755 // Use the default initial lockset in case there are no predecessors.
2756 VisitedBlocks.insert(CurrBlock);
2757
2758 // Iterate through the predecessor blocks and warn if the lockset for all
2759 // predecessors is not the same. We take the entry lockset of the current
2760 // block to be the intersection of all previous locksets.
2761 // FIXME: By keeping the intersection, we may output more errors in future
2762 // for a lock which is not in the intersection, but was in the union. We
2763 // may want to also keep the union in future. As an example, let's say
2764 // the intersection contains Mutex L, and the union contains L and M.
2765 // Later we unlock M. At this point, we would output an error because we
2766 // never locked M; although the real error is probably that we forgot to
2767 // lock M on all code paths. Conversely, let's say that later we lock M.
2768 // In this case, we should compare against the intersection instead of the
2769 // union because the real error is probably that we forgot to unlock M on
2770 // all code paths.
2771 bool LocksetInitialized = false;
2772 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2773 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2774 // if *PI -> CurrBlock is a back edge
2775 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2776 continue;
2777
2778 unsigned PrevBlockID = (*PI)->getBlockID();
2779 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2780
2781 // Ignore edges from blocks that can't return.
2782 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2783 continue;
2784
2785 // Okay, we can reach this block from the entry.
2786 CurrBlockInfo->Reachable = true;
2787
2788 FactSet PrevLockset;
2789 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2790
2791 if (!LocksetInitialized) {
2792 CurrBlockInfo->EntrySet = PrevLockset;
2793 LocksetInitialized = true;
2794 } else {
2795 // Surprisingly 'continue' doesn't always produce back edges, because
2796 // the CFG has empty "transition" blocks where they meet with the end
2797 // of the regular loop body. We still want to diagnose them as loop.
2798 intersectAndWarn(
2799 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
2800 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
2803 }
2804 }
2805
2806 // Skip rest of block if it's not reachable.
2807 if (!CurrBlockInfo->Reachable)
2808 continue;
2809
2810 BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);
2811
2812 // Visit all the statements in the basic block.
2813 for (const auto &BI : *CurrBlock) {
2814 switch (BI.getKind()) {
2815 case CFGElement::Statement: {
2816 CFGStmt CS = BI.castAs<CFGStmt>();
2817 LocksetBuilder.Visit(CS.getStmt());
2818 break;
2819 }
2820 // Ignore BaseDtor and MemberDtor for now.
2822 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2823 const auto *DD = AD.getDestructorDecl(AC.getASTContext());
2824 // Function parameters as they are constructed in caller's context and
2825 // the CFG does not contain the ctors. Ignore them as their
2826 // capabilities cannot be analysed because of this missing
2827 // information.
2828 if (isa_and_nonnull<ParmVarDecl>(AD.getVarDecl()))
2829 break;
2830 if (!DD || !DD->hasAttrs())
2831 break;
2832
2833 LocksetBuilder.handleCall(
2834 nullptr, DD,
2835 SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
2836 AD.getTriggerStmt()->getEndLoc());
2837 break;
2838 }
2839
2841 const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
2842 LocksetBuilder.handleCall(
2843 /*Exp=*/nullptr, CF.getFunctionDecl(),
2844 SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
2845 CF.getVarDecl()->getLocation());
2846 break;
2847 }
2848
2850 auto TD = BI.castAs<CFGTemporaryDtor>();
2851
2852 // Clean up constructed object even if there are no attributes to
2853 // keep the number of objects in limbo as small as possible.
2854 if (auto Object = ConstructedObjects.find(
2855 TD.getBindTemporaryExpr()->getSubExpr());
2856 Object != ConstructedObjects.end()) {
2857 const auto *DD = TD.getDestructorDecl(AC.getASTContext());
2858 if (DD->hasAttrs())
2859 // TODO: the location here isn't quite correct.
2860 LocksetBuilder.handleCall(nullptr, DD, Object->second,
2861 TD.getBindTemporaryExpr()->getEndLoc());
2862 ConstructedObjects.erase(Object);
2863 }
2864 break;
2865 }
2866 default:
2867 break;
2868 }
2869 }
2870 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2871
2872 // For every back edge from CurrBlock (the end of the loop) to another block
2873 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
2874 // the one held at the beginning of FirstLoopBlock. We can look up the
2875 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
2876 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2877 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2878 // if CurrBlock -> *SI is *not* a back edge
2879 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2880 continue;
2881
2882 CFGBlock *FirstLoopBlock = *SI;
2883 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2884 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2885 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
2887 }
2888 }
2889
2890 // Skip the final check if the exit block is unreachable.
2891 if (!Final.Reachable)
2892 return;
2893
2894 // FIXME: Should we call this function for all blocks which exit the function?
2895 intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
2897
2898 Handler.leaveFunction(CurrentFunction);
2899}
2900
2901/// Check a function's CFG for thread-safety violations.
2902///
2903/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2904/// at the end of each block, and issue warnings for thread safety violations.
2905/// Each block in the CFG is traversed exactly once.
2907 ThreadSafetyHandler &Handler,
2908 BeforeSet **BSet) {
2909 if (!*BSet)
2910 *BSet = new BeforeSet;
2911 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2912 Analyzer.runAnalysis(AC);
2913}
2914
2916
2917/// Helper function that returns a LockKind required for the given level
2918/// of access.
2920 switch (AK) {
2921 case AK_Read :
2922 return LK_Shared;
2923 case AK_Written :
2924 return LK_Exclusive;
2925 }
2926 llvm_unreachable("Unknown AccessKind");
2927}
This file defines AnalysisDeclContext, a class that manages the analysis context data for context sen...
Defines enum values for all the target-independent builtin functions.
static void dump(llvm::raw_ostream &OS, StringRef FunctionName, ArrayRef< CounterExpression > Expressions, ArrayRef< CounterMappingRegion > Regions)
static Decl::Kind getKind(const Decl *D)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
Forward-declares and imports various common LLVM datatypes that clang wants to use unqualified.
Defines an enumeration for C++ overloaded operators.
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
Defines the clang::SourceLocation class and associated facilities.
Defines various enumerations that describe declaration and type specifiers.
static void warnInvalidLock(ThreadSafetyHandler &Handler, const Expr *MutexExp, const NamedDecl *D, const Expr *DeclExp, StringRef Kind)
Issue a warning about an invalid lock expression.
static bool getStaticBooleanValue(Expr *E, bool &TCond)
static bool neverReturns(const CFGBlock *B)
static void findBlockLocations(CFG *CFGraph, const PostOrderCFGView *SortedGraph, std::vector< CFGBlockInfo > &BlockInfo)
Find the appropriate source locations to use when producing diagnostics for each block in the CFG.
static const ValueDecl * getValueDecl(const Expr *Exp)
Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const Expr * UnpackConstruction(const Expr *E)
C Language Family Type Representation.
__device__ __2f16 b
AnalysisDeclContext contains the context data for the function, method or block under analysis.
ASTContext & getASTContext() const
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
static bool isAssignmentOp(Opcode Opc)
Definition Expr.h:4108
Opcode getOpcode() const
Definition Expr.h:4017
const VarDecl * getVarDecl() const
Definition CFG.h:423
const Stmt * getTriggerStmt() const
Definition CFG.h:428
Represents a single basic block in a source-level CFG.
Definition CFG.h:605
pred_iterator pred_end()
Definition CFG.h:973
succ_iterator succ_end()
Definition CFG.h:991
bool hasNoReturnElement() const
Definition CFG.h:1109
CFGElement back() const
Definition CFG.h:908
ElementList::const_reverse_iterator const_reverse_iterator
Definition CFG.h:903
bool empty() const
Definition CFG.h:953
succ_iterator succ_begin()
Definition CFG.h:990
Stmt * getTerminatorStmt()
Definition CFG.h:1087
AdjacentBlocks::const_iterator const_pred_iterator
Definition CFG.h:959
pred_iterator pred_begin()
Definition CFG.h:972
unsigned getBlockID() const
Definition CFG.h:1111
Stmt * getTerminatorCondition(bool StripParens=true)
Definition CFG.cpp:6399
AdjacentBlocks::const_iterator const_succ_iterator
Definition CFG.h:966
Represents a top-level expression in a basic block.
Definition CFG.h:55
@ CleanupFunction
Definition CFG.h:79
@ AutomaticObjectDtor
Definition CFG.h:72
const CXXDestructorDecl * getDestructorDecl(ASTContext &astContext) const
Definition CFG.cpp:5412
const Stmt * getStmt() const
Definition CFG.h:139
Represents a source-level, intra-procedural CFG that represents the control-flow of a Stmt.
Definition CFG.h:1222
CFGBlock & getExit()
Definition CFG.h:1333
CFGBlock & getEntry()
Definition CFG.h:1331
unsigned getNumBlockIDs() const
Returns the total number of BlockIDs allocated (which start at 0).
Definition CFG.h:1410
arg_iterator arg_begin()
Definition ExprCXX.h:1677
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1691
arg_iterator arg_end()
Definition ExprCXX.h:1678
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1611
bool isCopyConstructor(unsigned &TypeQuals) const
Whether this constructor is a copy constructor (C++ [class.copy]p2, which can be used to copy the cla...
Definition DeclCXX.cpp:3008
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
ConstExprIterator const_arg_iterator
Definition Expr.h:3125
arg_iterator arg_begin()
Definition Expr.h:3134
arg_iterator arg_end()
Definition Expr.h:3137
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
Decl * getCalleeDecl()
Definition Expr.h:3054
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
const DeclGroupRef getDeclGroup() const
Definition Stmt.h:1628
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:1636
bool hasAttrs() const
Definition DeclBase.h:518
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition DeclBase.h:559
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isDefinedOutsideFunctionOrMethod() const
isDefinedOutsideFunctionOrMethod - This predicate returns true if this scoped decl is defined outside...
Definition DeclBase.h:949
DeclContext * getDeclContext()
Definition DeclBase.h:448
attr_range attrs() const
Definition DeclBase.h:535
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3091
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3086
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3074
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3082
bool isPRValue() const
Definition Expr.h:285
Expr * IgnoreCasts() LLVM_READONLY
Skip past any casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3070
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
QualType getReturnType() const
Definition Decl.h:2845
ArrayRef< ParmVarDecl * > parameters() const
Definition Decl.h:2774
FunctionDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition Decl.cpp:3743
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Definition Decl.cpp:3822
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
This represents a decl that may have a name.
Definition Decl.h:274
IdentifierInfo * getIdentifier() const
Get the identifier that names this declaration, if there is one.
Definition Decl.h:295
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
virtual void printName(raw_ostream &OS, const PrintingPolicy &Policy) const
Pretty-print the unqualified name of this declaration.
Definition Decl.cpp:1672
QualType getCanonicalType() const
Definition TypeBase.h:8330
bool isConstQualified() const
Determine whether this type is const-qualified.
Definition TypeBase.h:8351
Expr * getRetValue()
Definition Stmt.h:3166
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceLocation getEndLoc() const LLVM_READONLY
Definition Stmt.cpp:362
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
bool isPointerType() const
Definition TypeBase.h:8515
bool isReferenceType() const
Definition TypeBase.h:8539
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isLValueReferenceType() const
Definition TypeBase.h:8543
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
void checkBeforeAfter(const ValueDecl *Vd, const FactSet &FSet, ThreadSafetyAnalyzer &Analyzer, SourceLocation Loc, StringRef CapKind)
Return true if any mutexes in FSet are in the acquired_before set of Vd.
BeforeInfo * insertAttrExprs(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
Process acquired_before and acquired_after attributes on Vd.
BeforeInfo * getBeforeInfoForDecl(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
const PostOrderCFGView * getSortedGraph() const
const NamedDecl * getDecl() const
bool init(AnalysisDeclContext &AC)
bool equals(const CapabilityExpr &other) const
CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D, const Expr *DeclExp, til::SExpr *Self=nullptr)
Translate a clang expression in an attribute to a til::SExpr.
void setLookupLocalVarExpr(std::function< const Expr *(const NamedDecl *)> F)
til::SExpr * translate(const Stmt *S, CallingContext *Ctx)
til::SExpr * translateVariable(const VarDecl *VD, CallingContext *Ctx)
Handler class for thread safety warnings.
virtual void handleExpectMoreUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected)
Warn when we get fewer underlying mutexes than expected.
virtual void handleInvalidLockExp(SourceLocation Loc)
Warn about lock expressions which fail to resolve to lockable objects.
virtual void handleUnmatchedUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected, Name Actual)
Warn when an actual underlying mutex of a scoped lockable does not match the expected.
virtual void handleExpectFewerUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Actual)
Warn when we get more underlying mutexes than expected.
virtual void enterFunction(const FunctionDecl *FD)
Called by the analysis when starting analysis of a function.
virtual void handleIncorrectUnlockKind(StringRef Kind, Name LockName, LockKind Expected, LockKind Received, SourceLocation LocLocked, SourceLocation LocUnlock)
Warn about an unlock function call that attempts to unlock a lock with the incorrect lock kind.
virtual void handleMutexHeldEndOfScope(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocEndOfScope, LockErrorKind LEK, bool ReentrancyMismatch=false)
Warn about situations where a mutex is sometimes held and sometimes not.
virtual void leaveFunction(const FunctionDecl *FD)
Called by the analysis when finishing analysis of a function.
virtual void handleExclusiveAndShared(StringRef Kind, Name LockName, SourceLocation Loc1, SourceLocation Loc2)
Warn when a mutex is held exclusively and shared at the same point.
virtual void handleMutexNotHeld(StringRef Kind, const NamedDecl *D, ProtectedOperationKind POK, Name LockName, LockKind LK, SourceLocation Loc, Name *PossibleMatch=nullptr)
Warn when a protected operation occurs while the specific mutex protecting the operation is not locke...
virtual void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName, SourceLocation Loc)
Warn when a function is called while an excluded mutex is locked.
virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK, AccessKind AK, SourceLocation Loc)
Warn when a protected operation occurs while no locks are held.
virtual void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc, SourceLocation LocPreviousUnlock)
Warn about unlock function calls that do not have a prior matching lock expression.
virtual void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, SourceLocation Loc)
Warn when acquiring a lock that the negative capability is not held.
virtual void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocDoubleLock)
Warn about lock function calls for locks which are already held.
#define bool
Definition gpuintrin.h:32
internal::Matcher< T > traverse(TraversalKind TK, const internal::Matcher< T > &InnerMatcher)
Causes all nested matchers to be matched with the specified traversal kind.
unsigned kind
All of the diagnostics that can be emitted by the frontend.
@ CF
Indicates that the tracked object is a CF object.
bool Alloc(InterpState &S, CodePtr OpPC, const Descriptor *Desc)
Definition Interp.h:3522
bool Dec(InterpState &S, CodePtr OpPC, bool CanOverflow)
1) Pops a pointer from the stack 2) Load the value from the pointer 3) Writes the value decreased by ...
Definition Interp.h:844
bool Neg(InterpState &S, CodePtr OpPC)
Definition Interp.h:654
utils::ID< struct FactTag > FactID
Definition Facts.h:28
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions &DiagOpts, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
bool matches(const til::SExpr *E1, const til::SExpr *E2)
LockKind getLockKindFromAccessKind(AccessKind AK)
Helper function that returns a LockKind required for the given level of access.
LockErrorKind
This enum distinguishes between different situations where we warn due to inconsistent locking.
@ LEK_NotLockedAtEndOfFunction
Expecting a capability to be held at the end of function.
@ LEK_LockedSomePredecessors
A capability is locked in some but not all predecessors of a CFGBlock.
@ LEK_LockedAtEndOfFunction
A capability is still locked at the end of a function.
@ LEK_LockedSomeLoopIterations
A capability is locked for some but not all loop iterations.
void threadSafetyCleanup(BeforeSet *Cache)
AccessKind
This enum distinguishes between different ways to access (read or write) a variable.
@ AK_Written
Writing a variable.
@ AK_Read
Reading a variable.
LockKind
This enum distinguishes between different kinds of lock actions.
@ LK_Shared
Shared/reader lock of a mutex.
@ LK_Exclusive
Exclusive/writer lock of a mutex.
@ LK_Generic
Can be either Shared or Exclusive.
void runThreadSafetyAnalysis(AnalysisDeclContext &AC, ThreadSafetyHandler &Handler, BeforeSet **Bset)
Check a function's CFG for thread-safety violations.
ProtectedOperationKind
This enum distinguishes between different kinds of operations that may need to be protected by locks.
@ POK_PtPassByRef
Passing a pt-guarded variable by reference.
@ POK_PassPointer
Passing pointer to a guarded variable.
@ POK_VarDereference
Dereferencing a variable (e.g. p in *p = 5;)
@ POK_PassByRef
Passing a guarded variable by reference.
@ POK_ReturnByRef
Returning a guarded variable by reference.
@ POK_PtPassPointer
Passing a pt-guarded pointer.
@ POK_PtReturnPointer
Returning a pt-guarded pointer.
@ POK_VarAccess
Reading or writing a variable (e.g. x in x = 5;)
@ POK_FunctionCall
Making a function call (e.g. fool())
@ POK_ReturnPointer
Returning pointer to a guarded variable.
@ POK_PtReturnByRef
Returning a pt-guarded variable by reference.
The JSON file list parser is used to communicate input to InstallAPI.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Self
'self' clause, allowed on Compute and Combined Constructs, plus 'update'.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
static bool classof(const Stmt *T)
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
int const char * function
Definition c++config.h:31