clang 22.0.0git
ThreadSafety.cpp
Go to the documentation of this file.
1//===- ThreadSafety.cpp ---------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// An intra-procedural analysis for thread safety (e.g. deadlocks and race
10// conditions), based off of an annotation system.
11//
12// See http://clang.llvm.org/docs/ThreadSafetyAnalysis.html
13// for more information.
14//
15//===----------------------------------------------------------------------===//
16
18#include "clang/AST/Attr.h"
19#include "clang/AST/Decl.h"
20#include "clang/AST/DeclCXX.h"
21#include "clang/AST/DeclGroup.h"
22#include "clang/AST/Expr.h"
23#include "clang/AST/ExprCXX.h"
25#include "clang/AST/Stmt.h"
27#include "clang/AST/Type.h"
33#include "clang/Analysis/CFG.h"
35#include "clang/Basic/LLVM.h"
39#include "llvm/ADT/DenseMap.h"
40#include "llvm/ADT/ImmutableMap.h"
41#include "llvm/ADT/STLExtras.h"
42#include "llvm/ADT/ScopeExit.h"
43#include "llvm/ADT/SmallVector.h"
44#include "llvm/ADT/StringRef.h"
45#include "llvm/Support/Allocator.h"
46#include "llvm/Support/ErrorHandling.h"
47#include "llvm/Support/TrailingObjects.h"
48#include "llvm/Support/raw_ostream.h"
49#include <cassert>
50#include <functional>
51#include <iterator>
52#include <memory>
53#include <optional>
54#include <string>
55#include <utility>
56#include <vector>
57
58using namespace clang;
59using namespace threadSafety;
60
61// Key method definition
63
64/// Issue a warning about an invalid lock expression
66 const Expr *MutexExp, const NamedDecl *D,
67 const Expr *DeclExp, StringRef Kind) {
69 if (DeclExp)
70 Loc = DeclExp->getExprLoc();
71
72 // FIXME: add a note about the attribute location in MutexExp or D
73 if (Loc.isValid())
74 Handler.handleInvalidLockExp(Loc);
75}
76
77namespace {
78
79/// A set of CapabilityExpr objects, which are compiled from thread safety
80/// attributes on a function.
81class CapExprSet : public SmallVector<CapabilityExpr, 4> {
82public:
83 /// Push M onto list, but discard duplicates.
84 void push_back_nodup(const CapabilityExpr &CapE) {
85 if (llvm::none_of(*this, [=](const CapabilityExpr &CapE2) {
86 return CapE.equals(CapE2);
87 }))
88 push_back(CapE);
89 }
90};
91
92class FactManager;
93class FactSet;
94
/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
class FactEntry : public CapabilityExpr {
public:
  /// Discriminator for the concrete subclass (LLVM-style RTTI).
  enum FactEntryKind { Lockable, ScopedLockable };

  /// Where a fact comes from.
  enum SourceKind {
    Acquired, ///< The fact has been directly acquired.
    Asserted, ///< The fact has been asserted to be held.
    Declared, ///< The fact is assumed to be held by callers.
    Managed,  ///< The fact has been acquired through a scoped capability.
  };

private:
  /// Which concrete subclass this is; kept in a bitfield so a FactEntry
  /// stays small (facts are allocated for every held capability).
  const FactEntryKind Kind : 8;

  /// Exclusive or shared.
  LockKind LKind : 8;

  /// How it was acquired.
  SourceKind Source : 8;

  /// Where it was acquired.
  SourceLocation AcquireLoc;

protected:
  // Non-virtual protected destructor: entries are bump-allocated and are
  // never destroyed through a base-class pointer.
  ~FactEntry() = default;

public:
  FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
            SourceLocation Loc, SourceKind Src)
      : CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}

  LockKind kind() const { return LKind; }
  SourceLocation loc() const { return AcquireLoc; }
  FactEntryKind getFactEntryKind() const { return Kind; }

  bool asserted() const { return Source == Asserted; }
  bool declared() const { return Source == Declared; }
  bool managed() const { return Source == Managed; }

  /// Called when this fact is present on one path into a join point but is
  /// being removed from the intersection of the incoming fact sets.
  virtual void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const = 0;
  /// Called when the capability described by this fact is acquired again.
  virtual void handleLock(FactSet &FSet, FactManager &FactMan,
                          const FactEntry &entry,
                          ThreadSafetyHandler &Handler) const = 0;
  /// Called when the capability described by this fact is released.
  virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
                            const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                            bool FullyRemove,
                            ThreadSafetyHandler &Handler) const = 0;

  // Return true if LKind >= LK, where exclusive > shared
  bool isAtLeast(LockKind LK) const {
    return (LKind == LK_Exclusive) || (LK == LK_Shared);
  }
};
156
157using FactID = unsigned short;
158
/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
  /// Arena for all FactEntry allocations; everything is freed wholesale when
  /// the analysis of the routine finishes.
  llvm::BumpPtrAllocator &Alloc;

  /// Table of every fact ever created; a FactID is an index into this table.
  std::vector<const FactEntry *> Facts;

public:
  FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}

  /// Bump-allocate a new fact of type \p T via T::create. The static_assert
  /// guards the arena allocation: destructors are never run, so T must be
  /// trivially destructible.
  template <typename T, typename... ArgTypes>
  T *createFact(ArgTypes &&...Args) {
    static_assert(std::is_trivially_destructible_v<T>);
    return T::create(Alloc, std::forward<ArgTypes>(Args)...);
  }

  /// Register \p Entry in the fact table and return its newly assigned ID.
  FactID newFact(const FactEntry *Entry) {
    Facts.push_back(Entry);
    assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
           "FactID space exhausted");
    return static_cast<unsigned short>(Facts.size() - 1);
  }

  const FactEntry &operator[](FactID F) const { return *Facts[F]; }
};
184
/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
/// locks, so we can get away with doing a linear search for lookup. Note
/// that a hashtable or map is inappropriate in this case, because lookups
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
  using FactVec = SmallVector<FactID, 4>;

  FactVec FactIDs;

public:
  using iterator = FactVec::iterator;
  using const_iterator = FactVec::const_iterator;

  iterator begin() { return FactIDs.begin(); }
  const_iterator begin() const { return FactIDs.begin(); }

  iterator end() { return FactIDs.end(); }
  const_iterator end() const { return FactIDs.end(); }

  bool isEmpty() const { return FactIDs.size() == 0; }

  // Return true if the set contains only negative facts
  bool isEmpty(FactManager &FactMan) const {
    for (const auto FID : *this) {
      if (!FactMan[FID].negative())
        return false;
    }
    return true;
  }

  /// Add a fact that is already registered with the FactManager.
  void addLockByID(FactID ID) { FactIDs.push_back(ID); }

  /// Register \p Entry with \p FM and add the resulting ID to this set.
  FactID addLock(FactManager &FM, const FactEntry *Entry) {
    FactID F = FM.newFact(Entry);
    FactIDs.push_back(F);
    return F;
  }

  /// Remove the first fact matching \p CapE; returns true if one was found.
  /// Removal uses swap-and-pop (the matched slot is overwritten by the last
  /// element), which is fine because the set is unordered.
  bool removeLock(FactManager& FM, const CapabilityExpr &CapE) {
    unsigned n = FactIDs.size();
    if (n == 0)
      return false;

    for (unsigned i = 0; i < n-1; ++i) {
      if (FM[FactIDs[i]].matches(CapE)) {
        FactIDs[i] = FactIDs[n-1];
        FactIDs.pop_back();
        return true;
      }
    }
    // The last element needs no swap, just a pop.
    if (FM[FactIDs[n-1]].matches(CapE)) {
      FactIDs.pop_back();
      return true;
    }
    return false;
  }

  /// Replace the fact at position \p It with a newly registered \p Entry.
  /// Returns the new ID, or std::nullopt if \p It is end().
  std::optional<FactID> replaceLock(FactManager &FM, iterator It,
                                    const FactEntry *Entry) {
    if (It == end())
      return std::nullopt;
    FactID F = FM.newFact(Entry);
    *It = F;
    return F;
  }

  /// Replace the fact matching \p CapE (if any) with \p Entry.
  std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
                                    const FactEntry *Entry) {
    return replaceLock(FM, findLockIter(FM, CapE), Entry);
  }

  /// Find the position of the first fact that exactly matches \p CapE.
  iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
    return llvm::find_if(*this,
                         [&](FactID ID) { return FM[ID].matches(CapE); });
  }

  /// Find the first fact that exactly matches \p CapE, or null.
  const FactEntry *findLock(FactManager &FM, const CapabilityExpr &CapE) const {
    auto I =
        llvm::find_if(*this, [&](FactID ID) { return FM[ID].matches(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  /// Like findLock, but uses matchesUniv so the universal capability also
  /// satisfies the lookup.
  const FactEntry *findLockUniv(FactManager &FM,
                                const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].matchesUniv(CapE); });
    return I != end() ? &FM[*I] : nullptr;
  }

  /// Find a fact that only partially matches \p CapE, or null. Used to
  /// produce better diagnostics when no exact match exists.
  const FactEntry *findPartialMatch(FactManager &FM,
                                    const CapabilityExpr &CapE) const {
    auto I = llvm::find_if(*this, [&](FactID ID) -> bool {
      return FM[ID].partiallyMatches(CapE);
    });
    return I != end() ? &FM[*I] : nullptr;
  }

  /// Return true if some fact in this set refers to the declaration \p Vd.
  bool containsMutexDecl(FactManager &FM, const ValueDecl* Vd) const {
    auto I = llvm::find_if(
        *this, [&](FactID ID) -> bool { return FM[ID].valueDecl() == Vd; });
    return I != end();
  }
};
292
293class ThreadSafetyAnalyzer;
294
295} // namespace
296
297namespace clang {
298namespace threadSafety {
299
301private:
302 using BeforeVect = SmallVector<const ValueDecl *, 4>;
303
304 struct BeforeInfo {
305 BeforeVect Vect;
306 int Visited = 0;
307
308 BeforeInfo() = default;
309 BeforeInfo(BeforeInfo &&) = default;
310 };
311
312 using BeforeMap =
313 llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
314 using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
315
316public:
317 BeforeSet() = default;
318
319 BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
320 ThreadSafetyAnalyzer& Analyzer);
321
322 BeforeInfo *getBeforeInfoForDecl(const ValueDecl *Vd,
323 ThreadSafetyAnalyzer &Analyzer);
324
325 void checkBeforeAfter(const ValueDecl* Vd,
326 const FactSet& FSet,
327 ThreadSafetyAnalyzer& Analyzer,
328 SourceLocation Loc, StringRef CapKind);
329
330private:
331 BeforeMap BMap;
332 CycleMap CycMap;
333};
334
335} // namespace threadSafety
336} // namespace clang
337
338namespace {
339
340class LocalVariableMap;
341
342using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
343
344/// A side (entry or exit) of a CFG node.
345enum CFGBlockSide { CBS_Entry, CBS_Exit };
346
/// CFGBlockInfo is a struct which contains all the information that is
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
  // Lockset held at entry to block
  FactSet EntrySet;

  // Lockset held at exit from block
  FactSet ExitSet;

  // Context held at entry to block
  LocalVarContext EntryContext;

  // Context held at exit from block
  LocalVarContext ExitContext;

  // Location of first statement in block
  SourceLocation EntryLoc;

  // Location of last statement in block.
  SourceLocation ExitLoc;

  // Used to replay contexts later
  unsigned EntryIndex;

  // Is this block reachable?
  bool Reachable = false;

  /// Select the entry or exit lockset.
  const FactSet &getSet(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntrySet : ExitSet;
  }

  /// Select the entry or exit source location.
  SourceLocation getLocation(CFGBlockSide Side) const {
    return Side == CBS_Entry ? EntryLoc : ExitLoc;
  }

private:
  // Construction is private: both contexts must start from the same empty
  // map, so instances are made via getEmptyBlockInfo (defined after
  // LocalVariableMap).
  CFGBlockInfo(LocalVarContext EmptyCtx)
      : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}

public:
  static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
390
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
// unique name (an integer), which acts as the SSA name for that definition.
// The total set of names is shared among all CFG basic blocks.
// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
// with their SSA-names. Instead, we compute a Context for each point in the
// code, which maps local variables to the appropriate SSA-name. This map
// changes with each assignment.
//
// The map is computed in a single pass over the CFG. Subsequent analyses can
// then query the map to find the appropriate Context for a statement, and use
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
  using Context = LocalVarContext;

  /// A VarDefinition consists of an expression, representing the value of the
  /// variable, along with the context in which that expression should be
  /// interpreted. A reference VarDefinition does not itself contain this
  /// information, but instead contains a pointer to a previous VarDefinition.
  struct VarDefinition {
  public:
    friend class LocalVariableMap;

    // The original declaration for this variable.
    const NamedDecl *Dec;

    // The expression for this variable, OR
    const Expr *Exp = nullptr;

    // Direct reference to another VarDefinition
    unsigned DirectRef = 0;

    // Reference to underlying canonical non-reference VarDefinition.
    unsigned CanonicalRef = 0;

    // The map with which Exp should be interpreted.
    Context Ctx;

    // A definition with no expression is a reference to another definition.
    bool isReference() const { return !Exp; }

    // Reset both reference links to the "undefined" placeholder (index 0).
    void invalidateRef() { DirectRef = CanonicalRef = 0; }

  private:
    // Create ordinary variable definition
    VarDefinition(const NamedDecl *D, const Expr *E, Context C)
        : Dec(D), Exp(E), Ctx(C) {}

    // Create reference to previous definition
    VarDefinition(const NamedDecl *D, unsigned DirectRef, unsigned CanonicalRef,
                  Context C)
        : Dec(D), DirectRef(DirectRef), CanonicalRef(CanonicalRef), Ctx(C) {}
  };

private:
  Context::Factory ContextFactory;

  // Table of all definitions; indices into this vector are the SSA names.
  std::vector<VarDefinition> VarDefinitions;

  // Snapshot of the context after each assignment/DeclStmt, in traversal
  // order, so later analyses can replay contexts via getNextContext.
  std::vector<std::pair<const Stmt *, Context>> SavedContexts;

public:
  LocalVariableMap() {
    // index 0 is a placeholder for undefined variables (aka phi-nodes).
    VarDefinitions.push_back(VarDefinition(nullptr, 0, 0, getEmptyContext()));
  }

  /// Look up a definition, within the given context.
  const VarDefinition* lookup(const NamedDecl *D, Context Ctx) {
    const unsigned *i = Ctx.lookup(D);
    if (!i)
      return nullptr;
    assert(*i < VarDefinitions.size());
    return &VarDefinitions[*i];
  }

  /// Look up the definition for D within the given context. Returns
  /// NULL if the expression is not statically known. If successful, also
  /// modifies Ctx to hold the context of the return Expr.
  const Expr* lookupExpr(const NamedDecl *D, Context &Ctx) {
    const unsigned *P = Ctx.lookup(D);
    if (!P)
      return nullptr;

    unsigned i = *P;
    // Chase chains of reference definitions until one with an actual
    // expression is found; index 0 ("undefined") terminates the walk.
    while (i > 0) {
      if (VarDefinitions[i].Exp) {
        Ctx = VarDefinitions[i].Ctx;
        return VarDefinitions[i].Exp;
      }
      i = VarDefinitions[i].DirectRef;
    }
    return nullptr;
  }

  Context getEmptyContext() { return ContextFactory.getEmptyMap(); }

  /// Return the next context after processing S. This function is used by
  /// clients of the class to get the appropriate context when traversing the
  /// CFG. It must be called for every assignment or DeclStmt.
  Context getNextContext(unsigned &CtxIndex, const Stmt *S, Context C) {
    if (SavedContexts[CtxIndex+1].first == S) {
      CtxIndex++;
      Context Result = SavedContexts[CtxIndex].second;
      return Result;
    }
    return C;
  }

  /// Print the name of definition \p i to llvm::errs() (debugging aid).
  void dumpVarDefinitionName(unsigned i) {
    if (i == 0) {
      llvm::errs() << "Undefined";
      return;
    }
    const NamedDecl *Dec = VarDefinitions[i].Dec;
    if (!Dec) {
      llvm::errs() << "<<NULL>>";
      return;
    }
    Dec->printName(llvm::errs());
    llvm::errs() << "." << i << " " << ((const void*) Dec);
  }

  /// Dumps an ASCII representation of the variable map to llvm::errs()
  void dump() {
    for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
      const Expr *Exp = VarDefinitions[i].Exp;
      unsigned Ref = VarDefinitions[i].DirectRef;

      dumpVarDefinitionName(i);
      llvm::errs() << " = ";
      if (Exp) Exp->dump();
      else {
        dumpVarDefinitionName(Ref);
        llvm::errs() << "\n";
      }
    }
  }

  /// Dumps an ASCII representation of a Context to llvm::errs()
  void dumpContext(Context C) {
    for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
      const NamedDecl *D = I.getKey();
      D->printName(llvm::errs());
      llvm::errs() << " -> ";
      dumpVarDefinitionName(I.getData());
      llvm::errs() << "\n";
    }
  }

  /// Builds the variable map.
  void traverseCFG(CFG *CFGraph, const PostOrderCFGView *SortedGraph,
                   std::vector<CFGBlockInfo> &BlockInfo);

protected:
  friend class VarMapBuilder;

  // Resolve any definition ID down to its non-reference base ID.
  unsigned getCanonicalDefinitionID(unsigned ID) const {
    while (ID > 0 && VarDefinitions[ID].isReference())
      ID = VarDefinitions[ID].CanonicalRef;
    return ID;
  }

  // Get the current context index
  unsigned getContextIndex() { return SavedContexts.size()-1; }

  // Save the current context for later replay
  void saveContext(const Stmt *S, Context C) {
    SavedContexts.push_back(std::make_pair(S, C));
  }

  // Adds a new definition to the given context, and returns a new context.
  // This method should be called when declaring a new variable.
  Context addDefinition(const NamedDecl *D, const Expr *Exp, Context Ctx) {
    assert(!Ctx.contains(D));
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
    return NewCtx;
  }

  // Add a new reference to an existing definition.
  Context addReference(const NamedDecl *D, unsigned Ref, Context Ctx) {
    unsigned newID = VarDefinitions.size();
    Context NewCtx = ContextFactory.add(Ctx, D, newID);
    VarDefinitions.push_back(
        VarDefinition(D, Ref, getCanonicalDefinitionID(Ref), Ctx));
    return NewCtx;
  }

  // Updates a definition only if that definition is already in the map.
  // This method should be called when assigning to an existing variable.
  Context updateDefinition(const NamedDecl *D, Expr *Exp, Context Ctx) {
    if (Ctx.contains(D)) {
      unsigned newID = VarDefinitions.size();
      Context NewCtx = ContextFactory.remove(Ctx, D);
      NewCtx = ContextFactory.add(NewCtx, D, newID);
      VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
      return NewCtx;
    }
    return Ctx;
  }

  // Removes a definition from the context, but keeps the variable name
  // as a valid variable. The index 0 is a placeholder for cleared definitions.
  Context clearDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
      NewCtx = ContextFactory.add(NewCtx, D, 0);
    }
    return NewCtx;
  }

  // Remove a definition entirely from the context.
  Context removeDefinition(const NamedDecl *D, Context Ctx) {
    Context NewCtx = Ctx;
    if (NewCtx.contains(D)) {
      NewCtx = ContextFactory.remove(NewCtx, D);
    }
    return NewCtx;
  }

  Context intersectContexts(Context C1, Context C2);
  Context createReferenceContext(Context C);
  void intersectBackEdge(Context C1, Context C2);
};
618
619} // namespace
620
621// This has to be defined after LocalVariableMap.
622CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
623 return CFGBlockInfo(M.getEmptyContext());
624}
625
626namespace {
627
/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public ConstStmtVisitor<VarMapBuilder> {
public:
  // The map being built (not owned by this visitor).
  LocalVariableMap* VMap;

  // Current variable-to-definition context; updated as statements are
  // visited, and read back by the caller after each block.
  LocalVariableMap::Context Ctx;

  VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
      : VMap(VM), Ctx(C) {}

  // Handlers for the statement kinds that can create, redefine, or
  // invalidate local variable definitions.
  void VisitDeclStmt(const DeclStmt *S);
  void VisitBinaryOperator(const BinaryOperator *BO);
  void VisitCallExpr(const CallExpr *CE);
};
641
642} // namespace
643
644// Add new local variables to the variable map
645void VarMapBuilder::VisitDeclStmt(const DeclStmt *S) {
646 bool modifiedCtx = false;
647 const DeclGroupRef DGrp = S->getDeclGroup();
648 for (const auto *D : DGrp) {
649 if (const auto *VD = dyn_cast_or_null<VarDecl>(D)) {
650 const Expr *E = VD->getInit();
651
652 // Add local variables with trivial type to the variable map
653 QualType T = VD->getType();
654 if (T.isTrivialType(VD->getASTContext())) {
655 Ctx = VMap->addDefinition(VD, E, Ctx);
656 modifiedCtx = true;
657 }
658 }
659 }
660 if (modifiedCtx)
661 VMap->saveContext(S, Ctx);
662}
663
664// Update local variable definitions in variable map
665void VarMapBuilder::VisitBinaryOperator(const BinaryOperator *BO) {
666 if (!BO->isAssignmentOp())
667 return;
668
669 Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
670
671 // Update the variable map and current context.
672 if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
673 const ValueDecl *VDec = DRE->getDecl();
674 if (Ctx.lookup(VDec)) {
675 if (BO->getOpcode() == BO_Assign)
676 Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
677 else
678 // FIXME -- handle compound assignment operators
679 Ctx = VMap->clearDefinition(VDec, Ctx);
680 VMap->saveContext(BO, Ctx);
681 }
682 }
683}
684
685// Invalidates local variable definitions if variable escaped.
686void VarMapBuilder::VisitCallExpr(const CallExpr *CE) {
687 const FunctionDecl *FD = CE->getDirectCallee();
688 if (!FD)
689 return;
690
691 // Heuristic for likely-benign functions that pass by mutable reference. This
692 // is needed to avoid a slew of false positives due to mutable reference
693 // passing where the captured reference is usually passed on by-value.
694 if (const IdentifierInfo *II = FD->getIdentifier()) {
695 // Any kind of std::bind-like functions.
696 if (II->isStr("bind") || II->isStr("bind_front"))
697 return;
698 }
699
700 // Invalidate local variable definitions that are passed by non-const
701 // reference or non-const pointer.
702 for (unsigned Idx = 0; Idx < CE->getNumArgs(); ++Idx) {
703 if (Idx >= FD->getNumParams())
704 break;
705
706 const Expr *Arg = CE->getArg(Idx)->IgnoreParenImpCasts();
707 const ParmVarDecl *PVD = FD->getParamDecl(Idx);
708 QualType ParamType = PVD->getType();
709
710 // Potential reassignment if passed by non-const reference / pointer.
711 const ValueDecl *VDec = nullptr;
712 if (ParamType->isReferenceType() &&
713 !ParamType->getPointeeType().isConstQualified()) {
714 if (const auto *DRE = dyn_cast<DeclRefExpr>(Arg))
715 VDec = DRE->getDecl();
716 } else if (ParamType->isPointerType() &&
717 !ParamType->getPointeeType().isConstQualified()) {
718 Arg = Arg->IgnoreParenCasts();
719 if (const auto *UO = dyn_cast<UnaryOperator>(Arg)) {
720 if (UO->getOpcode() == UO_AddrOf) {
721 const Expr *SubE = UO->getSubExpr()->IgnoreParenCasts();
722 if (const auto *DRE = dyn_cast<DeclRefExpr>(SubE))
723 VDec = DRE->getDecl();
724 }
725 }
726 }
727
728 if (VDec && Ctx.lookup(VDec)) {
729 Ctx = VMap->clearDefinition(VDec, Ctx);
730 VMap->saveContext(CE, Ctx);
731 }
732 }
733}
734
735// Computes the intersection of two contexts. The intersection is the
736// set of variables which have the same definition in both contexts;
737// variables with different definitions are discarded.
738LocalVariableMap::Context
739LocalVariableMap::intersectContexts(Context C1, Context C2) {
740 Context Result = C1;
741 for (const auto &P : C1) {
742 const NamedDecl *Dec = P.first;
743 const unsigned *I2 = C2.lookup(Dec);
744 if (!I2) {
745 // The variable doesn't exist on second path.
746 Result = removeDefinition(Dec, Result);
747 } else if (getCanonicalDefinitionID(P.second) !=
748 getCanonicalDefinitionID(*I2)) {
749 // If canonical definitions mismatch the underlying definitions are
750 // different, invalidate.
751 Result = clearDefinition(Dec, Result);
752 }
753 }
754 return Result;
755}
756
757// For every variable in C, create a new variable that refers to the
758// definition in C. Return a new context that contains these new variables.
759// (We use this for a naive implementation of SSA on loop back-edges.)
760LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
761 Context Result = getEmptyContext();
762 for (const auto &P : C)
763 Result = addReference(P.first, P.second, Result);
764 return Result;
765}
766
767// This routine also takes the intersection of C1 and C2, but it does so by
768// altering the VarDefinitions. C1 must be the result of an earlier call to
769// createReferenceContext.
770void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
771 for (const auto &P : C1) {
772 const unsigned I1 = P.second;
773 VarDefinition *VDef = &VarDefinitions[I1];
774 assert(VDef->isReference());
775
776 const unsigned *I2 = C2.lookup(P.first);
777 if (!I2) {
778 // Variable does not exist at the end of the loop, invalidate.
779 VDef->invalidateRef();
780 continue;
781 }
782
783 // Compare the canonical IDs. This correctly handles chains of references
784 // and determines if the variable is truly loop-invariant.
785 if (VDef->CanonicalRef != getCanonicalDefinitionID(*I2))
786 VDef->invalidateRef(); // Mark this variable as undefined
787 }
788}
789
790// Traverse the CFG in topological order, so all predecessors of a block
791// (excluding back-edges) are visited before the block itself. At
792// each point in the code, we calculate a Context, which holds the set of
793// variable definitions which are visible at that point in execution.
794// Visible variables are mapped to their definitions using an array that
795// contains all definitions.
796//
797// At join points in the CFG, the set is computed as the intersection of
798// the incoming sets along each edge, E.g.
799//
800// { Context | VarDefinitions }
801// int x = 0; { x -> x1 | x1 = 0 }
802// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
803// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
804// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
805// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
806//
807// This is essentially a simpler and more naive version of the standard SSA
808// algorithm. Those definitions that remain in the intersection are from blocks
809// that strictly dominate the current block. We do not bother to insert proper
810// phi nodes, because they are not used in our analysis; instead, wherever
811// a phi node would be required, we simply remove that definition from the
812// context (E.g. x above).
813//
814// The initial traversal does not capture back-edges, so those need to be
815// handled on a separate pass. Whenever the first pass encounters an
816// incoming back edge, it duplicates the context, creating new definitions
817// that refer back to the originals. (These correspond to places where SSA
818// might have to insert a phi node.) On the second pass, these definitions are
819// set to NULL if the variable has changed on the back-edge (i.e. a phi
820// node was actually required.) E.g.
821//
822// { Context | VarDefinitions }
823// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
824// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
825// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
826// ... { y -> y1 | x3 = 2, x2 = 1, ... }
827void LocalVariableMap::traverseCFG(CFG *CFGraph,
828 const PostOrderCFGView *SortedGraph,
829 std::vector<CFGBlockInfo> &BlockInfo) {
830 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
831
832 for (const auto *CurrBlock : *SortedGraph) {
833 unsigned CurrBlockID = CurrBlock->getBlockID();
834 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
835
836 VisitedBlocks.insert(CurrBlock);
837
838 // Calculate the entry context for the current block
839 bool HasBackEdges = false;
840 bool CtxInit = true;
841 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
842 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
843 // if *PI -> CurrBlock is a back edge, so skip it
844 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI)) {
845 HasBackEdges = true;
846 continue;
847 }
848
849 unsigned PrevBlockID = (*PI)->getBlockID();
850 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
851
852 if (CtxInit) {
853 CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
854 CtxInit = false;
855 }
856 else {
857 CurrBlockInfo->EntryContext =
858 intersectContexts(CurrBlockInfo->EntryContext,
859 PrevBlockInfo->ExitContext);
860 }
861 }
862
863 // Duplicate the context if we have back-edges, so we can call
864 // intersectBackEdges later.
865 if (HasBackEdges)
866 CurrBlockInfo->EntryContext =
867 createReferenceContext(CurrBlockInfo->EntryContext);
868
869 // Create a starting context index for the current block
870 saveContext(nullptr, CurrBlockInfo->EntryContext);
871 CurrBlockInfo->EntryIndex = getContextIndex();
872
873 // Visit all the statements in the basic block.
874 VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
875 for (const auto &BI : *CurrBlock) {
876 switch (BI.getKind()) {
878 CFGStmt CS = BI.castAs<CFGStmt>();
879 VMapBuilder.Visit(CS.getStmt());
880 break;
881 }
882 default:
883 break;
884 }
885 }
886 CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
887
888 // Mark variables on back edges as "unknown" if they've been changed.
889 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
890 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
891 // if CurrBlock -> *SI is *not* a back edge
892 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
893 continue;
894
895 CFGBlock *FirstLoopBlock = *SI;
896 Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
897 Context LoopEnd = CurrBlockInfo->ExitContext;
898 intersectBackEdge(LoopBegin, LoopEnd);
899 }
900 }
901
902 // Put an extra entry at the end of the indexed context array
903 unsigned exitID = CFGraph->getExit().getBlockID();
904 saveContext(nullptr, BlockInfo[exitID].ExitContext);
905}
906
907/// Find the appropriate source locations to use when producing diagnostics for
908/// each block in the CFG.
909static void findBlockLocations(CFG *CFGraph,
910 const PostOrderCFGView *SortedGraph,
911 std::vector<CFGBlockInfo> &BlockInfo) {
912 for (const auto *CurrBlock : *SortedGraph) {
913 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
914
915 // Find the source location of the last statement in the block, if the
916 // block is not empty.
917 if (const Stmt *S = CurrBlock->getTerminatorStmt()) {
918 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getBeginLoc();
919 } else {
920 for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
921 BE = CurrBlock->rend(); BI != BE; ++BI) {
922 // FIXME: Handle other CFGElement kinds.
923 if (std::optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
924 CurrBlockInfo->ExitLoc = CS->getStmt()->getBeginLoc();
925 break;
926 }
927 }
928 }
929
930 if (CurrBlockInfo->ExitLoc.isValid()) {
931 // This block contains at least one statement. Find the source location
932 // of the first statement in the block.
933 for (const auto &BI : *CurrBlock) {
934 // FIXME: Handle other CFGElement kinds.
935 if (std::optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
936 CurrBlockInfo->EntryLoc = CS->getStmt()->getBeginLoc();
937 break;
938 }
939 }
940 } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
941 CurrBlock != &CFGraph->getExit()) {
942 // The block is empty, and has a single predecessor. Use its exit
943 // location.
944 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
945 BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
946 } else if (CurrBlock->succ_size() == 1 && *CurrBlock->succ_begin()) {
947 // The block is empty, and has a single successor. Use its entry
948 // location.
949 CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
950 BlockInfo[(*CurrBlock->succ_begin())->getBlockID()].EntryLoc;
951 }
952 }
953}
954
955namespace {
956
/// A fact recording that a single lockable capability (e.g. a mutex) is
/// currently held.  For reentrant capabilities, also tracks how many times
/// the capability has been re-acquired beyond the initial acquisition.
class LockableFactEntry final : public FactEntry {
private:
  /// Reentrancy depth: incremented when a capability has been acquired
  /// reentrantly (after initial acquisition). Always 0 for non-reentrant
  /// capabilities.
  unsigned int ReentrancyDepth = 0;

  // Constructor is private: instances are bump-allocated via create().
  LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
                    SourceKind Src)
      : FactEntry(Lockable, CE, LK, Loc, Src) {}

public:
  /// Copy an existing fact into the bump allocator.  Facts are treated as
  /// immutable, so updates (e.g. reentrancy changes) copy-then-modify.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const LockableFactEntry &Other) {
    return new (Alloc) LockableFactEntry(Other);
  }

  /// Create a fresh fact in the bump allocator.
  static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
                                   const CapabilityExpr &CE, LockKind LK,
                                   SourceLocation Loc,
                                   SourceKind Src = Acquired) {
    return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
  }

  unsigned int getReentrancyDepth() const { return ReentrancyDepth; }

  /// Invoked when this fact is dropped while intersecting locksets at a join
  /// point; warns that the capability is held on only some paths, unless the
  /// fact is exempt (asserted/negative/universal).
  void
  handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
                                SourceLocation JoinLoc, LockErrorKind LEK,
                                ThreadSafetyHandler &Handler) const override {
    if (!asserted() && !negative() && !isUniversal()) {
      Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
                                        LEK);
    }
  }

  /// Invoked when the capability is acquired while this fact is already held:
  /// either records a reentrant acquisition or diagnoses a double lock.
  void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
                  ThreadSafetyHandler &Handler) const override {
    if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
      // This capability has been reentrantly acquired.
      FSet.replaceLock(FactMan, entry, RFact);
    } else {
      Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
                               entry.loc());
    }
  }

  /// Invoked on release: removes the fact, then either re-adds it with one
  /// less level of reentrancy, or records the corresponding negative
  /// capability (i.e. "known not held").
  void handleUnlock(FactSet &FSet, FactManager &FactMan,
                    const CapabilityExpr &Cp, SourceLocation UnlockLoc,
                    bool FullyRemove,
                    ThreadSafetyHandler &Handler) const override {
    FSet.removeLock(FactMan, Cp);

    if (const FactEntry *RFact = leaveReentrant(FactMan)) {
      // This capability remains reentrantly acquired.
      FSet.addLock(FactMan, RFact);
    } else if (!Cp.negative()) {
      FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
                                !Cp, LK_Exclusive, UnlockLoc));
    }
  }

  // Return an updated FactEntry if we can acquire this capability reentrant,
  // nullptr otherwise.  The returned fact is a fresh copy with the depth
  // incremented; this fact itself is never mutated.
  const FactEntry *tryReenter(FactManager &FactMan,
                              LockKind ReenterKind) const {
    if (!reentrant())
      return nullptr;
    if (kind() != ReenterKind)
      return nullptr;
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth++;
    return NewFact;
  }

  // Return an updated FactEntry if we are releasing a capability previously
  // acquired reentrant, nullptr otherwise.
  const FactEntry *leaveReentrant(FactManager &FactMan) const {
    if (!ReentrancyDepth)
      return nullptr;
    assert(reentrant());
    auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
    NewFact->ReentrancyDepth--;
    return NewFact;
  }

  // LLVM-style RTTI support.
  static bool classof(const FactEntry *A) {
    return A->getFactEntryKind() == Lockable;
  }
};
1047
/// How a scoped capability object currently relates to one of the
/// capabilities it manages.
enum UnderlyingCapabilityKind {
  UCK_Acquired, ///< Any kind of acquired capability.
  UCK_ReleasedShared, ///< Shared capability that was released.
  UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};

/// One capability managed by a scoped object, paired with its current state.
struct UnderlyingCapability {
  CapabilityExpr Cap;
  UnderlyingCapabilityKind Kind;
};
1058
1059class ScopedLockableFactEntry final
1060 : public FactEntry,
1061 private llvm::TrailingObjects<ScopedLockableFactEntry,
1062 UnderlyingCapability> {
1063 friend TrailingObjects;
1064
1065private:
1066 const unsigned ManagedCapacity;
1067 unsigned ManagedSize = 0;
1068
1069 ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
1070 SourceKind Src, unsigned ManagedCapacity)
1071 : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
1072 ManagedCapacity(ManagedCapacity) {}
1073
1074 void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
1075 assert(ManagedSize < ManagedCapacity);
1076 new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
1077 ++ManagedSize;
1078 }
1079
1080 ArrayRef<UnderlyingCapability> getManaged() const {
1081 return getTrailingObjects(ManagedSize);
1082 }
1083
1084public:
1085 static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
1086 const CapabilityExpr &CE,
1087 SourceLocation Loc, SourceKind Src,
1088 unsigned ManagedCapacity) {
1089 void *Storage =
1090 Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
1091 alignof(ScopedLockableFactEntry));
1092 return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
1093 }
1094
1095 CapExprSet getUnderlyingMutexes() const {
1096 CapExprSet UnderlyingMutexesSet;
1097 for (const UnderlyingCapability &UnderlyingMutex : getManaged())
1098 UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
1099 return UnderlyingMutexesSet;
1100 }
1101
1102 /// \name Adding managed locks
1103 /// Capacity for managed locks must have been allocated via \ref create.
1104 /// There is no reallocation in case the capacity is exceeded!
1105 /// \{
1106 void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }
1107
1108 void addExclusiveUnlock(const CapabilityExpr &M) {
1109 addManaged(M, UCK_ReleasedExclusive);
1110 }
1111
1112 void addSharedUnlock(const CapabilityExpr &M) {
1113 addManaged(M, UCK_ReleasedShared);
1114 }
1115 /// \}
1116
1117 void
1118 handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
1119 SourceLocation JoinLoc, LockErrorKind LEK,
1120 ThreadSafetyHandler &Handler) const override {
1122 return;
1123
1124 for (const auto &UnderlyingMutex : getManaged()) {
1125 const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
1126 if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
1127 (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
1128 // If this scoped lock manages another mutex, and if the underlying
1129 // mutex is still/not held, then warn about the underlying mutex.
1130 Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
1131 UnderlyingMutex.Cap.toString(), loc(),
1132 JoinLoc, LEK);
1133 }
1134 }
1135 }
1136
1137 void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
1138 ThreadSafetyHandler &Handler) const override {
1139 for (const auto &UnderlyingMutex : getManaged()) {
1140 if (UnderlyingMutex.Kind == UCK_Acquired)
1141 lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
1142 &Handler);
1143 else
1144 unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
1145 }
1146 }
1147
1148 void handleUnlock(FactSet &FSet, FactManager &FactMan,
1149 const CapabilityExpr &Cp, SourceLocation UnlockLoc,
1150 bool FullyRemove,
1151 ThreadSafetyHandler &Handler) const override {
1152 assert(!Cp.negative() && "Managing object cannot be negative.");
1153 for (const auto &UnderlyingMutex : getManaged()) {
1154 // Remove/lock the underlying mutex if it exists/is still unlocked; warn
1155 // on double unlocking/locking if we're not destroying the scoped object.
1156 ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
1157 if (UnderlyingMutex.Kind == UCK_Acquired) {
1158 unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
1159 } else {
1160 LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
1161 ? LK_Shared
1162 : LK_Exclusive;
1163 lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
1164 }
1165 }
1166 if (FullyRemove)
1167 FSet.removeLock(FactMan, Cp);
1168 }
1169
1170 static bool classof(const FactEntry *A) {
1171 return A->getFactEntryKind() == ScopedLockable;
1172 }
1173
1174private:
1175 void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
1176 LockKind kind, SourceLocation loc,
1177 ThreadSafetyHandler *Handler) const {
1178 if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
1179 const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
1180 if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
1181 // This capability has been reentrantly acquired.
1182 FSet.replaceLock(FactMan, It, RFact);
1183 } else if (Handler) {
1184 Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
1185 }
1186 } else {
1187 FSet.removeLock(FactMan, !Cp);
1188 FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
1189 Managed));
1190 }
1191 }
1192
1193 void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
1194 SourceLocation loc, ThreadSafetyHandler *Handler) const {
1195 if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
1196 const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
1197 if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
1198 // This capability remains reentrantly acquired.
1199 FSet.replaceLock(FactMan, It, RFact);
1200 return;
1201 }
1202
1203 FSet.replaceLock(
1204 FactMan, It,
1205 FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
1206 } else if (Handler) {
1207 SourceLocation PrevLoc;
1208 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
1209 PrevLoc = Neg->loc();
1210 Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
1211 }
1212 }
1213};
1214
1215/// Class which implements the core thread safety analysis routines.
1216class ThreadSafetyAnalyzer {
1217 friend class BuildLockset;
1218 friend class threadSafety::BeforeSet;
1219
1220 llvm::BumpPtrAllocator Bpa;
1221 threadSafety::til::MemRegionRef Arena;
1222 threadSafety::SExprBuilder SxBuilder;
1223
1224 ThreadSafetyHandler &Handler;
1225 const FunctionDecl *CurrentFunction;
1226 LocalVariableMap LocalVarMap;
1227 // Maps constructed objects to `this` placeholder prior to initialization.
1228 llvm::SmallDenseMap<const Expr *, til::LiteralPtr *> ConstructedObjects;
1229 FactManager FactMan;
1230 std::vector<CFGBlockInfo> BlockInfo;
1231
1232 BeforeSet *GlobalBeforeSet;
1233
1234public:
1235 ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
1236 : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
1237 GlobalBeforeSet(Bset) {}
1238
1239 bool inCurrentScope(const CapabilityExpr &CapE);
1240
1241 void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
1242 void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
1243 SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
1244
1245 template <typename AttrType>
1246 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1247 const NamedDecl *D, til::SExpr *Self = nullptr);
1248
1249 template <class AttrType>
1250 void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
1251 const NamedDecl *D,
1252 const CFGBlock *PredBlock, const CFGBlock *CurrBlock,
1253 Expr *BrE, bool Neg);
1254
1255 const CallExpr* getTrylockCallExpr(const Stmt *Cond, LocalVarContext C,
1256 bool &Negate);
1257
1258 void getEdgeLockset(FactSet &Result, const FactSet &ExitSet,
1259 const CFGBlock* PredBlock,
1260 const CFGBlock *CurrBlock);
1261
1262 bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
1263 LockErrorKind EntryLEK);
1264
1265 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1266 SourceLocation JoinLoc, LockErrorKind EntryLEK,
1267 LockErrorKind ExitLEK);
1268
1269 void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
1270 SourceLocation JoinLoc, LockErrorKind LEK) {
1271 intersectAndWarn(EntrySet, ExitSet, JoinLoc, LEK, LEK);
1272 }
1273
1274 void runAnalysis(AnalysisDeclContext &AC);
1275
1276 void warnIfMutexNotHeld(const FactSet &FSet, const NamedDecl *D,
1277 const Expr *Exp, AccessKind AK, Expr *MutexExp,
1278 ProtectedOperationKind POK, til::SExpr *Self,
1279 SourceLocation Loc);
1280 void warnIfMutexHeld(const FactSet &FSet, const NamedDecl *D, const Expr *Exp,
1281 Expr *MutexExp, til::SExpr *Self, SourceLocation Loc);
1282
1283 void checkAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
1285 void checkPtAccess(const FactSet &FSet, const Expr *Exp, AccessKind AK,
1287};
1288
1289} // namespace
1290
1291/// Process acquired_before and acquired_after attributes on Vd.
1292BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
1293 ThreadSafetyAnalyzer& Analyzer) {
1294 // Create a new entry for Vd.
1295 BeforeInfo *Info = nullptr;
1296 {
1297 // Keep InfoPtr in its own scope in case BMap is modified later and the
1298 // reference becomes invalid.
1299 std::unique_ptr<BeforeInfo> &InfoPtr = BMap[Vd];
1300 if (!InfoPtr)
1301 InfoPtr.reset(new BeforeInfo());
1302 Info = InfoPtr.get();
1303 }
1304
1305 for (const auto *At : Vd->attrs()) {
1306 switch (At->getKind()) {
1307 case attr::AcquiredBefore: {
1308 const auto *A = cast<AcquiredBeforeAttr>(At);
1309
1310 // Read exprs from the attribute, and add them to BeforeVect.
1311 for (const auto *Arg : A->args()) {
1312 CapabilityExpr Cp =
1313 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1314 if (const ValueDecl *Cpvd = Cp.valueDecl()) {
1315 Info->Vect.push_back(Cpvd);
1316 const auto It = BMap.find(Cpvd);
1317 if (It == BMap.end())
1318 insertAttrExprs(Cpvd, Analyzer);
1319 }
1320 }
1321 break;
1322 }
1323 case attr::AcquiredAfter: {
1324 const auto *A = cast<AcquiredAfterAttr>(At);
1325
1326 // Read exprs from the attribute, and add them to BeforeVect.
1327 for (const auto *Arg : A->args()) {
1328 CapabilityExpr Cp =
1329 Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
1330 if (const ValueDecl *ArgVd = Cp.valueDecl()) {
1331 // Get entry for mutex listed in attribute
1332 BeforeInfo *ArgInfo = getBeforeInfoForDecl(ArgVd, Analyzer);
1333 ArgInfo->Vect.push_back(Vd);
1334 }
1335 }
1336 break;
1337 }
1338 default:
1339 break;
1340 }
1341 }
1342
1343 return Info;
1344}
1345
1346BeforeSet::BeforeInfo *
1348 ThreadSafetyAnalyzer &Analyzer) {
1349 auto It = BMap.find(Vd);
1350 BeforeInfo *Info = nullptr;
1351 if (It == BMap.end())
1352 Info = insertAttrExprs(Vd, Analyzer);
1353 else
1354 Info = It->second.get();
1355 assert(Info && "BMap contained nullptr?");
1356 return Info;
1357}
1358
1359/// Return true if any mutexes in FSet are in the acquired_before set of Vd.
1361 const FactSet& FSet,
1362 ThreadSafetyAnalyzer& Analyzer,
1363 SourceLocation Loc, StringRef CapKind) {
1365
1366 // Do a depth-first traversal of Vd.
1367 // Return true if there are cycles.
1368 std::function<bool (const ValueDecl*)> traverse = [&](const ValueDecl* Vd) {
1369 if (!Vd)
1370 return false;
1371
1372 BeforeSet::BeforeInfo *Info = getBeforeInfoForDecl(Vd, Analyzer);
1373
1374 if (Info->Visited == 1)
1375 return true;
1376
1377 if (Info->Visited == 2)
1378 return false;
1379
1380 if (Info->Vect.empty())
1381 return false;
1382
1383 InfoVect.push_back(Info);
1384 Info->Visited = 1;
1385 for (const auto *Vdb : Info->Vect) {
1386 // Exclude mutexes in our immediate before set.
1387 if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
1388 StringRef L1 = StartVd->getName();
1389 StringRef L2 = Vdb->getName();
1390 Analyzer.Handler.handleLockAcquiredBefore(CapKind, L1, L2, Loc);
1391 }
1392 // Transitively search other before sets, and warn on cycles.
1393 if (traverse(Vdb)) {
1394 if (CycMap.try_emplace(Vd, true).second) {
1395 StringRef L1 = Vd->getName();
1396 Analyzer.Handler.handleBeforeAfterCycle(L1, Vd->getLocation());
1397 }
1398 }
1399 }
1400 Info->Visited = 2;
1401 return false;
1402 };
1403
1404 traverse(StartVd);
1405
1406 for (auto *Info : InfoVect)
1407 Info->Visited = 0;
1408}
1409
1410/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
1411static const ValueDecl *getValueDecl(const Expr *Exp) {
1412 if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
1413 return getValueDecl(CE->getSubExpr());
1414
1415 if (const auto *DR = dyn_cast<DeclRefExpr>(Exp))
1416 return DR->getDecl();
1417
1418 if (const auto *ME = dyn_cast<MemberExpr>(Exp))
1419 return ME->getMemberDecl();
1420
1421 return nullptr;
1422}
1423
1424bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
1425 const threadSafety::til::SExpr *SExp = CapE.sexpr();
1426 assert(SExp && "Null expressions should be ignored");
1427
1428 if (const auto *LP = dyn_cast<til::LiteralPtr>(SExp)) {
1429 const ValueDecl *VD = LP->clangDecl();
1430 // Variables defined in a function are always inaccessible.
1431 if (!VD || !VD->isDefinedOutsideFunctionOrMethod())
1432 return false;
1433 // For now we consider static class members to be inaccessible.
1435 return false;
1436 // Global variables are always in scope.
1437 return true;
1438 }
1439
1440 // Members are in scope from methods of the same class.
1441 if (const auto *P = dyn_cast<til::Project>(SExp)) {
1442 if (!isa_and_nonnull<CXXMethodDecl>(CurrentFunction))
1443 return false;
1444 const ValueDecl *VD = P->clangDecl();
1445 return VD->getDeclContext() == CurrentFunction->getDeclContext();
1446 }
1447
1448 return false;
1449}
1450
1451/// Add a new lock to the lockset, warning if the lock is already there.
1452/// \param ReqAttr -- true if this is part of an initial Requires attribute.
1453void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
1454 bool ReqAttr) {
1455 if (Entry->shouldIgnore())
1456 return;
1457
1458 if (!ReqAttr && !Entry->negative()) {
1459 // look for the negative capability, and remove it from the fact set.
1460 CapabilityExpr NegC = !*Entry;
1461 const FactEntry *Nen = FSet.findLock(FactMan, NegC);
1462 if (Nen) {
1463 FSet.removeLock(FactMan, NegC);
1464 }
1465 else {
1466 if (inCurrentScope(*Entry) && !Entry->asserted() && !Entry->reentrant())
1467 Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
1468 NegC.toString(), Entry->loc());
1469 }
1470 }
1471
1472 // Check before/after constraints
1473 if (!Entry->asserted() && !Entry->declared()) {
1474 GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
1475 Entry->loc(), Entry->getKind());
1476 }
1477
1478 if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
1479 if (!Entry->asserted())
1480 Cp->handleLock(FSet, FactMan, *Entry, Handler);
1481 } else {
1482 FSet.addLock(FactMan, Entry);
1483 }
1484}
1485
1486/// Remove a lock from the lockset, warning if the lock is not there.
1487/// \param UnlockLoc The source location of the unlock (only used in error msg)
1488void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
1489 SourceLocation UnlockLoc,
1490 bool FullyRemove, LockKind ReceivedKind) {
1491 if (Cp.shouldIgnore())
1492 return;
1493
1494 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1495 if (!LDat) {
1496 SourceLocation PrevLoc;
1497 if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
1498 PrevLoc = Neg->loc();
1499 Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
1500 PrevLoc);
1501 return;
1502 }
1503
1504 // Generic lock removal doesn't care about lock kind mismatches, but
1505 // otherwise diagnose when the lock kinds are mismatched.
1506 if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
1507 Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
1508 ReceivedKind, LDat->loc(), UnlockLoc);
1509 }
1510
1511 LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
1512}
1513
1514/// Extract the list of mutexIDs from the attribute on an expression,
1515/// and push them onto Mtxs, discarding any duplicates.
1516template <typename AttrType>
1517void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1518 const Expr *Exp, const NamedDecl *D,
1519 til::SExpr *Self) {
1520 if (Attr->args_size() == 0) {
1521 // The mutex held is the "this" object.
1522 CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, Self);
1523 if (Cp.isInvalid()) {
1524 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1525 return;
1526 }
1527 //else
1528 if (!Cp.shouldIgnore())
1529 Mtxs.push_back_nodup(Cp);
1530 return;
1531 }
1532
1533 for (const auto *Arg : Attr->args()) {
1534 CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, Self);
1535 if (Cp.isInvalid()) {
1536 warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
1537 continue;
1538 }
1539 //else
1540 if (!Cp.shouldIgnore())
1541 Mtxs.push_back_nodup(Cp);
1542 }
1543}
1544
1545/// Extract the list of mutexIDs from a trylock attribute. If the
1546/// trylock applies to the given edge, then push them onto Mtxs, discarding
1547/// any duplicates.
1548template <class AttrType>
1549void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
1550 const Expr *Exp, const NamedDecl *D,
1551 const CFGBlock *PredBlock,
1552 const CFGBlock *CurrBlock,
1553 Expr *BrE, bool Neg) {
1554 // Find out which branch has the lock
1555 bool branch = false;
1556 if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
1557 branch = BLE->getValue();
1558 else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
1559 branch = ILE->getValue().getBoolValue();
1560
1561 int branchnum = branch ? 0 : 1;
1562 if (Neg)
1563 branchnum = !branchnum;
1564
1565 // If we've taken the trylock branch, then add the lock
1566 int i = 0;
1567 for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
1568 SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
1569 if (*SI == CurrBlock && i == branchnum)
1570 getMutexIDs(Mtxs, Attr, Exp, D);
1571 }
1572}
1573
1574static bool getStaticBooleanValue(Expr *E, bool &TCond) {
1576 TCond = false;
1577 return true;
1578 } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
1579 TCond = BLE->getValue();
1580 return true;
1581 } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
1582 TCond = ILE->getValue().getBoolValue();
1583 return true;
1584 } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
1585 return getStaticBooleanValue(CE->getSubExpr(), TCond);
1586 return false;
1587}
1588
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
                                                         LocalVarContext C,
                                                         bool &Negate) {
  if (!Cond)
    return nullptr;

  if (const auto *CallExp = dyn_cast<CallExpr>(Cond)) {
    // Look through __builtin_expect(cond, val): the condition is arg 0.
    if (CallExp->getBuiltinCallee() == Builtin::BI__builtin_expect)
      return getTrylockCallExpr(CallExp->getArg(0), C, Negate);
    return CallExp;
  }
  // Transparent wrappers: recurse into the sub-expression.
  else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
    return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
  else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
    return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
  else if (const auto *FE = dyn_cast<FullExpr>(Cond))
    return getTrylockCallExpr(FE->getSubExpr(), C, Negate);
  else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
    // A local variable: trace it back to its defining expression in the
    // current local-variable context.
    const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
    return getTrylockCallExpr(E, C, Negate);
  }
  else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
    // Logical not flips the sense of the trylock result.
    if (UOP->getOpcode() == UO_LNot) {
      Negate = !Negate;
      return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
    }
    return nullptr;
  }
  else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
    if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
      // Comparison against a boolean constant: != negates once, and
      // comparing against false negates again.
      if (BOP->getOpcode() == BO_NE)
        Negate = !Negate;

      bool TCond = false;
      if (getStaticBooleanValue(BOP->getRHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getLHS(), C, Negate);
      }
      TCond = false;
      if (getStaticBooleanValue(BOP->getLHS(), TCond)) {
        if (!TCond) Negate = !Negate;
        return getTrylockCallExpr(BOP->getRHS(), C, Negate);
      }
      return nullptr;
    }
    if (BOP->getOpcode() == BO_LAnd) {
      // LHS must have been evaluated in a different block.
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    }
    if (BOP->getOpcode() == BO_LOr)
      return getTrylockCallExpr(BOP->getRHS(), C, Negate);
    return nullptr;
  } else if (const auto *COP = dyn_cast<ConditionalOperator>(Cond)) {
    // cond ? true : false reduces to cond; cond ? false : true negates it.
    bool TCond, FCond;
    if (getStaticBooleanValue(COP->getTrueExpr(), TCond) &&
        getStaticBooleanValue(COP->getFalseExpr(), FCond)) {
      if (TCond && !FCond)
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      if (!TCond && FCond) {
        Negate = !Negate;
        return getTrylockCallExpr(COP->getCond(), C, Negate);
      }
    }
  }
  return nullptr;
}
1658
/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
                                          const FactSet &ExitSet,
                                          const CFGBlock *PredBlock,
                                          const CFGBlock *CurrBlock) {
  Result = ExitSet;

  const Stmt *Cond = PredBlock->getTerminatorCondition();
  // We don't acquire try-locks on ?: branches, only when its result is used.
  if (!Cond || isa<ConditionalOperator>(PredBlock->getTerminatorStmt()))
    return;

  bool Negate = false;
  const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
  const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;

  if (Handler.issueBetaWarnings()) {
    // Temporarily set the lookup context for SExprBuilder.
    SxBuilder.setLookupLocalVarExpr(
        [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
          return LocalVarMap.lookupExpr(D, Ctx);
        });
  }
  // Reset the lookup context on every exit path from this function.
  auto Cleanup = llvm::make_scope_exit(
      [this] { SxBuilder.setLookupLocalVarExpr(nullptr); });

  // Trace the branch condition back to a trylock call, if any.
  const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
  if (!Exp)
    return;

  auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
  if (!FunDecl || !FunDecl->hasAttr<TryAcquireCapabilityAttr>())
    return;

  CapExprSet ExclusiveLocksToAdd;
  CapExprSet SharedLocksToAdd;

  // If the condition is a call to a Trylock function, then grab the attributes
  for (const auto *Attr : FunDecl->specific_attrs<TryAcquireCapabilityAttr>())
    getMutexIDs(Attr->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, Attr,
                Exp, FunDecl, PredBlock, CurrBlock, Attr->getSuccessValue(),
                Negate);

  // Add and remove locks.
  SourceLocation Loc = Exp->getExprLoc();
  for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
                                                          LK_Exclusive, Loc));
  for (const auto &SharedLockToAdd : SharedLocksToAdd)
    addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
                                                          LK_Shared, Loc));
}
1713
1714namespace {
1715
1716/// We use this class to visit different types of expressions in
1717/// CFGBlocks, and build up the lockset.
1718/// An expression may cause us to add or remove locks from the lockset, or else
1719/// output error messages related to missing locks.
1720/// FIXME: In future, we may be able to not inherit from a visitor.
1721class BuildLockset : public ConstStmtVisitor<BuildLockset> {
1722 friend class ThreadSafetyAnalyzer;
1723
1724 ThreadSafetyAnalyzer *Analyzer;
1725 FactSet FSet;
1726 // The fact set for the function on exit.
1727 const FactSet &FunctionExitFSet;
1728 LocalVariableMap::Context LVarCtx;
1729 unsigned CtxIndex;
1730
1731 // To update and adjust the context.
1732 void updateLocalVarMapCtx(const Stmt *S) {
1733 if (S)
1734 LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
1735 if (!Analyzer->Handler.issueBetaWarnings())
1736 return;
1737 // The lookup closure needs to be reconstructed with the refreshed LVarCtx.
1738 Analyzer->SxBuilder.setLookupLocalVarExpr(
1739 [this, Ctx = LVarCtx](const NamedDecl *D) mutable -> const Expr * {
1740 return Analyzer->LocalVarMap.lookupExpr(D, Ctx);
1741 });
1742 }
1743
1744 // helper functions
1745
1746 void checkAccess(const Expr *Exp, AccessKind AK,
1748 Analyzer->checkAccess(FSet, Exp, AK, POK);
1749 }
1750 void checkPtAccess(const Expr *Exp, AccessKind AK,
1752 Analyzer->checkPtAccess(FSet, Exp, AK, POK);
1753 }
1754
1755 void handleCall(const Expr *Exp, const NamedDecl *D,
1756 til::SExpr *Self = nullptr,
1757 SourceLocation Loc = SourceLocation());
1758 void examineArguments(const FunctionDecl *FD,
1761 bool SkipFirstParam = false);
1762
1763public:
1764 BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info,
1765 const FactSet &FunctionExitFSet)
1766 : ConstStmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
1767 FunctionExitFSet(FunctionExitFSet), LVarCtx(Info.EntryContext),
1768 CtxIndex(Info.EntryIndex) {
1769 updateLocalVarMapCtx(nullptr);
1770 }
1771
1772 ~BuildLockset() { Analyzer->SxBuilder.setLookupLocalVarExpr(nullptr); }
1773
1774 void VisitUnaryOperator(const UnaryOperator *UO);
1775 void VisitBinaryOperator(const BinaryOperator *BO);
1776 void VisitCastExpr(const CastExpr *CE);
1777 void VisitCallExpr(const CallExpr *Exp);
1778 void VisitCXXConstructExpr(const CXXConstructExpr *Exp);
1779 void VisitDeclStmt(const DeclStmt *S);
1780 void VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *Exp);
1781 void VisitReturnStmt(const ReturnStmt *S);
1782};
1783
1784} // namespace
1785
1786/// Warn if the LSet does not contain a lock sufficient to protect access
1787/// of at least the passed in AccessKind.
1788void ThreadSafetyAnalyzer::warnIfMutexNotHeld(
1789 const FactSet &FSet, const NamedDecl *D, const Expr *Exp, AccessKind AK,
1790 Expr *MutexExp, ProtectedOperationKind POK, til::SExpr *Self,
1791 SourceLocation Loc) {
1793 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1794 if (Cp.isInvalid()) {
1795 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1796 return;
1797 } else if (Cp.shouldIgnore()) {
1798 return;
1799 }
1800
1801 if (Cp.negative()) {
1802 // Negative capabilities act like locks excluded
1803 const FactEntry *LDat = FSet.findLock(FactMan, !Cp);
1804 if (LDat) {
1806 (!Cp).toString(), Loc);
1807 return;
1808 }
1809
1810 // If this does not refer to a negative capability in the same class,
1811 // then stop here.
1812 if (!inCurrentScope(Cp))
1813 return;
1814
1815 // Otherwise the negative requirement must be propagated to the caller.
1816 LDat = FSet.findLock(FactMan, Cp);
1817 if (!LDat) {
1818 Handler.handleNegativeNotHeld(D, Cp.toString(), Loc);
1819 }
1820 return;
1821 }
1822
1823 const FactEntry *LDat = FSet.findLockUniv(FactMan, Cp);
1824 bool NoError = true;
1825 if (!LDat) {
1826 // No exact match found. Look for a partial match.
1827 LDat = FSet.findPartialMatch(FactMan, Cp);
1828 if (LDat) {
1829 // Warn that there's no precise match.
1830 std::string PartMatchStr = LDat->toString();
1831 StringRef PartMatchName(PartMatchStr);
1832 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc,
1833 &PartMatchName);
1834 } else {
1835 // Warn that there's no match at all.
1836 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1837 }
1838 NoError = false;
1839 }
1840 // Make sure the mutex we found is the right kind.
1841 if (NoError && LDat && !LDat->isAtLeast(LK)) {
1842 Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(), LK, Loc);
1843 }
1844}
1845
1846 /// Warn if the LSet contains the given lock.
/// Translates the attribute expression \p MutexExp (e.g. from a
/// LOCKS_EXCLUDED annotation) into a capability; if that capability is
/// currently held in \p FSet, a diagnostic is issued via the handler.
/// Untranslatable expressions are reported once; ignorable ones are skipped.
1847 void ThreadSafetyAnalyzer::warnIfMutexHeld(const FactSet &FSet,
1848 const NamedDecl *D, const Expr *Exp,
1849 Expr *MutexExp, til::SExpr *Self,
1850 SourceLocation Loc) {
1851 CapabilityExpr Cp = SxBuilder.translateAttrExpr(MutexExp, D, Exp, Self);
1852 if (Cp.isInvalid()) {
1853 warnInvalidLock(Handler, MutexExp, D, Exp, Cp.getKind());
1854 return;
1855 } else if (Cp.shouldIgnore()) {
1856 return;
1857 }
1858
// If the excluded capability is in the current fact set, report it.
1859 const FactEntry *LDat = FSet.findLock(FactMan, Cp);
1860 if (LDat) {
// NOTE(review): the handler-call line (source line 1861) was elided from
// this extraction; only its trailing argument line remains below.
1862 Cp.toString(), Loc);
1863 }
1864 }
1865
1866 /// Checks guarded_by and pt_guarded_by attributes.
1867 /// Whenever we identify an access (read or write) to a DeclRefExpr that is
1868 /// marked with guarded_by, we must ensure the appropriate mutexes are held.
1869 /// Similarly, we check if the access is to an expression that dereferences
1870 /// a pointer marked with pt_guarded_by.
1871 void ThreadSafetyAnalyzer::checkAccess(const FactSet &FSet, const Expr *Exp,
1872 AccessKind AK,
// NOTE(review): the ProtectedOperationKind parameter line (source line 1873,
// declaring POK) was elided from this extraction; the body uses POK below.
1874 Exp = Exp->IgnoreImplicit()->IgnoreParenCasts();
1875
1876 SourceLocation Loc = Exp->getExprLoc();
1877
1878 // Local variables of reference type cannot be re-assigned;
1879 // map them to their initializer.
1880 while (const auto *DRE = dyn_cast<DeclRefExpr>(Exp)) {
1881 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()->getCanonicalDecl());
1882 if (VD && VD->isLocalVarDecl() && VD->getType()->isReferenceType()) {
1883 if (const auto *E = VD->getInit()) {
1884 // Guard against self-initialization. e.g., int &i = i;
1885 if (E == Exp)
1886 break;
1887 Exp = E->IgnoreImplicit()->IgnoreParenCasts();
1888 continue;
1889 }
1890 }
1891 break;
1892 }
1893
// A '*p' access goes through the pointer, so delegate to the
// pt_guarded_by logic in checkPtAccess.
1894 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1895 // For dereferences
1896 if (UO->getOpcode() == UO_Deref)
1897 checkPtAccess(FSet, UO->getSubExpr(), AK, POK);
1898 return;
1899 }
1900
// Pointer-to-member operators: '.*' accesses the object itself, '->*'
// accesses through a pointer.
1901 if (const auto *BO = dyn_cast<BinaryOperator>(Exp)) {
1902 switch (BO->getOpcode()) {
1903 case BO_PtrMemD: // .*
1904 return checkAccess(FSet, BO->getLHS(), AK, POK);
1905 case BO_PtrMemI: // ->*
1906 return checkPtAccess(FSet, BO->getLHS(), AK, POK);
1907 default:
1908 return;
1909 }
1910 }
1911
1912 if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
1913 checkPtAccess(FSet, AE->getLHS(), AK, POK);
1914 return;
1915 }
1916
// Deliberately no 'return' in this branch: after checking the base object
// we fall through to also check guarded_by attributes on the member itself.
1917 if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
1918 if (ME->isArrow())
1919 checkPtAccess(FSet, ME->getBase(), AK, POK);
1920 else
1921 checkAccess(FSet, ME->getBase(), AK, POK);
1922 }
1923
1924 const ValueDecl *D = getValueDecl(Exp);
1925 if (!D || !D->hasAttrs())
1926 return;
1927
// guarded_var (no specific mutex named): warn only when nothing is held.
1928 if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(FactMan)) {
1929 Handler.handleNoMutexHeld(D, POK, AK, Loc);
1930 }
1931
// guarded_by(mu): each named mutex must be held for this access.
1932 for (const auto *I : D->specific_attrs<GuardedByAttr>())
1933 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), POK, nullptr, Loc);
1934 }
1935
1936 /// Checks pt_guarded_by and pt_guarded_var attributes.
1937 /// POK is the same operationKind that was passed to checkAccess.
1938 void ThreadSafetyAnalyzer::checkPtAccess(const FactSet &FSet, const Expr *Exp,
1939 AccessKind AK,
// NOTE(review): the ProtectedOperationKind parameter line (source line 1940,
// declaring POK) was elided from this extraction.
1941 // Strip off paren- and cast-expressions, checking if we encounter any other
1942 // operator that should be delegated to checkAccess() instead.
1943 while (true) {
1944 if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
1945 Exp = PE->getSubExpr();
1946 continue;
1947 }
1948 if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
1949 if (CE->getCastKind() == CK_ArrayToPointerDecay) {
1950 // If it's an actual array, and not a pointer, then it's elements
1951 // are protected by GUARDED_BY, not PT_GUARDED_BY;
1952 checkAccess(FSet, CE->getSubExpr(), AK, POK);
1953 return;
1954 }
1955 Exp = CE->getSubExpr();
1956 continue;
1957 }
1958 break;
1959 }
1960
1961 if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
1962 if (UO->getOpcode() == UO_AddrOf) {
1963 // Pointer access via pointer taken of variable, so the dereferenced
1964 // variable is not actually a pointer.
1965 checkAccess(FSet, UO->getSubExpr(), AK, POK);
1966 return;
1967 }
1968 }
1969
1970 // Pass by reference/pointer warnings are under a different flag.
// NOTE(review): the declaration of PtPOK (source line 1971) was elided from
// this extraction; the switch below maps POK to its pointer-access
// counterpart (e.g. POK_PassByRef -> POK_PtPassByRef).
1972 switch (POK) {
1973 case POK_PassByRef:
1974 PtPOK = POK_PtPassByRef;
1975 break;
1976 case POK_ReturnByRef:
1977 PtPOK = POK_PtReturnByRef;
1978 break;
1979 case POK_PassPointer:
1980 PtPOK = POK_PtPassPointer;
1981 break;
1982 case POK_ReturnPointer:
1983 PtPOK = POK_PtReturnPointer;
1984 break;
1985 default:
1986 break;
1987 }
1988
1989 const ValueDecl *D = getValueDecl(Exp);
1990 if (!D || !D->hasAttrs())
1991 return;
1992
// pt_guarded_var (no mutex named): warn only when nothing at all is held.
1993 if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(FactMan))
1994 Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
1995
// pt_guarded_by(mu): each named mutex must be held for this dereference.
1996 for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
1997 warnIfMutexNotHeld(FSet, D, Exp, AK, I->getArg(), PtPOK, nullptr,
1998 Exp->getExprLoc());
1999 }
2000
2001 /// Process a function call, method call, constructor call,
2002 /// or destructor call. This involves looking at the attributes on the
2003 /// corresponding function/method/constructor/destructor, issuing warnings,
2004 /// and updating the locksets accordingly.
2005 ///
2006 /// FIXME: For classes annotated with one of the guarded annotations, we need
2007 /// to treat const method calls as reads and non-const method calls as writes,
2008 /// and check that the appropriate locks are held. Non-const method calls with
2009 /// the same signature as const method calls can be also treated as reads.
2010 ///
2011 /// \param Exp The call expression.
2012 /// \param D The callee declaration.
2013 /// \param Self If \p Exp = nullptr, the implicit this argument or the argument
2014 /// of an implicitly called cleanup function.
2015 /// \param Loc If \p Exp = nullptr, the location.
2016 void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
2017 til::SExpr *Self, SourceLocation Loc) {
// Locks to add/remove are collected first and applied at the end, so that
// the order of attributes on the declaration does not matter.
2018 CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
2019 CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
2020 CapExprSet ScopedReqsAndExcludes;
2021
2022 // Figure out if we're constructing an object of scoped lockable class
2023 CapabilityExpr Scp;
2024 if (Exp) {
2025 assert(!Self);
2026 const auto *TagT = Exp->getType()->getAs<TagType>();
2027 if (D->hasAttrs() && TagT && Exp->isPRValue()) {
// Record the constructed temporary under a placeholder; VisitDeclStmt /
// VisitMaterializeTemporaryExpr later bind it to the actual declaration.
2028 til::LiteralPtr *Placeholder =
2029 Analyzer->SxBuilder.createThisPlaceholder();
2030 [[maybe_unused]] auto inserted =
2031 Analyzer->ConstructedObjects.insert({Exp, Placeholder});
2032 assert(inserted.second && "Are we visiting the same expression again?");
2033 if (isa<CXXConstructExpr>(Exp))
2034 Self = Placeholder;
2035 if (TagT->getOriginalDecl()
2036 ->getMostRecentDecl()
2037 ->hasAttr<ScopedLockableAttr>())
2038 Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
2039 }
2040
2041 assert(Loc.isInvalid());
2042 Loc = Exp->getExprLoc();
2043 }
2044
// First pass: thread-safety attributes on the callee declaration itself.
2045 for(const Attr *At : D->attrs()) {
2046 switch (At->getKind()) {
2047 // When we encounter a lock function, we need to add the lock to our
2048 // lockset.
2049 case attr::AcquireCapability: {
2050 const auto *A = cast<AcquireCapabilityAttr>(At);
2051 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2052 : ExclusiveLocksToAdd,
2053 A, Exp, D, Self);
2054 break;
2055 }
2056
2057 // An assert will add a lock to the lockset, but will not generate
2058 // a warning if it is already there, and will not generate a warning
2059 // if it is not removed.
2060 case attr::AssertCapability: {
2061 const auto *A = cast<AssertCapabilityAttr>(At);
2062 CapExprSet AssertLocks;
2063 Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
2064 for (const auto &AssertLock : AssertLocks)
2065 Analyzer->addLock(
2066 FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2067 AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
2068 Loc, FactEntry::Asserted));
2069 break;
2070 }
2071
2072 // When we encounter an unlock function, we need to remove unlocked
2073 // mutexes from the lockset, and flag a warning if they are not there.
2074 case attr::ReleaseCapability: {
2075 const auto *A = cast<ReleaseCapabilityAttr>(At);
2076 if (A->isGeneric())
2077 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2078 else if (A->isShared())
2079 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2080 else
2081 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2082 break;
2083 }
2084
2085 case attr::RequiresCapability: {
2086 const auto *A = cast<RequiresCapabilityAttr>(At);
2087 for (auto *Arg : A->args()) {
2088 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2089 A->isShared() ? AK_Read : AK_Written,
2090 Arg, POK_FunctionCall, Self, Loc);
2091 // use for adopting a lock
2092 if (!Scp.shouldIgnore())
2093 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2094 }
2095 break;
2096 }
2097
2098 case attr::LocksExcluded: {
2099 const auto *A = cast<LocksExcludedAttr>(At);
2100 for (auto *Arg : A->args()) {
2101 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2102 // use for deferring a lock
2103 if (!Scp.shouldIgnore())
2104 Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, Self);
2105 }
2106 break;
2107 }
2108
2109 // Ignore attributes unrelated to thread-safety
2110 default:
2111 break;
2112 }
2113 }
2114
// Second pass: attributes attached to the callee's parameters (used when a
// scoped capability object is passed as an argument).
2115 std::optional<CallExpr::const_arg_range> Args;
2116 if (Exp) {
2117 if (const auto *CE = dyn_cast<CallExpr>(Exp))
2118 Args = CE->arguments();
2119 else if (const auto *CE = dyn_cast<CXXConstructExpr>(Exp))
2120 Args = CE->arguments();
2121 else
2122 llvm_unreachable("Unknown call kind");
2123 }
2124 const auto *CalledFunction = dyn_cast<FunctionDecl>(D);
2125 if (CalledFunction && Args.has_value()) {
2126 for (auto [Param, Arg] : zip(CalledFunction->parameters(), *Args)) {
2127 CapExprSet DeclaredLocks;
2128 for (const Attr *At : Param->attrs()) {
2129 switch (At->getKind()) {
2130 case attr::AcquireCapability: {
2131 const auto *A = cast<AcquireCapabilityAttr>(At);
2132 Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
2133 : ExclusiveLocksToAdd,
2134 A, Exp, D, Self);
2135 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2136 break;
2137 }
2138
2139 case attr::ReleaseCapability: {
2140 const auto *A = cast<ReleaseCapabilityAttr>(At);
2141 if (A->isGeneric())
2142 Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, Self);
2143 else if (A->isShared())
2144 Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, Self);
2145 else
2146 Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, Self);
2147 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2148 break;
2149 }
2150
2151 case attr::RequiresCapability: {
2152 const auto *A = cast<RequiresCapabilityAttr>(At);
2153 for (auto *Arg : A->args())
2154 Analyzer->warnIfMutexNotHeld(FSet, D, Exp,
2155 A->isShared() ? AK_Read : AK_Written,
2156 Arg, POK_FunctionCall, Self, Loc);
2157 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2158 break;
2159 }
2160
2161 case attr::LocksExcluded: {
2162 const auto *A = cast<LocksExcludedAttr>(At);
2163 for (auto *Arg : A->args())
2164 Analyzer->warnIfMutexHeld(FSet, D, Exp, Arg, Self, Loc);
2165 Analyzer->getMutexIDs(DeclaredLocks, A, Exp, D, Self);
2166 break;
2167 }
2168
2169 default:
2170 break;
2171 }
2172 }
2173 if (DeclaredLocks.empty())
2174 continue;
// The argument must be a scoped capability currently in the fact set; its
// underlying mutexes must match the locks declared on the parameter.
2175 CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
2176 StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
2177 if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
2178 Cp.isInvalid() && CBTE) {
2179 if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
2180 Object != Analyzer->ConstructedObjects.end())
2181 Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
2182 /*Reentrant=*/false);
2183 }
2184 const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
2185 if (!Fact) {
2186 Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK_FunctionCall,
2187 Cp.toString(), LK_Exclusive,
2188 Exp->getExprLoc());
2189 continue;
2190 }
// Compare declared locks against the scope's underlying mutexes pairwise;
// zip_longest yields empty optionals where one sequence is shorter.
2191 const auto *Scope = cast<ScopedLockableFactEntry>(Fact);
2192 for (const auto &[a, b] :
2193 zip_longest(DeclaredLocks, Scope->getUnderlyingMutexes())) {
2194 if (!a.has_value()) {
2195 Analyzer->Handler.handleExpectFewerUnderlyingMutexes(
2196 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2197 b.value().getKind(), b.value().toString());
2198 } else if (!b.has_value()) {
2199 Analyzer->Handler.handleExpectMoreUnderlyingMutexes(
2200 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2201 a.value().getKind(), a.value().toString());
2202 } else if (!a.value().equals(b.value())) {
2203 Analyzer->Handler.handleUnmatchedUnderlyingMutexes(
2204 Exp->getExprLoc(), D->getLocation(), Scope->toString(),
2205 a.value().getKind(), a.value().toString(), b.value().toString());
2206 break;
2207 }
2208 }
2209 }
2210 }
2211 // Remove locks first to allow lock upgrading/downgrading.
2212 // FIXME -- should only fully remove if the attribute refers to 'this'.
2213 bool Dtor = isa<CXXDestructorDecl>(D);
2214 for (const auto &M : ExclusiveLocksToRemove)
2215 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
2216 for (const auto &M : SharedLocksToRemove)
2217 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
2218 for (const auto &M : GenericLocksToRemove)
2219 Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
2220
2221 // Add locks.
// Locks acquired while constructing a scoped lockable are managed by it.
2222 FactEntry::SourceKind Source =
2223 !Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
2224 for (const auto &M : ExclusiveLocksToAdd)
2225 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2226 M, LK_Exclusive, Loc, Source));
2227 for (const auto &M : SharedLocksToAdd)
2228 Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
2229 M, LK_Shared, Loc, Source));
2230
2231 if (!Scp.shouldIgnore()) {
2232 // Add the managing object as a dummy mutex, mapped to the underlying mutex.
2233 auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
2234 Scp, Loc, FactEntry::Acquired,
2235 ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
2236 ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
2237 SharedLocksToRemove.size());
2238 for (const auto &M : ExclusiveLocksToAdd)
2239 ScopedEntry->addLock(M);
2240 for (const auto &M : SharedLocksToAdd)
2241 ScopedEntry->addLock(M);
2242 for (const auto &M : ScopedReqsAndExcludes)
2243 ScopedEntry->addLock(M);
2244 for (const auto &M : ExclusiveLocksToRemove)
2245 ScopedEntry->addExclusiveUnlock(M);
2246 for (const auto &M : SharedLocksToRemove)
2247 ScopedEntry->addSharedUnlock(M);
2248 Analyzer->addLock(FSet, ScopedEntry);
2249 }
2250 }
2251
2252/// For unary operations which read and write a variable, we need to
2253/// check whether we hold any required mutexes. Reads are checked in
2254/// VisitCastExpr.
2255void BuildLockset::VisitUnaryOperator(const UnaryOperator *UO) {
2256 switch (UO->getOpcode()) {
2257 case UO_PostDec:
2258 case UO_PostInc:
2259 case UO_PreDec:
2260 case UO_PreInc:
2261 checkAccess(UO->getSubExpr(), AK_Written);
2262 break;
2263 default:
2264 break;
2265 }
2266}
2267
2268/// For binary operations which assign to a variable (writes), we need to check
2269/// whether we hold any required mutexes.
2270/// FIXME: Deal with non-primitive types.
2271void BuildLockset::VisitBinaryOperator(const BinaryOperator *BO) {
2272 if (!BO->isAssignmentOp())
2273 return;
2274
2275 updateLocalVarMapCtx(BO);
2276 checkAccess(BO->getLHS(), AK_Written);
2277}
2278
2279/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
2280/// need to ensure we hold any required mutexes.
2281/// FIXME: Deal with non-primitive types.
2282void BuildLockset::VisitCastExpr(const CastExpr *CE) {
2283 if (CE->getCastKind() != CK_LValueToRValue)
2284 return;
2285 checkAccess(CE->getSubExpr(), AK_Read);
2286}
2287
2288 void BuildLockset::examineArguments(const FunctionDecl *FD,
// NOTE(review): the iterator parameter lines (source lines 2289-2290,
// declaring ArgBegin and ArgEnd) were elided from this extraction.
2291 bool SkipFirstParam) {
2292 // Currently we can't do anything if we don't know the function declaration.
2293 if (!FD)
2294 return;
2295
2296 // NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
2297 // only turns off checking within the body of a function, but we also
2298 // use it to turn off checking in arguments to the function. This
2299 // could result in some false negatives, but the alternative is to
2300 // create yet another attribute.
2301 if (FD->hasAttr<NoThreadSafetyAnalysisAttr>())
2302 return;
2303
2304 const ArrayRef<ParmVarDecl *> Params = FD->parameters();
2305 auto Param = Params.begin();
2306 if (SkipFirstParam)
2307 ++Param;
2308
2309 // There can be default arguments, so we stop when one iterator is at end().
2310 for (auto Arg = ArgBegin; Param != Params.end() && Arg != ArgEnd;
2311 ++Param, ++Arg) {
// Reference parameters read the argument in place; pointer parameters may
// later be dereferenced, so route them to the pt_guarded_by check.
2312 QualType Qt = (*Param)->getType();
2313 if (Qt->isReferenceType())
2314 checkAccess(*Arg, AK_Read, POK_PassByRef);
2315 else if (Qt->isPointerType())
2316 checkPtAccess(*Arg, AK_Read, POK_PassPointer);
2317 }
2318 }
2319
/// Dispatches on the call kind: member calls check the implicit object
/// argument, overloaded operators check reads/writes implied by the
/// operator, and all calls have their arguments examined and their
/// thread-safety attributes processed via handleCall.
2320 void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
2321 updateLocalVarMapCtx(Exp);
2322
2323 if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
2324 const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
2325 // ME can be null when calling a method pointer
2326 const CXXMethodDecl *MD = CE->getMethodDecl();
2327
2328 if (ME && MD) {
2329 if (ME->isArrow()) {
2330 // Should perhaps be AK_Written if !MD->isConst().
2331 checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
2332 } else {
2333 // Should perhaps be AK_Written if !MD->isConst().
2334 checkAccess(CE->getImplicitObjectArgument(), AK_Read);
2335 }
2336 }
2337
2338 examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
2339 } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
2340 OverloadedOperatorKind OEop = OE->getOperator();
2341 switch (OEop) {
// Compound assignments read the RHS (arg 1), then fall through to the
// write of the target (arg 0), which plain ++/-- also perform.
2342 case OO_Equal:
2343 case OO_PlusEqual:
2344 case OO_MinusEqual:
2345 case OO_StarEqual:
2346 case OO_SlashEqual:
2347 case OO_PercentEqual:
2348 case OO_CaretEqual:
2349 case OO_AmpEqual:
2350 case OO_PipeEqual:
2351 case OO_LessLessEqual:
2352 case OO_GreaterGreaterEqual:
2353 checkAccess(OE->getArg(1), AK_Read);
2354 [[fallthrough]];
2355 case OO_PlusPlus:
2356 case OO_MinusMinus:
2357 checkAccess(OE->getArg(0), AK_Written);
2358 break;
2359 case OO_Star:
2360 case OO_ArrowStar:
2361 case OO_Arrow:
2362 case OO_Subscript:
2363 if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
2364 // Grrr. operator* can be multiplication...
2365 checkPtAccess(OE->getArg(0), AK_Read);
2366 }
2367 [[fallthrough]];
2368 default: {
2369 // TODO: get rid of this, and rely on pass-by-ref instead.
2370 const Expr *Obj = OE->getArg(0);
2371 checkAccess(Obj, AK_Read);
2372 // Check the remaining arguments. For method operators, the first
2373 // argument is the implicit self argument, and doesn't appear in the
2374 // FunctionDecl, but for non-methods it does.
2375 const FunctionDecl *FD = OE->getDirectCallee();
2376 examineArguments(FD, std::next(OE->arg_begin()), OE->arg_end(),
2377 /*SkipFirstParam*/ !isa<CXXMethodDecl>(FD));
2378 break;
2379 }
2380 }
2381 } else {
2382 examineArguments(Exp->getDirectCallee(), Exp->arg_begin(), Exp->arg_end());
2383 }
2384
// Finally, process acquire/release/requires/excludes attributes (if any)
// on the callee.
2385 auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
2386 if (!D)
2387 return;
2388 handleCall(Exp, D);
2389 }
2390
2391void BuildLockset::VisitCXXConstructExpr(const CXXConstructExpr *Exp) {
2392 const CXXConstructorDecl *D = Exp->getConstructor();
2393 if (D && D->isCopyConstructor()) {
2394 const Expr* Source = Exp->getArg(0);
2395 checkAccess(Source, AK_Read);
2396 } else {
2397 examineArguments(D, Exp->arg_begin(), Exp->arg_end());
2398 }
2399 if (D && D->hasAttrs())
2400 handleCall(Exp, D);
2401}
2402
2403static const Expr *UnpackConstruction(const Expr *E) {
2404 if (auto *CE = dyn_cast<CastExpr>(E))
2405 if (CE->getCastKind() == CK_NoOp)
2406 E = CE->getSubExpr()->IgnoreParens();
2407 if (auto *CE = dyn_cast<CastExpr>(E))
2408 if (CE->getCastKind() == CK_ConstructorConversion ||
2409 CE->getCastKind() == CK_UserDefinedConversion)
2410 E = CE->getSubExpr();
2411 if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
2412 E = BTE->getSubExpr();
2413 return E;
2414}
2415
2416void BuildLockset::VisitDeclStmt(const DeclStmt *S) {
2417 updateLocalVarMapCtx(S);
2418
2419 for (auto *D : S->getDeclGroup()) {
2420 if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
2421 const Expr *E = VD->getInit();
2422 if (!E)
2423 continue;
2424 E = E->IgnoreParens();
2425
2426 // handle constructors that involve temporaries
2427 if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
2428 E = EWC->getSubExpr()->IgnoreParens();
2429 E = UnpackConstruction(E);
2430
2431 if (auto Object = Analyzer->ConstructedObjects.find(E);
2432 Object != Analyzer->ConstructedObjects.end()) {
2433 Object->second->setClangDecl(VD);
2434 Analyzer->ConstructedObjects.erase(Object);
2435 }
2436 }
2437 }
2438}
2439
/// A lifetime-extended temporary: attach the constructed object recorded in
/// ConstructedObjects to the extending declaration, so scoped capabilities
/// held by the temporary are tracked under that declaration.
2440 void BuildLockset::VisitMaterializeTemporaryExpr(
2441 const MaterializeTemporaryExpr *Exp) {
2442 if (const ValueDecl *ExtD = Exp->getExtendingDecl()) {
2443 if (auto Object = Analyzer->ConstructedObjects.find(
// NOTE(review): the argument line of this find() call (source line 2444)
// was elided from this extraction.
2445 Object != Analyzer->ConstructedObjects.end()) {
2446 Object->second->setClangDecl(ExtD);
2447 Analyzer->ConstructedObjects.erase(Object);
2448 }
2449 }
2450 }
2451
/// Returning by reference or pointer can expose guarded data to the caller;
/// check the returned expression against the expected lockset at function
/// exit rather than the current block's lockset.
2452 void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
// Only meaningful when the analyzed declaration is a FunctionDecl.
2453 if (Analyzer->CurrentFunction == nullptr)
2454 return;
2455 const Expr *RetVal = S->getRetValue();
2456 if (!RetVal)
2457 return;
2458
2459 // If returning by reference or pointer, check that the function requires the
2460 // appropriate capabilities.
2461 const QualType ReturnType =
2462 Analyzer->CurrentFunction->getReturnType().getCanonicalType();
2463 if (ReturnType->isLValueReferenceType()) {
2464 Analyzer->checkAccess(
2465 FunctionExitFSet, RetVal,
// NOTE(review): the AccessKind/ProtectedOperationKind argument lines
// (source lines 2466-2467) were elided from this extraction.
2468 } else if (ReturnType->isPointerType()) {
2469 Analyzer->checkPtAccess(
2470 FunctionExitFSet, RetVal,
// NOTE(review): the AccessKind/ProtectedOperationKind argument lines
// (source lines 2471-2472) were elided from this extraction.
2473 }
2474 }
2475
2476 /// Given two facts merging on a join point, possibly warn and decide whether to
2477 /// keep or replace.
2478 ///
2479 /// \return false if we should keep \p A, true if we should take \p B.
/// \param A The fact already in the entry set.
/// \param B The fact from the exit set of a predecessor block.
/// \param JoinLoc Location of the join point, for diagnostics.
/// \param EntryLEK Kind of lock error to report on a mismatch.
2480 bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
2481 SourceLocation JoinLoc,
2482 LockErrorKind EntryLEK) {
2483 // Whether we can replace \p A by \p B.
2484 const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
// Reentrancy depths only apply to LockableFactEntry; all other fact kinds
// are treated as depth 0.
2485 unsigned int ReentrancyDepthA = 0;
2486 unsigned int ReentrancyDepthB = 0;
2487
2488 if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
2489 ReentrancyDepthA = LFE->getReentrancyDepth();
2490 if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
2491 ReentrancyDepthB = LFE->getReentrancyDepth();
2492
2493 if (ReentrancyDepthA != ReentrancyDepthB) {
2494 Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
2495 JoinLoc, EntryLEK,
2496 /*ReentrancyMismatch=*/true);
2497 // Pick the FactEntry with the greater reentrancy depth as the "good"
2498 // fact to reduce potential later warnings.
2499 return CanModify && ReentrancyDepthA < ReentrancyDepthB;
2500 } else if (A.kind() != B.kind()) {
2501 // For managed capabilities, the destructor should unlock in the right mode
2502 // anyway. For asserted capabilities no unlocking is needed.
2503 if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
2504 // The shared capability subsumes the exclusive capability, if possible.
2505 bool ShouldTakeB = B.kind() == LK_Shared;
2506 if (CanModify || !ShouldTakeB)
2507 return ShouldTakeB;
2508 }
2509 Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
2510 A.loc());
2511 // Take the exclusive capability to reduce further warnings.
2512 return CanModify && B.kind() == LK_Exclusive;
2513 } else {
2514 // The non-asserted capability is the one we want to track.
2515 return CanModify && A.asserted() && !B.asserted();
2516 }
2517 }
2518
2519 /// Compute the intersection of two locksets and issue warnings for any
2520 /// locks in the symmetric difference.
2521 ///
2522 /// This function is used at a merge point in the CFG when comparing the lockset
2523 /// of each branch being merged. For example, given the following sequence:
2524 /// A; if () then B; else C; D; we need to check that the lockset after B and C
2525 /// are the same. In the event of a difference, we use the intersection of these
2526 /// two locksets at the start of D.
2527 ///
2528 /// \param EntrySet A lockset for entry into a (possibly new) block.
2529 /// \param ExitSet The lockset on exiting a preceding block.
2530 /// \param JoinLoc The location of the join point for error reporting
2531 /// \param EntryLEK The warning if a mutex is missing from \p EntrySet.
2532 /// \param ExitLEK The warning if a mutex is missing from \p ExitSet.
2533 void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
2534 const FactSet &ExitSet,
2535 SourceLocation JoinLoc,
2536 LockErrorKind EntryLEK,
2537 LockErrorKind ExitLEK) {
// Snapshot EntrySet before mutation so the second loop iterates a stable set.
2538 FactSet EntrySetOrig = EntrySet;
2539
2540 // Find locks in ExitSet that conflict or are not in EntrySet, and warn.
2541 for (const auto &Fact : ExitSet) {
2542 const FactEntry &ExitFact = FactMan[Fact];
2543
2544 FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
2545 if (EntryIt != EntrySet.end()) {
// Present in both sets: join() decides which fact to keep.
2546 if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
2547 *EntryIt = Fact;
2548 } else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
2549 ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
2550 EntryLEK, Handler);
2551 }
2552 }
2553
2554 // Find locks in EntrySet that are not in ExitSet, and remove them.
2555 for (const auto &Fact : EntrySetOrig) {
2556 const FactEntry *EntryFact = &FactMan[Fact];
2557 const FactEntry *ExitFact = ExitSet.findLock(FactMan, *EntryFact);
2558
2559 if (!ExitFact) {
2560 if (!EntryFact->managed() || ExitLEK == LEK_LockedSomeLoopIterations ||
// NOTE(review): the remainder of this condition (source line 2561) was
// elided from this extraction.
2562 EntryFact->handleRemovalFromIntersection(EntrySetOrig, FactMan, JoinLoc,
2563 ExitLEK, Handler);
2564 if (ExitLEK == LEK_LockedSomePredecessors)
2565 EntrySet.removeLock(FactMan, *EntryFact);
2566 }
2567 }
2568 }
2569
2570// Return true if block B never continues to its successors.
2571static bool neverReturns(const CFGBlock *B) {
2572 if (B->hasNoReturnElement())
2573 return true;
2574 if (B->empty())
2575 return false;
2576
2577 CFGElement Last = B->back();
2578 if (std::optional<CFGStmt> S = Last.getAs<CFGStmt>()) {
2579 if (isa<CXXThrowExpr>(S->getStmt()))
2580 return true;
2581 }
2582 return false;
2583}
2584
2585/// Check a function's CFG for thread-safety violations.
2586///
2587/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2588/// at the end of each block, and issue warnings for thread safety violations.
2589/// Each block in the CFG is traversed exactly once.
2590void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
2591 // TODO: this whole function needs be rewritten as a visitor for CFGWalker.
2592 // For now, we just use the walker to set things up.
2593 threadSafety::CFGWalker walker;
2594 if (!walker.init(AC))
2595 return;
2596
2597 // AC.dumpCFG(true);
2598 // threadSafety::printSCFG(walker);
2599
2600 CFG *CFGraph = walker.getGraph();
2601 const NamedDecl *D = walker.getDecl();
2602 CurrentFunction = dyn_cast<FunctionDecl>(D);
2603
2604 if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
2605 return;
2606
2607 // FIXME: Do something a bit more intelligent inside constructor and
2608 // destructor code. Constructors and destructors must assume unique access
2609 // to 'this', so checks on member variable access is disabled, but we should
2610 // still enable checks on other objects.
2612 return; // Don't check inside constructors.
2614 return; // Don't check inside destructors.
2615
2616 Handler.enterFunction(CurrentFunction);
2617
2618 BlockInfo.resize(CFGraph->getNumBlockIDs(),
2619 CFGBlockInfo::getEmptyBlockInfo(LocalVarMap));
2620
2621 // We need to explore the CFG via a "topological" ordering.
2622 // That way, we will be guaranteed to have information about required
2623 // predecessor locksets when exploring a new block.
2624 const PostOrderCFGView *SortedGraph = walker.getSortedGraph();
2625 PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
2626
2627 CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
2628 CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
2629
2630 // Mark entry block as reachable
2631 Initial.Reachable = true;
2632
2633 // Compute SSA names for local variables
2634 LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
2635
2636 // Fill in source locations for all CFGBlocks.
2637 findBlockLocations(CFGraph, SortedGraph, BlockInfo);
2638
2639 CapExprSet ExclusiveLocksAcquired;
2640 CapExprSet SharedLocksAcquired;
2641 CapExprSet LocksReleased;
2642
2643 // Add locks from exclusive_locks_required and shared_locks_required
2644 // to initial lockset. Also turn off checking for lock and unlock functions.
2645 // FIXME: is there a more intelligent way to check lock/unlock functions?
2646 if (!SortedGraph->empty()) {
2647 assert(*SortedGraph->begin() == &CFGraph->getEntry());
2648 FactSet &InitialLockset = Initial.EntrySet;
2649
2650 CapExprSet ExclusiveLocksToAdd;
2651 CapExprSet SharedLocksToAdd;
2652
2653 SourceLocation Loc = D->getLocation();
2654 for (const auto *Attr : D->attrs()) {
2655 Loc = Attr->getLocation();
2656 if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2657 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2658 nullptr, D);
2659 } else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2660 // UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
2661 // We must ignore such methods.
2662 if (A->args_size() == 0)
2663 return;
2664 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2665 nullptr, D);
2666 getMutexIDs(LocksReleased, A, nullptr, D);
2667 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2668 if (A->args_size() == 0)
2669 return;
2670 getMutexIDs(A->isShared() ? SharedLocksAcquired
2671 : ExclusiveLocksAcquired,
2672 A, nullptr, D);
2673 } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
2674 // Don't try to check trylock functions for now.
2675 return;
2676 }
2677 }
2678 ArrayRef<ParmVarDecl *> Params;
2679 if (CurrentFunction)
2680 Params = CurrentFunction->getCanonicalDecl()->parameters();
2681 else if (auto CurrentMethod = dyn_cast<ObjCMethodDecl>(D))
2682 Params = CurrentMethod->getCanonicalDecl()->parameters();
2683 else
2684 llvm_unreachable("Unknown function kind");
2685 for (const ParmVarDecl *Param : Params) {
2686 CapExprSet UnderlyingLocks;
2687 for (const auto *Attr : Param->attrs()) {
2688 Loc = Attr->getLocation();
2689 if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
2690 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2691 nullptr, Param);
2692 getMutexIDs(LocksReleased, A, nullptr, Param);
2693 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2694 } else if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
2695 getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
2696 nullptr, Param);
2697 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2698 } else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
2699 getMutexIDs(A->isShared() ? SharedLocksAcquired
2700 : ExclusiveLocksAcquired,
2701 A, nullptr, Param);
2702 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2703 } else if (const auto *A = dyn_cast<LocksExcludedAttr>(Attr)) {
2704 getMutexIDs(UnderlyingLocks, A, nullptr, Param);
2705 }
2706 }
2707 if (UnderlyingLocks.empty())
2708 continue;
2709 CapabilityExpr Cp(SxBuilder.translateVariable(Param, nullptr),
2710 StringRef(),
2711 /*Neg=*/false, /*Reentrant=*/false);
2712 auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
2713 Cp, Param->getLocation(), FactEntry::Declared,
2714 UnderlyingLocks.size());
2715 for (const CapabilityExpr &M : UnderlyingLocks)
2716 ScopedEntry->addLock(M);
2717 addLock(InitialLockset, ScopedEntry, true);
2718 }
2719
2720 // FIXME -- Loc can be wrong here.
2721 for (const auto &Mu : ExclusiveLocksToAdd) {
2722 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2723 Mu, LK_Exclusive, Loc, FactEntry::Declared);
2724 addLock(InitialLockset, Entry, true);
2725 }
2726 for (const auto &Mu : SharedLocksToAdd) {
2727 const auto *Entry = FactMan.createFact<LockableFactEntry>(
2728 Mu, LK_Shared, Loc, FactEntry::Declared);
2729 addLock(InitialLockset, Entry, true);
2730 }
2731 }
2732
2733 // Compute the expected exit set.
2734 // By default, we expect all locks held on entry to be held on exit.
2735 FactSet ExpectedFunctionExitSet = Initial.EntrySet;
2736
2737 // Adjust the expected exit set by adding or removing locks, as declared
2738 // by *-LOCK_FUNCTION and UNLOCK_FUNCTION. The intersect below will then
2739 // issue the appropriate warning.
2740 // FIXME: the location here is not quite right.
2741 for (const auto &Lock : ExclusiveLocksAcquired)
2742 ExpectedFunctionExitSet.addLock(
2743 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Exclusive,
2744 D->getLocation()));
2745 for (const auto &Lock : SharedLocksAcquired)
2746 ExpectedFunctionExitSet.addLock(
2747 FactMan, FactMan.createFact<LockableFactEntry>(Lock, LK_Shared,
2748 D->getLocation()));
2749 for (const auto &Lock : LocksReleased)
2750 ExpectedFunctionExitSet.removeLock(FactMan, Lock);
2751
2752 for (const auto *CurrBlock : *SortedGraph) {
2753 unsigned CurrBlockID = CurrBlock->getBlockID();
2754 CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
2755
2756 // Use the default initial lockset in case there are no predecessors.
2757 VisitedBlocks.insert(CurrBlock);
2758
2759 // Iterate through the predecessor blocks and warn if the lockset for all
2760 // predecessors is not the same. We take the entry lockset of the current
2761 // block to be the intersection of all previous locksets.
2762 // FIXME: By keeping the intersection, we may output more errors in future
2763 // for a lock which is not in the intersection, but was in the union. We
2764 // may want to also keep the union in future. As an example, let's say
2765 // the intersection contains Mutex L, and the union contains L and M.
2766 // Later we unlock M. At this point, we would output an error because we
2767 // never locked M; although the real error is probably that we forgot to
2768 // lock M on all code paths. Conversely, let's say that later we lock M.
2769 // In this case, we should compare against the intersection instead of the
2770 // union because the real error is probably that we forgot to unlock M on
2771 // all code paths.
2772 bool LocksetInitialized = false;
2773 for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
2774 PE = CurrBlock->pred_end(); PI != PE; ++PI) {
2775 // if *PI -> CurrBlock is a back edge
2776 if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
2777 continue;
2778
2779 unsigned PrevBlockID = (*PI)->getBlockID();
2780 CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
2781
2782 // Ignore edges from blocks that can't return.
2783 if (neverReturns(*PI) || !PrevBlockInfo->Reachable)
2784 continue;
2785
2786 // Okay, we can reach this block from the entry.
2787 CurrBlockInfo->Reachable = true;
2788
2789 FactSet PrevLockset;
2790 getEdgeLockset(PrevLockset, PrevBlockInfo->ExitSet, *PI, CurrBlock);
2791
2792 if (!LocksetInitialized) {
2793 CurrBlockInfo->EntrySet = PrevLockset;
2794 LocksetInitialized = true;
2795 } else {
2796 // Surprisingly 'continue' doesn't always produce back edges, because
2797 // the CFG has empty "transition" blocks where they meet with the end
2798 // of the regular loop body. We still want to diagnose them as loop.
2799 intersectAndWarn(
2800 CurrBlockInfo->EntrySet, PrevLockset, CurrBlockInfo->EntryLoc,
2801 isa_and_nonnull<ContinueStmt>((*PI)->getTerminatorStmt())
2804 }
2805 }
2806
2807 // Skip rest of block if it's not reachable.
2808 if (!CurrBlockInfo->Reachable)
2809 continue;
2810
2811 BuildLockset LocksetBuilder(this, *CurrBlockInfo, ExpectedFunctionExitSet);
2812
2813 // Visit all the statements in the basic block.
2814 for (const auto &BI : *CurrBlock) {
2815 switch (BI.getKind()) {
2816 case CFGElement::Statement: {
2817 CFGStmt CS = BI.castAs<CFGStmt>();
2818 LocksetBuilder.Visit(CS.getStmt());
2819 break;
2820 }
2821 // Ignore BaseDtor and MemberDtor for now.
2823 CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
2824 const auto *DD = AD.getDestructorDecl(AC.getASTContext());
2825 if (!DD->hasAttrs())
2826 break;
2827
2828 LocksetBuilder.handleCall(
2829 nullptr, DD,
2830 SxBuilder.translateVariable(AD.getVarDecl(), nullptr),
2831 AD.getTriggerStmt()->getEndLoc());
2832 break;
2833 }
2834
2836 const CFGCleanupFunction &CF = BI.castAs<CFGCleanupFunction>();
2837 LocksetBuilder.handleCall(
2838 /*Exp=*/nullptr, CF.getFunctionDecl(),
2839 SxBuilder.translateVariable(CF.getVarDecl(), nullptr),
2840 CF.getVarDecl()->getLocation());
2841 break;
2842 }
2843
2845 auto TD = BI.castAs<CFGTemporaryDtor>();
2846
2847 // Clean up constructed object even if there are no attributes to
2848 // keep the number of objects in limbo as small as possible.
2849 if (auto Object = ConstructedObjects.find(
2850 TD.getBindTemporaryExpr()->getSubExpr());
2851 Object != ConstructedObjects.end()) {
2852 const auto *DD = TD.getDestructorDecl(AC.getASTContext());
2853 if (DD->hasAttrs())
2854 // TODO: the location here isn't quite correct.
2855 LocksetBuilder.handleCall(nullptr, DD, Object->second,
2856 TD.getBindTemporaryExpr()->getEndLoc());
2857 ConstructedObjects.erase(Object);
2858 }
2859 break;
2860 }
2861 default:
2862 break;
2863 }
2864 }
2865 CurrBlockInfo->ExitSet = LocksetBuilder.FSet;
2866
2867 // For every back edge from CurrBlock (the end of the loop) to another block
2868 // (FirstLoopBlock) we need to check that the Lockset of Block is equal to
2869 // the one held at the beginning of FirstLoopBlock. We can look up the
2870 // Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
2871 for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
2872 SE = CurrBlock->succ_end(); SI != SE; ++SI) {
2873 // if CurrBlock -> *SI is *not* a back edge
2874 if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
2875 continue;
2876
2877 CFGBlock *FirstLoopBlock = *SI;
2878 CFGBlockInfo *PreLoop = &BlockInfo[FirstLoopBlock->getBlockID()];
2879 CFGBlockInfo *LoopEnd = &BlockInfo[CurrBlockID];
2880 intersectAndWarn(PreLoop->EntrySet, LoopEnd->ExitSet, PreLoop->EntryLoc,
2882 }
2883 }
2884
2885 // Skip the final check if the exit block is unreachable.
2886 if (!Final.Reachable)
2887 return;
2888
2889 // FIXME: Should we call this function for all blocks which exit the function?
2890 intersectAndWarn(ExpectedFunctionExitSet, Final.ExitSet, Final.ExitLoc,
2892
2893 Handler.leaveFunction(CurrentFunction);
2894}
2895
2896/// Check a function's CFG for thread-safety violations.
2897///
2898/// We traverse the blocks in the CFG, compute the set of mutexes that are held
2899/// at the end of each block, and issue warnings for thread safety violations.
2900/// Each block in the CFG is traversed exactly once.
2902 ThreadSafetyHandler &Handler,
2903 BeforeSet **BSet) {
2904 if (!*BSet)
2905 *BSet = new BeforeSet;
2906 ThreadSafetyAnalyzer Analyzer(Handler, *BSet);
2907 Analyzer.runAnalysis(AC);
2908}
2909
2911
2912/// Helper function that returns a LockKind required for the given level
2913/// of access.
2915 switch (AK) {
2916 case AK_Read :
2917 return LK_Shared;
2918 case AK_Written :
2919 return LK_Exclusive;
2920 }
2921 llvm_unreachable("Unknown AccessKind");
2922}
This file defines AnalysisDeclContext, a class that manages the analysis context data for context sen...
Defines enum values for all the target-independent builtin functions.
static void dump(llvm::raw_ostream &OS, StringRef FunctionName, ArrayRef< CounterExpression > Expressions, ArrayRef< CounterMappingRegion > Regions)
static Decl::Kind getKind(const Decl *D)
Defines the C++ Decl subclasses, other than those for templates (found in DeclTemplate....
Defines the clang::Expr interface and subclasses for C++ expressions.
Forward-declares and imports various common LLVM datatypes that clang wants to use unqualified.
Defines an enumeration for C++ overloaded operators.
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
Defines the clang::SourceLocation class and associated facilities.
Defines various enumerations that describe declaration and type specifiers.
static void warnInvalidLock(ThreadSafetyHandler &Handler, const Expr *MutexExp, const NamedDecl *D, const Expr *DeclExp, StringRef Kind)
Issue a warning about an invalid lock expression.
static bool getStaticBooleanValue(Expr *E, bool &TCond)
static bool neverReturns(const CFGBlock *B)
static void findBlockLocations(CFG *CFGraph, const PostOrderCFGView *SortedGraph, std::vector< CFGBlockInfo > &BlockInfo)
Find the appropriate source locations to use when producing diagnostics for each block in the CFG.
static const ValueDecl * getValueDecl(const Expr *Exp)
Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const Expr * UnpackConstruction(const Expr *E)
C Language Family Type Representation.
__device__ __2f16 b
AnalysisDeclContext contains the context data for the function, method or block under analysis.
ASTContext & getASTContext() const
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
static bool isAssignmentOp(Opcode Opc)
Definition Expr.h:4108
Opcode getOpcode() const
Definition Expr.h:4017
const VarDecl * getVarDecl() const
Definition CFG.h:423
const Stmt * getTriggerStmt() const
Definition CFG.h:428
Represents a single basic block in a source-level CFG.
Definition CFG.h:605
pred_iterator pred_end()
Definition CFG.h:973
succ_iterator succ_end()
Definition CFG.h:991
bool hasNoReturnElement() const
Definition CFG.h:1109
CFGElement back() const
Definition CFG.h:908
ElementList::const_reverse_iterator const_reverse_iterator
Definition CFG.h:903
bool empty() const
Definition CFG.h:953
succ_iterator succ_begin()
Definition CFG.h:990
Stmt * getTerminatorStmt()
Definition CFG.h:1087
AdjacentBlocks::const_iterator const_pred_iterator
Definition CFG.h:959
pred_iterator pred_begin()
Definition CFG.h:972
unsigned getBlockID() const
Definition CFG.h:1111
Stmt * getTerminatorCondition(bool StripParens=true)
Definition CFG.cpp:6381
AdjacentBlocks::const_iterator const_succ_iterator
Definition CFG.h:966
Represents a top-level expression in a basic block.
Definition CFG.h:55
@ CleanupFunction
Definition CFG.h:79
@ AutomaticObjectDtor
Definition CFG.h:72
const CXXDestructorDecl * getDestructorDecl(ASTContext &astContext) const
Definition CFG.cpp:5401
const Stmt * getStmt() const
Definition CFG.h:139
Represents a source-level, intra-procedural CFG that represents the control-flow of a Stmt.
Definition CFG.h:1222
CFGBlock & getExit()
Definition CFG.h:1333
CFGBlock & getEntry()
Definition CFG.h:1331
unsigned getNumBlockIDs() const
Returns the total number of BlockIDs allocated (which start at 0).
Definition CFG.h:1410
arg_iterator arg_begin()
Definition ExprCXX.h:1678
Expr * getArg(unsigned Arg)
Return the specified argument.
Definition ExprCXX.h:1692
arg_iterator arg_end()
Definition ExprCXX.h:1679
CXXConstructorDecl * getConstructor() const
Get the constructor that this expression will (ultimately) call.
Definition ExprCXX.h:1612
bool isCopyConstructor(unsigned &TypeQuals) const
Whether this constructor is a copy constructor (C++ [class.copy]p2, which can be used to copy the cla...
Definition DeclCXX.cpp:3008
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
ConstExprIterator const_arg_iterator
Definition Expr.h:3125
arg_iterator arg_begin()
Definition Expr.h:3134
arg_iterator arg_end()
Definition Expr.h:3137
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
unsigned getNumArgs() const
getNumArgs - Return the number of actual arguments to this call.
Definition Expr.h:3068
Decl * getCalleeDecl()
Definition Expr.h:3054
CastKind getCastKind() const
Definition Expr.h:3654
Expr * getSubExpr()
Definition Expr.h:3660
const DeclGroupRef getDeclGroup() const
Definition Stmt.h:1629
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Stmt.h:1637
bool hasAttrs() const
Definition DeclBase.h:518
llvm::iterator_range< specific_attr_iterator< T > > specific_attrs() const
Definition DeclBase.h:559
SourceLocation getLocation() const
Definition DeclBase.h:439
bool isDefinedOutsideFunctionOrMethod() const
isDefinedOutsideFunctionOrMethod - This predicate returns true if this scoped decl is defined outside...
Definition DeclBase.h:949
DeclContext * getDeclContext()
Definition DeclBase.h:448
attr_range attrs() const
Definition DeclBase.h:535
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
Expr * IgnoreParenCasts() LLVM_READONLY
Skip past any parentheses and casts which might surround this expression until reaching a fixed point...
Definition Expr.cpp:3090
Expr * IgnoreParenImpCasts() LLVM_READONLY
Skip past any parentheses and implicit casts which might surround this expression until reaching a fi...
Definition Expr.cpp:3085
Expr * IgnoreImplicit() LLVM_READONLY
Skip past any implicit AST nodes which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3073
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3081
bool isPRValue() const
Definition Expr.h:285
Expr * IgnoreCasts() LLVM_READONLY
Skip past any casts which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3069
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
const ParmVarDecl * getParamDecl(unsigned i) const
Definition Decl.h:2797
QualType getReturnType() const
Definition Decl.h:2845
ArrayRef< ParmVarDecl * > parameters() const
Definition Decl.h:2774
FunctionDecl * getCanonicalDecl() override
Retrieves the "canonical" declaration of the given declaration.
Definition Decl.cpp:3735
unsigned getNumParams() const
Return the number of parameters this function must have based on its FunctionType.
Definition Decl.cpp:3814
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4939
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4972
This represents a decl that may have a name.
Definition Decl.h:274
IdentifierInfo * getIdentifier() const
Get the identifier that names this declaration, if there is one.
Definition Decl.h:295
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
std::string getNameAsString() const
Get a human-readable name for the declaration, even if it is one of the special kinds of names (C++ c...
Definition Decl.h:317
virtual void printName(raw_ostream &OS, const PrintingPolicy &Policy) const
Pretty-print the unqualified name of this declaration.
Definition Decl.cpp:1672
QualType getCanonicalType() const
Definition TypeBase.h:8346
bool isConstQualified() const
Determine whether this type is const-qualified.
Definition TypeBase.h:8367
Expr * getRetValue()
Definition Stmt.h:3187
Encodes a location in the source.
bool isValid() const
Return true if this is a valid SourceLocation object.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceLocation getEndLoc() const LLVM_READONLY
Definition Stmt.cpp:358
void dump() const
Dumps the specified AST fragment and all subtrees to llvm::errs().
bool isPointerType() const
Definition TypeBase.h:8531
bool isReferenceType() const
Definition TypeBase.h:8555
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isLValueReferenceType() const
Definition TypeBase.h:8559
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9107
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
void checkBeforeAfter(const ValueDecl *Vd, const FactSet &FSet, ThreadSafetyAnalyzer &Analyzer, SourceLocation Loc, StringRef CapKind)
Return true if any mutexes in FSet are in the acquired_before set of Vd.
BeforeInfo * insertAttrExprs(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
Process acquired_before and acquired_after attributes on Vd.
BeforeInfo * getBeforeInfoForDecl(const ValueDecl *Vd, ThreadSafetyAnalyzer &Analyzer)
const PostOrderCFGView * getSortedGraph() const
const NamedDecl * getDecl() const
bool init(AnalysisDeclContext &AC)
bool equals(const CapabilityExpr &other) const
CapabilityExpr translateAttrExpr(const Expr *AttrExp, const NamedDecl *D, const Expr *DeclExp, til::SExpr *Self=nullptr)
Translate a clang expression in an attribute to a til::SExpr.
void setLookupLocalVarExpr(std::function< const Expr *(const NamedDecl *)> F)
til::SExpr * translate(const Stmt *S, CallingContext *Ctx)
til::SExpr * translateVariable(const VarDecl *VD, CallingContext *Ctx)
Handler class for thread safety warnings.
virtual void handleExpectMoreUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected)
Warn when we get fewer underlying mutexes than expected.
virtual void handleInvalidLockExp(SourceLocation Loc)
Warn about lock expressions which fail to resolve to lockable objects.
virtual void handleUnmatchedUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Expected, Name Actual)
Warn when an actual underlying mutex of a scoped lockable does not match the expected.
virtual void handleExpectFewerUnderlyingMutexes(SourceLocation Loc, SourceLocation DLoc, Name ScopeName, StringRef Kind, Name Actual)
Warn when we get more underlying mutexes than expected.
virtual void enterFunction(const FunctionDecl *FD)
Called by the analysis when starting analysis of a function.
virtual void handleIncorrectUnlockKind(StringRef Kind, Name LockName, LockKind Expected, LockKind Received, SourceLocation LocLocked, SourceLocation LocUnlock)
Warn about an unlock function call that attempts to unlock a lock with the incorrect lock kind.
virtual void handleMutexHeldEndOfScope(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocEndOfScope, LockErrorKind LEK, bool ReentrancyMismatch=false)
Warn about situations where a mutex is sometimes held and sometimes not.
virtual void leaveFunction(const FunctionDecl *FD)
Called by the analysis when finishing analysis of a function.
virtual void handleExclusiveAndShared(StringRef Kind, Name LockName, SourceLocation Loc1, SourceLocation Loc2)
Warn when a mutex is held exclusively and shared at the same point.
virtual void handleMutexNotHeld(StringRef Kind, const NamedDecl *D, ProtectedOperationKind POK, Name LockName, LockKind LK, SourceLocation Loc, Name *PossibleMatch=nullptr)
Warn when a protected operation occurs while the specific mutex protecting the operation is not locke...
virtual void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName, SourceLocation Loc)
Warn when a function is called while an excluded mutex is locked.
virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK, AccessKind AK, SourceLocation Loc)
Warn when a protected operation occurs while no locks are held.
virtual void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc, SourceLocation LocPreviousUnlock)
Warn about unlock function calls that do not have a prior matching lock expression.
virtual void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg, SourceLocation Loc)
Warn when acquiring a lock that the negative capability is not held.
virtual void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked, SourceLocation LocDoubleLock)
Warn about lock function calls for locks which are already held.
#define bool
Definition gpuintrin.h:32
internal::Matcher< T > traverse(TraversalKind TK, const internal::Matcher< T > &InnerMatcher)
Causes all nested matchers to be matched with the specified traversal kind.
unsigned kind
All of the diagnostics that can be emitted by the frontend.
@ CF
Indicates that the tracked object is a CF object.
bool Alloc(InterpState &S, CodePtr OpPC, const Descriptor *Desc)
Definition Interp.h:3488
bool Dec(InterpState &S, CodePtr OpPC, bool CanOverflow)
1) Pops a pointer from the stack 2) Load the value from the pointer 3) Writes the value decreased by ...
Definition Interp.h:900
bool Neg(InterpState &S, CodePtr OpPC)
Definition Interp.h:749
std::unique_ptr< DiagnosticConsumer > create(StringRef OutputFile, DiagnosticOptions &DiagOpts, bool MergeChildRecords=false)
Returns a DiagnosticConsumer that serializes diagnostics to a bitcode file.
bool matches(const til::SExpr *E1, const til::SExpr *E2)
LockKind getLockKindFromAccessKind(AccessKind AK)
Helper function that returns a LockKind required for the given level of access.
LockErrorKind
This enum distinguishes between different situations where we warn due to inconsistent locking.
@ LEK_NotLockedAtEndOfFunction
Expecting a capability to be held at the end of function.
@ LEK_LockedSomePredecessors
A capability is locked in some but not all predecessors of a CFGBlock.
@ LEK_LockedAtEndOfFunction
A capability is still locked at the end of a function.
@ LEK_LockedSomeLoopIterations
A capability is locked for some but not all loop iterations.
void threadSafetyCleanup(BeforeSet *Cache)
AccessKind
This enum distinguishes between different ways to access (read or write) a variable.
@ AK_Written
Writing a variable.
@ AK_Read
Reading a variable.
LockKind
This enum distinguishes between different kinds of lock actions.
@ LK_Shared
Shared/reader lock of a mutex.
@ LK_Exclusive
Exclusive/writer lock of a mutex.
@ LK_Generic
Can be either Shared or Exclusive.
void runThreadSafetyAnalysis(AnalysisDeclContext &AC, ThreadSafetyHandler &Handler, BeforeSet **Bset)
Check a function's CFG for thread-safety violations.
ProtectedOperationKind
This enum distinguishes between different kinds of operations that may need to be protected by locks.
@ POK_PtPassByRef
Passing a pt-guarded variable by reference.
@ POK_PassPointer
Passing pointer to a guarded variable.
@ POK_VarDereference
Dereferencing a variable (e.g. p in *p = 5;)
@ POK_PassByRef
Passing a guarded variable by reference.
@ POK_ReturnByRef
Returning a guarded variable by reference.
@ POK_PtPassPointer
Passing a pt-guarded pointer.
@ POK_PtReturnPointer
Returning a pt-guarded pointer.
@ POK_VarAccess
Reading or writing a variable (e.g. x in x = 5;)
@ POK_FunctionCall
Making a function call (e.g. fool())
@ POK_ReturnPointer
Returning pointer to a guarded variable.
@ POK_PtReturnByRef
Returning a pt-guarded variable by reference.
The JSON file list parser is used to communicate input to InstallAPI.
OverloadedOperatorKind
Enumeration specifying the different kinds of C++ overloaded operators.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ Self
'self' clause, allowed on Compute and Combined Constructs, plus 'update'.
nullptr
This class represents a compute construct, representing a 'Kind' of ‘parallel’, 'serial',...
Expr * Cond
};
static bool classof(const Stmt *T)
@ Result
The result type of a method or function.
Definition TypeBase.h:905
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
@ Other
Other implicit parameter.
Definition Decl.h:1746
int const char * function
Definition c++config.h:31