Thread Safety Analysis: Support reentrant capabilities (#137133)

Introduce the `reentrant_capability` attribute, which may be specified
alongside the `capability(..)` attribute to denote that the defined
capability type is reentrant. Marking a capability as reentrant means
that acquiring the same capability multiple times is safe, and does not
produce warnings on attempted re-acquisition.

The most significant changes required are plumbing to propagate whether the
attribute is present to a CapabilityExpr, and introducing
ReentrancyDepth to the LockableFactEntry class.
This commit is contained in:
Marco Elver
2025-05-26 17:03:55 +02:00
committed by GitHub
parent 365dcf48b8
commit c7ccfc6dfc
16 changed files with 624 additions and 77 deletions

View File

@@ -447,6 +447,7 @@ Improvements to Clang's diagnostics
as function arguments or return value respectively. Note that
:doc:`ThreadSafetyAnalysis` still does not perform alias analysis. The
feature will be default-enabled with ``-Wthread-safety`` in a future release.
- The :doc:`ThreadSafetyAnalysis` now supports reentrant capabilities.
- Clang will now do a better job producing common nested names, when producing
common types for ternary operator, template argument deduction and multiple return auto deduction.
- The ``-Wsign-compare`` warning now treats expressions with bitwise not(~) and minus(-) as signed integers

View File

@@ -434,6 +434,21 @@ class can be used as a capability. The string argument specifies the kind of
capability in error messages, e.g. ``"mutex"``. See the ``Container`` example
given above, or the ``Mutex`` class in :ref:`mutexheader`.
REENTRANT_CAPABILITY
--------------------
``REENTRANT_CAPABILITY`` is an attribute on capability classes, denoting that
they are reentrant. Marking a capability as reentrant means that acquiring the
same capability multiple times is safe. However, re-acquiring the same
capability with different access privileges (exclusive vs. shared) is not
considered reentrant by the analysis.
Note: In many cases this attribute is only required where a capability is
acquired reentrantly within the same function, such as via macros or other
helpers. Otherwise, best practice is to avoid explicitly acquiring a capability
multiple times within the same function, and to let the analysis produce
warnings on double-acquisition attempts.
.. _scoped_capability:
SCOPED_CAPABILITY
@@ -846,6 +861,9 @@ implementation.
#define CAPABILITY(x) \
THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define REENTRANT_CAPABILITY \
THREAD_ANNOTATION_ATTRIBUTE__(reentrant_capability)
#define SCOPED_CAPABILITY \
THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)

View File

@@ -166,10 +166,12 @@ public:
/// \param LocEndOfScope -- The location of the end of the scope where the
/// mutex is no longer held
/// \param LEK -- which of the three above cases we should warn for
/// \param ReentrancyMismatch -- mismatching reentrancy depth
virtual void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
SourceLocation LocLocked,
SourceLocation LocEndOfScope,
LockErrorKind LEK) {}
LockErrorKind LEK,
bool ReentrancyMismatch = false) {}
/// Warn when a mutex is held exclusively and shared at the same point. For
/// example, if a mutex is locked exclusively during an if branch and shared

View File

@@ -22,6 +22,7 @@
#define LLVM_CLANG_ANALYSIS_ANALYSES_THREADSAFETYCOMMON_H
#include "clang/AST/Decl.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
@@ -272,27 +273,34 @@ private:
class CapabilityExpr {
private:
static constexpr unsigned FlagNegative = 1u << 0;
static constexpr unsigned FlagReentrant = 1u << 1;
/// The capability expression and flags.
llvm::PointerIntPair<const til::SExpr *, 1, unsigned> CapExpr;
llvm::PointerIntPair<const til::SExpr *, 2, unsigned> CapExpr;
/// The kind of capability as specified by @ref CapabilityAttr::getName.
StringRef CapKind;
public:
CapabilityExpr() : CapExpr(nullptr, 0) {}
CapabilityExpr(const til::SExpr *E, StringRef Kind, bool Neg)
: CapExpr(E, Neg ? FlagNegative : 0), CapKind(Kind) {}
CapabilityExpr(const til::SExpr *E, StringRef Kind, bool Neg, bool Reentrant)
: CapExpr(E, (Neg ? FlagNegative : 0) | (Reentrant ? FlagReentrant : 0)),
CapKind(Kind) {}
// Infers `Kind` and `Reentrant` from `QT`.
CapabilityExpr(const til::SExpr *E, QualType QT, bool Neg);
// Don't allow implicitly-constructed StringRefs since we'll capture them.
template <typename T> CapabilityExpr(const til::SExpr *, T, bool) = delete;
template <typename T>
CapabilityExpr(const til::SExpr *, T, bool, bool) = delete;
const til::SExpr *sexpr() const { return CapExpr.getPointer(); }
StringRef getKind() const { return CapKind; }
bool negative() const { return CapExpr.getInt() & FlagNegative; }
bool reentrant() const { return CapExpr.getInt() & FlagReentrant; }
CapabilityExpr operator!() const {
return CapabilityExpr(CapExpr.getPointer(), CapKind, !negative());
return CapabilityExpr(CapExpr.getPointer(), CapKind, !negative(),
reentrant());
}
bool equals(const CapabilityExpr &other) const {
@@ -389,10 +397,6 @@ public:
// Translate a variable reference.
til::LiteralPtr *createVariable(const VarDecl *VD);
// Create placeholder for this: we don't know the VarDecl on construction yet.
std::pair<til::LiteralPtr *, StringRef>
createThisPlaceholder(const Expr *Exp);
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.

View File

@@ -4038,6 +4038,13 @@ def LocksExcluded : InheritableAttr {
let Documentation = [Undocumented];
}
def ReentrantCapability : InheritableAttr {
let Spellings = [Clang<"reentrant_capability">];
let Subjects = SubjectList<[Record, TypedefName]>;
let Documentation = [Undocumented];
let SimpleHandler = 1;
}
// C/C++ consumed attributes.
def Consumable : InheritableAttr {

View File

@@ -4082,6 +4082,9 @@ def warn_thread_attribute_not_on_scoped_lockable_param : Warning<
"%0 attribute applies to function parameters only if their type is a "
"reference to a 'scoped_lockable'-annotated type">,
InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def warn_thread_attribute_requires_preceded : Warning<
"%0 attribute on %1 must be preceded by %2 attribute">,
InGroup<ThreadSafetyAttributes>, DefaultIgnore;
def err_attribute_argument_out_of_bounds_extra_info : Error<
"%0 attribute parameter %1 is out of bounds: "
"%plural{0:no parameters to index into|"
@@ -4105,10 +4108,10 @@ def warn_expecting_locked : Warning<
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
// FIXME: improve the error message about locks not in scope
def warn_lock_some_predecessors : Warning<
"%0 '%1' is not held on every path through here">,
"%0 '%1' is not held on every path through here%select{| with equal reentrancy depth}2">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_expecting_lock_held_on_loop : Warning<
"expecting %0 '%1' to be held at start of each loop">,
"expecting %0 '%1' to be held at start of each loop%select{| with equal reentrancy depth}2">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def note_locked_here : Note<"%0 acquired here">;
def note_unlocked_here : Note<"%0 released here">;

View File

@@ -99,8 +99,6 @@ class FactSet;
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
public:
enum FactEntryKind { Lockable, ScopedLockable };
@@ -168,6 +166,8 @@ private:
public:
FactID newFact(std::unique_ptr<FactEntry> Entry) {
Facts.push_back(std::move(Entry));
assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
"FactID space exhausted");
return static_cast<unsigned short>(Facts.size() - 1);
}
@@ -235,6 +235,20 @@ public:
return false;
}
std::optional<FactID> replaceLock(FactManager &FM, iterator It,
std::unique_ptr<FactEntry> Entry) {
if (It == end())
return std::nullopt;
FactID F = FM.newFact(std::move(Entry));
*It = F;
return F;
}
std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
std::unique_ptr<FactEntry> Entry) {
return replaceLock(FM, findLockIter(FM, CapE), std::move(Entry));
}
iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
return llvm::find_if(*this,
[&](FactID ID) { return FM[ID].matches(CapE); });
@@ -855,11 +869,19 @@ static void findBlockLocations(CFG *CFGraph,
namespace {
class LockableFactEntry : public FactEntry {
private:
/// Reentrancy depth: incremented when a capability has been acquired
/// reentrantly (after initial acquisition). Always 0 for non-reentrant
/// capabilities.
unsigned int ReentrancyDepth = 0;
public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
SourceKind Src = Acquired)
: FactEntry(Lockable, CE, LK, Loc, Src) {}
unsigned int getReentrancyDepth() const { return ReentrancyDepth; }
void
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
@@ -872,8 +894,13 @@ public:
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
ThreadSafetyHandler &Handler) const override {
Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
entry.loc());
if (std::unique_ptr<FactEntry> RFact = tryReenter(entry.kind())) {
// This capability has been reentrantly acquired.
FSet.replaceLock(FactMan, entry, std::move(RFact));
} else {
Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
entry.loc());
}
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
@@ -881,12 +908,39 @@ public:
bool FullyRemove,
ThreadSafetyHandler &Handler) const override {
FSet.removeLock(FactMan, Cp);
if (!Cp.negative()) {
if (std::unique_ptr<FactEntry> RFact = leaveReentrant()) {
// This capability remains reentrantly acquired.
FSet.addLock(FactMan, std::move(RFact));
} else if (!Cp.negative()) {
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
!Cp, LK_Exclusive, UnlockLoc));
}
}
// Return an updated FactEntry if we can acquire this capability reentrant,
// nullptr otherwise.
std::unique_ptr<LockableFactEntry> tryReenter(LockKind ReenterKind) const {
if (!reentrant())
return nullptr;
if (kind() != ReenterKind)
return nullptr;
auto NewFact = std::make_unique<LockableFactEntry>(*this);
NewFact->ReentrancyDepth++;
return NewFact;
}
// Return an updated FactEntry if we are releasing a capability previously
// acquired reentrant, nullptr otherwise.
std::unique_ptr<LockableFactEntry> leaveReentrant() const {
if (!ReentrancyDepth)
return nullptr;
assert(reentrant());
auto NewFact = std::make_unique<LockableFactEntry>(*this);
NewFact->ReentrancyDepth--;
return NewFact;
}
static bool classof(const FactEntry *A) {
return A->getFactEntryKind() == Lockable;
}
@@ -992,10 +1046,14 @@ private:
void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
LockKind kind, SourceLocation loc,
ThreadSafetyHandler *Handler) const {
if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
if (Handler)
Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
loc);
if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
if (std::unique_ptr<FactEntry> RFact = Fact.tryReenter(kind)) {
// This capability has been reentrantly acquired.
FSet.replaceLock(FactMan, It, std::move(RFact));
} else if (Handler) {
Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
}
} else {
FSet.removeLock(FactMan, !Cp);
FSet.addLock(FactMan,
@@ -1005,7 +1063,14 @@ private:
void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
SourceLocation loc, ThreadSafetyHandler *Handler) const {
if (FSet.findLock(FactMan, Cp)) {
if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
if (std::unique_ptr<FactEntry> RFact = Fact.leaveReentrant()) {
// This capability remains reentrantly acquired.
FSet.replaceLock(FactMan, It, std::move(RFact));
return;
}
FSet.removeLock(FactMan, Cp);
FSet.addLock(FactMan,
std::make_unique<LockableFactEntry>(!Cp, LK_Exclusive, loc));
@@ -1065,7 +1130,8 @@ public:
const CFGBlock* PredBlock,
const CFGBlock *CurrBlock);
bool join(const FactEntry &a, const FactEntry &b, bool CanModify);
bool join(const FactEntry &A, const FactEntry &B, SourceLocation JoinLoc,
LockErrorKind EntryLEK);
void intersectAndWarn(FactSet &EntrySet, const FactSet &ExitSet,
SourceLocation JoinLoc, LockErrorKind EntryLEK,
@@ -1283,7 +1349,6 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
Entry->loc(), Entry->getKind());
}
// FIXME: Don't always warn when we have support for reentrant locks.
if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
if (!Entry->asserted())
Cp->handleLock(FSet, FactMan, *Entry, Handler);
@@ -1808,15 +1873,15 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
assert(!Self);
const auto *TagT = Exp->getType()->getAs<TagType>();
if (D->hasAttrs() && TagT && Exp->isPRValue()) {
std::pair<til::LiteralPtr *, StringRef> Placeholder =
Analyzer->SxBuilder.createThisPlaceholder(Exp);
til::LiteralPtr *Placeholder =
Analyzer->SxBuilder.createVariable(nullptr);
[[maybe_unused]] auto inserted =
Analyzer->ConstructedObjects.insert({Exp, Placeholder.first});
Analyzer->ConstructedObjects.insert({Exp, Placeholder});
assert(inserted.second && "Are we visiting the same expression again?");
if (isa<CXXConstructExpr>(Exp))
Self = Placeholder.first;
Self = Placeholder;
if (TagT->getDecl()->hasAttr<ScopedLockableAttr>())
Scp = CapabilityExpr(Placeholder.first, Placeholder.second, false);
Scp = CapabilityExpr(Placeholder, Exp->getType(), /*Neg=*/false);
}
assert(Loc.isInvalid());
@@ -1954,12 +2019,13 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
if (DeclaredLocks.empty())
continue;
CapabilityExpr Cp(Analyzer->SxBuilder.translate(Arg, nullptr),
StringRef("mutex"), false);
StringRef("mutex"), /*Neg=*/false, /*Reentrant=*/false);
if (const auto *CBTE = dyn_cast<CXXBindTemporaryExpr>(Arg->IgnoreCasts());
Cp.isInvalid() && CBTE) {
if (auto Object = Analyzer->ConstructedObjects.find(CBTE->getSubExpr());
Object != Analyzer->ConstructedObjects.end())
Cp = CapabilityExpr(Object->second, StringRef("mutex"), false);
Cp = CapabilityExpr(Object->second, StringRef("mutex"), /*Neg=*/false,
/*Reentrant=*/false);
}
const FactEntry *Fact = FSet.findLock(Analyzer->FactMan, Cp);
if (!Fact) {
@@ -2254,11 +2320,28 @@ void BuildLockset::VisitReturnStmt(const ReturnStmt *S) {
/// Given two facts merging on a join point, possibly warn and decide whether to
/// keep or replace.
///
/// \param CanModify Whether we can replace \p A by \p B.
/// \return false if we should keep \p A, true if we should take \p B.
bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
bool CanModify) {
if (A.kind() != B.kind()) {
SourceLocation JoinLoc,
LockErrorKind EntryLEK) {
// Whether we can replace \p A by \p B.
const bool CanModify = EntryLEK != LEK_LockedSomeLoopIterations;
unsigned int ReentrancyDepthA = 0;
unsigned int ReentrancyDepthB = 0;
if (const auto *LFE = dyn_cast<LockableFactEntry>(&A))
ReentrancyDepthA = LFE->getReentrancyDepth();
if (const auto *LFE = dyn_cast<LockableFactEntry>(&B))
ReentrancyDepthB = LFE->getReentrancyDepth();
if (ReentrancyDepthA != ReentrancyDepthB) {
Handler.handleMutexHeldEndOfScope(B.getKind(), B.toString(), B.loc(),
JoinLoc, EntryLEK,
/*ReentrancyMismatch=*/true);
// Pick the FactEntry with the greater reentrancy depth as the "good"
// fact to reduce potential later warnings.
return CanModify && ReentrancyDepthA < ReentrancyDepthB;
} else if (A.kind() != B.kind()) {
// For managed capabilities, the destructor should unlock in the right mode
// anyway. For asserted capabilities no unlocking is needed.
if ((A.managed() || A.asserted()) && (B.managed() || B.asserted())) {
@@ -2304,8 +2387,7 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &EntrySet,
FactSet::iterator EntryIt = EntrySet.findLockIter(FactMan, ExitFact);
if (EntryIt != EntrySet.end()) {
if (join(FactMan[*EntryIt], ExitFact,
EntryLEK != LEK_LockedSomeLoopIterations))
if (join(FactMan[*EntryIt], ExitFact, JoinLoc, EntryLEK))
*EntryIt = Fact;
} else if (!ExitFact.managed() || EntryLEK == LEK_LockedAtEndOfFunction) {
ExitFact.handleRemovalFromIntersection(ExitSet, FactMan, JoinLoc,
@@ -2468,7 +2550,8 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
}
if (UnderlyingLocks.empty())
continue;
CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(), false);
CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
/*Neg=*/false, /*Reentrant=*/false);
auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(
Cp, Param->getLocation(), FactEntry::Declared);
for (const CapabilityExpr &M : UnderlyingLocks)

View File

@@ -67,6 +67,40 @@ static bool isIncompletePhi(const til::SExpr *E) {
return false;
}
static constexpr std::pair<StringRef, bool> ClassifyCapabilityFallback{
/*Kind=*/StringRef("mutex"),
/*Reentrant=*/false};
// Returns pair (Kind, Reentrant).
static std::pair<StringRef, bool> classifyCapability(const TypeDecl &TD) {
if (const auto *CA = TD.getAttr<CapabilityAttr>())
return {CA->getName(), TD.hasAttr<ReentrantCapabilityAttr>()};
return ClassifyCapabilityFallback;
}
// Returns pair (Kind, Reentrant).
static std::pair<StringRef, bool> classifyCapability(QualType QT) {
// We need to look at the declaration of the type of the value to determine
// which it is. The type should either be a record or a typedef, or a pointer
// or reference thereof.
if (const auto *RT = QT->getAs<RecordType>()) {
if (const auto *RD = RT->getDecl())
return classifyCapability(*RD);
} else if (const auto *TT = QT->getAs<TypedefType>()) {
if (const auto *TD = TT->getDecl())
return classifyCapability(*TD);
} else if (QT->isPointerOrReferenceType())
return classifyCapability(QT->getPointeeType());
return ClassifyCapabilityFallback;
}
CapabilityExpr::CapabilityExpr(const til::SExpr *E, QualType QT, bool Neg) {
const auto &[Kind, Reentrant] = classifyCapability(QT);
*this = CapabilityExpr(E, Kind, Neg, Reentrant);
}
using CallingContext = SExprBuilder::CallingContext;
til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) { return SMap.lookup(S); }
@@ -81,28 +115,6 @@ static bool isCalleeArrow(const Expr *E) {
return ME ? ME->isArrow() : false;
}
static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
return A->getName();
}
static StringRef ClassifyDiagnostic(QualType VDT) {
// We need to look at the declaration of the type of the value to determine
// which it is. The type should either be a record or a typedef, or a pointer
// or reference thereof.
if (const auto *RT = VDT->getAs<RecordType>()) {
if (const auto *RD = RT->getDecl())
if (const auto *CA = RD->getAttr<CapabilityAttr>())
return ClassifyDiagnostic(CA);
} else if (const auto *TT = VDT->getAs<TypedefType>()) {
if (const auto *TD = TT->getDecl())
if (const auto *CA = TD->getAttr<CapabilityAttr>())
return ClassifyDiagnostic(CA);
} else if (VDT->isPointerOrReferenceType())
return ClassifyDiagnostic(VDT->getPointeeType());
return "mutex";
}
/// Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
@@ -170,9 +182,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// If the attribute has no arguments, then assume the argument is "this".
if (!AttrExp)
return CapabilityExpr(
Self,
ClassifyDiagnostic(
cast<CXXMethodDecl>(D)->getFunctionObjectParameterType()),
Self, cast<CXXMethodDecl>(D)->getFunctionObjectParameterType(),
false);
else // For most attributes.
return translateAttrExpr(AttrExp, &Ctx);
@@ -197,7 +207,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
return CapabilityExpr(new (Arena) til::Wildcard(), StringRef("wildcard"),
false);
/*Neg=*/false, /*Reentrant=*/false);
else
// Ignore other string literals for now.
return CapabilityExpr();
@@ -217,33 +227,25 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
}
}
til::SExpr *E = translate(AttrExp, Ctx);
const til::SExpr *E = translate(AttrExp, Ctx);
// Trap mutex expressions like nullptr, or 0.
// Any literal value is nonsense.
if (!E || isa<til::Literal>(E))
return CapabilityExpr();
StringRef Kind = ClassifyDiagnostic(AttrExp->getType());
// Hack to deal with smart pointers -- strip off top-level pointer casts.
if (const auto *CE = dyn_cast<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
return CapabilityExpr(CE->expr(), Kind, Neg);
E = CE->expr();
}
return CapabilityExpr(E, Kind, Neg);
return CapabilityExpr(E, AttrExp->getType(), Neg);
}
til::LiteralPtr *SExprBuilder::createVariable(const VarDecl *VD) {
return new (Arena) til::LiteralPtr(VD);
}
std::pair<til::LiteralPtr *, StringRef>
SExprBuilder::createThisPlaceholder(const Expr *Exp) {
return {new (Arena) til::LiteralPtr(nullptr),
ClassifyDiagnostic(Exp->getType())};
}
// Translate a clang statement or expression to a TIL expression.
// Also performs substitution of variables; Ctx provides the context.
// Dispatches on the type of S.

View File

@@ -1907,7 +1907,8 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
SourceLocation LocLocked,
SourceLocation LocEndOfScope,
LockErrorKind LEK) override {
LockErrorKind LEK,
bool ReentrancyMismatch) override {
unsigned DiagID = 0;
switch (LEK) {
case LEK_LockedSomePredecessors:
@@ -1926,8 +1927,9 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
if (LocEndOfScope.isInvalid())
LocEndOfScope = FunEndLocation;
PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
<< LockName);
PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID)
<< Kind << LockName
<< ReentrancyMismatch);
Warnings.emplace_back(std::move(Warning),
makeLockedHereNote(LocLocked, Kind));
}

View File

@@ -6243,6 +6243,21 @@ static void handleCapabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) CapabilityAttr(S.Context, AL, N));
}
static void handleReentrantCapabilityAttr(Sema &S, Decl *D,
const ParsedAttr &AL) {
// Do not permit 'reentrant_capability' without 'capability(..)'. Note that
// the check here requires 'capability' to be before 'reentrant_capability'.
// This helps enforce a canonical style. Also avoids placing an additional
// branch into ProcessDeclAttributeList().
if (!D->hasAttr<CapabilityAttr>()) {
S.Diag(AL.getLoc(), diag::warn_thread_attribute_requires_preceded)
<< AL << cast<NamedDecl>(D) << "'capability'";
return;
}
D->addAttr(::new (S.Context) ReentrantCapabilityAttr(S.Context, AL));
}
static void handleAssertCapabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
SmallVector<Expr*, 1> Args;
if (!checkLockFunAttrCommon(S, D, AL, Args))
@@ -7557,6 +7572,9 @@ ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
case ParsedAttr::AT_Lockable:
handleCapabilityAttr(S, D, AL);
break;
case ParsedAttr::AT_ReentrantCapability:
handleReentrantCapabilityAttr(S, D, AL);
break;
case ParsedAttr::AT_RequiresCapability:
handleRequiresCapabilityAttr(S, D, AL);
break;

View File

@@ -175,6 +175,7 @@
// CHECK-NEXT: PreserveNone (SubjectMatchRule_hasType_functionType)
// CHECK-NEXT: RandomizeLayout (SubjectMatchRule_record)
// CHECK-NEXT: ReadOnlyPlacement (SubjectMatchRule_record)
// CHECK-NEXT: ReentrantCapability (SubjectMatchRule_record, SubjectMatchRule_type_alias)
// CHECK-NEXT: ReleaseHandle (SubjectMatchRule_variable_is_parameter)
// CHECK-NEXT: ReqdWorkGroupSize (SubjectMatchRule_function)
// CHECK-NEXT: Restrict (SubjectMatchRule_function)

View File

@@ -2,6 +2,7 @@
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -Wthread-safety-pointer -Wthread-safety-beta -fexperimental-late-parse-attributes -DLATE_PARSING %s
#define LOCKABLE __attribute__ ((lockable))
#define REENTRANT_CAPABILITY __attribute__ ((reentrant_capability))
#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
#define GUARDED_BY(...) __attribute__ ((guarded_by(__VA_ARGS__)))
#define GUARDED_VAR __attribute__ ((guarded_var))
@@ -216,6 +217,25 @@ int main(void) {
return 0;
}
/*** Reentrancy test ***/
struct LOCKABLE REENTRANT_CAPABILITY ReentrantMutex {};
void reentrant_mutex_lock(struct ReentrantMutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
void reentrant_mutex_unlock(struct ReentrantMutex *mu) UNLOCK_FUNCTION(mu);
struct ReentrantMutex rmu;
int r_ GUARDED_BY(&rmu);
void test_reentrant(void) {
reentrant_mutex_lock(&rmu);
r_ = 1;
reentrant_mutex_lock(&rmu);
r_ = 1;
reentrant_mutex_unlock(&rmu);
r_ = 1;
reentrant_mutex_unlock(&rmu);
r_ = 1; // expected-warning{{writing variable 'r_' requires holding mutex 'rmu' exclusively}}
}
// We had a problem where we'd skip all attributes that follow a late-parsed
// attribute in a single __attribute__.
void run(void) __attribute__((guarded_by(mu1), guarded_by(mu1))); // expected-warning 2{{only applies to non-static data members and global variables}}

View File

@@ -35,6 +35,7 @@
#define PT_GUARDED_BY(x) __attribute__((pt_guarded_by(x)))
// Common
#define REENTRANT_CAPABILITY __attribute__((reentrant_capability))
#define SCOPED_LOCKABLE __attribute__((scoped_lockable))
#define ACQUIRED_AFTER(...) __attribute__((acquired_after(__VA_ARGS__)))
#define ACQUIRED_BEFORE(...) __attribute__((acquired_before(__VA_ARGS__)))

View File

@@ -6860,3 +6860,367 @@ class PointerGuard {
}
};
} // namespace Derived_Smart_Pointer
namespace Reentrancy {
class LOCKABLE REENTRANT_CAPABILITY ReentrantMutex {
public:
void Lock() EXCLUSIVE_LOCK_FUNCTION();
void ReaderLock() SHARED_LOCK_FUNCTION();
void Unlock() UNLOCK_FUNCTION();
void ExclusiveUnlock() EXCLUSIVE_UNLOCK_FUNCTION();
void ReaderUnlock() SHARED_UNLOCK_FUNCTION();
bool TryLock() EXCLUSIVE_TRYLOCK_FUNCTION(true);
bool ReaderTryLock() SHARED_TRYLOCK_FUNCTION(true);
// for negative capabilities
const ReentrantMutex& operator!() const { return *this; }
void AssertHeld() ASSERT_EXCLUSIVE_LOCK();
void AssertReaderHeld() ASSERT_SHARED_LOCK();
};
class SCOPED_LOCKABLE ReentrantMutexLock {
public:
ReentrantMutexLock(ReentrantMutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
~ReentrantMutexLock() UNLOCK_FUNCTION();
};
class SCOPED_LOCKABLE ReentrantReaderMutexLock {
public:
ReentrantReaderMutexLock(ReentrantMutex *mu) SHARED_LOCK_FUNCTION(mu);
~ReentrantReaderMutexLock() UNLOCK_FUNCTION();
};
class SCOPED_LOCKABLE RelockableReentrantMutexLock {
public:
RelockableReentrantMutexLock(ReentrantMutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu);
~RelockableReentrantMutexLock() EXCLUSIVE_UNLOCK_FUNCTION();
void Lock() EXCLUSIVE_LOCK_FUNCTION();
void Unlock() UNLOCK_FUNCTION();
};
ReentrantMutex rmu;
int guard_var __attribute__((guarded_var)) = 0;
int guardby_var __attribute__((guarded_by(rmu))) = 0;
void testReentrantMany() {
rmu.Lock();
rmu.Lock();
rmu.Lock();
rmu.Lock();
rmu.Lock();
rmu.Lock();
rmu.Lock();
rmu.Lock();
rmu.Unlock();
rmu.Unlock();
rmu.Unlock();
rmu.Unlock();
rmu.Unlock();
rmu.Unlock();
rmu.Unlock();
rmu.Unlock();
}
void testReentrantManyReader() {
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderLock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
rmu.ReaderUnlock();
}
void testReentrantLock1() {
rmu.Lock();
guard_var = 2;
rmu.Lock();
guard_var = 2;
rmu.Unlock();
guard_var = 2;
rmu.Unlock();
guard_var = 2; // expected-warning{{writing variable 'guard_var' requires holding any mutex exclusively}}
}
void testReentrantReaderLock1() {
rmu.ReaderLock();
int x = guard_var;
rmu.ReaderLock();
int y = guard_var;
rmu.ReaderUnlock();
int z = guard_var;
rmu.ReaderUnlock();
int a = guard_var; // expected-warning{{reading variable 'guard_var' requires holding any mutex}}
}
void testReentrantLock2() {
rmu.Lock();
guardby_var = 2;
rmu.Lock();
guardby_var = 2;
rmu.Unlock();
guardby_var = 2;
rmu.Unlock();
guardby_var = 2; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
}
void testReentrantReaderLock2() {
rmu.ReaderLock();
int x = guardby_var;
rmu.ReaderLock();
int y = guardby_var;
rmu.ReaderUnlock();
int z = guardby_var;
rmu.ReaderUnlock();
int a = guardby_var; // expected-warning{{reading variable 'guardby_var' requires holding mutex 'rmu'}}
}
void testReentrantTryLock1() {
if (rmu.TryLock()) {
guardby_var = 1;
if (rmu.TryLock()) {
guardby_var = 1;
rmu.Unlock();
}
guardby_var = 1;
rmu.Unlock();
}
guardby_var = 1; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
}
void testReentrantTryLock2() {
rmu.Lock();
guardby_var = 1;
if (rmu.TryLock()) {
guardby_var = 1;
rmu.Unlock();
}
guardby_var = 1;
rmu.Unlock();
guardby_var = 1; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
}
void testReentrantNotHeld() {
rmu.Unlock(); // \
// expected-warning{{releasing mutex 'rmu' that was not held}}
}
void testReentrantMissingUnlock() {
rmu.Lock(); // expected-note{{mutex acquired here}}
rmu.Lock(); // reenter
rmu.Unlock();
} // expected-warning{{mutex 'rmu' is still held at the end of function}}
// Acquiring the same capability with different access privileges is not
// considered reentrant.
void testMixedSharedExclusive() {
rmu.ReaderLock(); // expected-note{{mutex acquired here}}
rmu.Lock(); // expected-warning{{acquiring mutex 'rmu' that is already held}}
rmu.Unlock(); // expected-note{{mutex released here}}
rmu.ReaderUnlock(); // expected-warning{{releasing mutex 'rmu' that was not held}}
}
void testReentrantIntersection() {
rmu.Lock();
if (guardby_var) {
rmu.Lock();
rmu.Lock();
rmu.Unlock();
} else {
rmu.Lock();
guardby_var = 1;
}
guardby_var = 1;
rmu.Unlock();
guardby_var = 1;
rmu.Unlock();
guardby_var = 1; // expected-warning{{writing variable 'guardby_var' requires holding mutex 'rmu' exclusively}}
}
void testReentrantIntersectionBad() {
rmu.Lock(); // expected-note{{mutex acquired here}}
if (guardby_var) {
rmu.Lock();
rmu.Lock();
} else {
rmu.Lock();
guardby_var = 1;
}
guardby_var = 1; // expected-warning{{mutex 'rmu' is not held on every path through here with equal reentrancy depth}}
rmu.Unlock();
guardby_var = 1;
rmu.Unlock();
guardby_var = 1;
rmu.Unlock();
}
void testReentrantLoopBad() {
rmu.Lock(); // expected-note{{mutex acquired here}}
while (guardby_var) { // expected-warning{{expecting mutex 'rmu' to be held at start of each loop with equal reentrancy depth}}
rmu.Lock();
}
rmu.Unlock();
}
void testLocksRequiredReentrant() EXCLUSIVE_LOCKS_REQUIRED(rmu) {
guardby_var = 1;
rmu.Lock();
rmu.Lock();
guardby_var = 1;
rmu.Unlock();
rmu.Unlock();
guardby_var = 1;
}
void testAssertReentrant() {
rmu.AssertHeld();
guardby_var = 1;
rmu.Lock();
guardby_var = 1;
rmu.Unlock();
guardby_var = 1;
}
void testAssertReaderReentrant() {
rmu.AssertReaderHeld();
int x = guardby_var;
rmu.ReaderLock();
int y = guardby_var;
rmu.ReaderUnlock();
int z = guardby_var;
}
struct TestScopedReentrantLockable {
ReentrantMutex mu1;
ReentrantMutex mu2;
int a __attribute__((guarded_by(mu1)));
int b __attribute__((guarded_by(mu2)));
bool getBool();
void foo1() {
ReentrantMutexLock mulock1(&mu1);
a = 5;
ReentrantMutexLock mulock2(&mu1);
a = 5;
}
void foo2() {
ReentrantMutexLock mulock1(&mu1);
a = 5;
mu1.Lock();
a = 5;
mu1.Unlock();
a = 5;
}
#ifdef __cpp_guaranteed_copy_elision
void const_lock() {
const ReentrantMutexLock mulock1 = ReentrantMutexLock(&mu1);
a = 5;
const ReentrantMutexLock mulock2 = ReentrantMutexLock(&mu1);
a = 3;
}
#endif
void temporary() {
ReentrantMutexLock{&mu1}, a = 1, ReentrantMutexLock{&mu1}, a = 5;
}
void lifetime_extension() {
const ReentrantMutexLock &mulock1 = ReentrantMutexLock(&mu1);
a = 5;
const ReentrantMutexLock &mulock2 = ReentrantMutexLock(&mu1);
a = 5;
}
void foo3() {
ReentrantReaderMutexLock mulock1(&mu1);
if (getBool()) {
ReentrantMutexLock mulock2a(&mu2);
b = a + 1;
}
else {
ReentrantMutexLock mulock2b(&mu2);
b = a + 2;
}
}
void foo4() {
ReentrantMutexLock mulock_a(&mu1);
ReentrantMutexLock mulock_b(&mu1);
}
void temporary_double_lock() {
ReentrantMutexLock mulock_a(&mu1);
ReentrantMutexLock{&mu1};
}
void foo5() {
ReentrantMutexLock mulock1(&mu1), mulock2(&mu2);
{
ReentrantMutexLock mulock3(&mu1), mulock4(&mu2);
a = b+1;
}
b = a+1;
}
};
void scopedDoubleUnlock() {
RelockableReentrantMutexLock scope(&rmu);
scope.Unlock(); // expected-note{{mutex released here}}
scope.Unlock(); // expected-warning {{releasing mutex 'rmu' that was not held}}
}
void scopedDoubleLock1() {
RelockableReentrantMutexLock scope(&rmu);
scope.Lock();
scope.Unlock();
}
void scopedDoubleLock2() {
RelockableReentrantMutexLock scope(&rmu);
scope.Unlock();
scope.Lock();
scope.Lock();
scope.Unlock();
}
typedef int __attribute__((capability("bitlock"))) REENTRANT_CAPABILITY *bitlock_t;
void bit_lock(bitlock_t l) EXCLUSIVE_LOCK_FUNCTION(l);
void bit_unlock(bitlock_t l) UNLOCK_FUNCTION(l);
bitlock_t bl;
void testReentrantTypedef() {
bit_lock(bl);
bit_lock(bl);
bit_unlock(bl);
bit_unlock(bl);
}
class TestNegativeWithReentrantMutex {
ReentrantMutex rmu;
int a GUARDED_BY(rmu);
public:
void baz() EXCLUSIVE_LOCKS_REQUIRED(!rmu) {
rmu.Lock();
rmu.Lock();
a = 0;
rmu.Unlock();
rmu.Unlock();
}
};
} // namespace Reentrancy

View File

@@ -3,6 +3,7 @@
// RUN: %clang_cc1 -fsyntax-only -verify -Wthread-safety -std=c++11 %s
#define LOCKABLE __attribute__ ((lockable))
#define REENTRANT_CAPABILITY __attribute__ ((reentrant_capability))
#define SCOPED_LOCKABLE __attribute__ ((scoped_lockable))
#define GUARDED_BY(x) __attribute__ ((guarded_by(x)))
#define GUARDED_VAR __attribute__ ((guarded_var))
@@ -260,6 +261,17 @@ class LFoo {
void l_function_params(int lvar LOCKABLE); // \
// expected-warning {{'lockable' attribute only applies to}}
class LOCKABLE REENTRANT_CAPABILITY LTestReentrant {
};
// Attribute order does matter.
class REENTRANT_CAPABILITY LOCKABLE LTestReentrantWrongOrder { // \
// expected-warning {{'reentrant_capability' attribute on 'LTestReentrantWrongOrder' must be preceded by 'capability' attribute}}
};
class REENTRANT_CAPABILITY LTestReentrantOnly { // \
// expected-warning {{'reentrant_capability' attribute on 'LTestReentrantOnly' must be preceded by 'capability' attribute}}
};
//-----------------------------------------//
// Scoped Lockable Attribute (sl)

View File

@@ -7995,6 +7995,15 @@ TEST_P(ImportAttributes, ImportLocksExcluded) {
checkImportVariadicArg(FromAttr->args(), ToAttr->args());
}
TEST_P(ImportAttributes, ImportReentrantCapability) {
ReentrantCapabilityAttr *FromAttr, *ToAttr;
importAttr<CXXRecordDecl>(
R"(
struct __attribute__((capability("x"), reentrant_capability)) test {};
)",
FromAttr, ToAttr);
}
TEST_P(ImportAttributes, ImportC99NoThrowAttr) {
NoThrowAttr *FromAttr, *ToAttr;
importAttr<FunctionDecl>("void test () __attribute__ ((__nothrow__));",