Thread safety analysis: Allocate FactEntrys with BumpPtrAllocator #149660
Conversation
@llvm/pr-subscribers-clang @llvm/pr-subscribers-clang-analysis

Author: Aaron Puchert (aaronpuchert)

Changes

The FactManager managing the FactEntrys stays alive for the analysis of a single function. We are already using that by allocating TIL S-expressions via BumpPtrAllocator. We can do the same with FactEntrys.

If we allocate the facts in an arena, we won't get destructor calls for them, so they had better be trivially destructible. This required replacing the SmallVector member of ScopedLockableFactEntry with TrailingObjects.

FactEntrys are now passed around by plain pointer. However, to hide the allocation of TrailingObjects from users, we introduce `create` methods, and this allows us to make the constructors private.

Patch is 20.85 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/149660.diff

1 Files Affected:

- (modified) clang/lib/Analysis/ThreadSafety.cpp
diff --git a/clang/lib/Analysis/ThreadSafety.cpp b/clang/lib/Analysis/ThreadSafety.cpp
index 80e7c8eff671a..3f993af9d95d0 100644
--- a/clang/lib/Analysis/ThreadSafety.cpp
+++ b/clang/lib/Analysis/ThreadSafety.cpp
@@ -43,6 +43,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <functional>
@@ -118,11 +119,13 @@ class FactEntry : public CapabilityExpr {
/// Where it was acquired.
SourceLocation AcquireLoc;
+protected:
+ ~FactEntry() = default;
+
public:
FactEntry(FactEntryKind FK, const CapabilityExpr &CE, LockKind LK,
SourceLocation Loc, SourceKind Src)
: CapabilityExpr(CE), Kind(FK), LKind(LK), Source(Src), AcquireLoc(Loc) {}
- virtual ~FactEntry() = default;
LockKind kind() const { return LKind; }
SourceLocation loc() const { return AcquireLoc; }
@@ -156,11 +159,20 @@ using FactID = unsigned short;
/// the analysis of a single routine.
class FactManager {
private:
- std::vector<std::unique_ptr<const FactEntry>> Facts;
+ llvm::BumpPtrAllocator &Alloc;
+ std::vector<const FactEntry *> Facts;
public:
- FactID newFact(std::unique_ptr<FactEntry> Entry) {
- Facts.push_back(std::move(Entry));
+ FactManager(llvm::BumpPtrAllocator &Alloc) : Alloc(Alloc) {}
+
+ template <typename T, typename... ArgTypes>
+ T *createFact(ArgTypes &&...Args) {
+ static_assert(std::is_trivially_destructible_v<T>);
+ return T::create(Alloc, std::forward<ArgTypes>(Args)...);
+ }
+
+ FactID newFact(const FactEntry *Entry) {
+ Facts.push_back(Entry);
assert(Facts.size() - 1 <= std::numeric_limits<FactID>::max() &&
"FactID space exhausted");
return static_cast<unsigned short>(Facts.size() - 1);
@@ -205,8 +217,8 @@ class FactSet {
void addLockByID(FactID ID) { FactIDs.push_back(ID); }
- FactID addLock(FactManager &FM, std::unique_ptr<FactEntry> Entry) {
- FactID F = FM.newFact(std::move(Entry));
+ FactID addLock(FactManager &FM, const FactEntry *Entry) {
+ FactID F = FM.newFact(Entry);
FactIDs.push_back(F);
return F;
}
@@ -231,17 +243,17 @@ class FactSet {
}
std::optional<FactID> replaceLock(FactManager &FM, iterator It,
- std::unique_ptr<FactEntry> Entry) {
+ const FactEntry *Entry) {
if (It == end())
return std::nullopt;
- FactID F = FM.newFact(std::move(Entry));
+ FactID F = FM.newFact(Entry);
*It = F;
return F;
}
std::optional<FactID> replaceLock(FactManager &FM, const CapabilityExpr &CapE,
- std::unique_ptr<FactEntry> Entry) {
- return replaceLock(FM, findLockIter(FM, CapE), std::move(Entry));
+ const FactEntry *Entry) {
+ return replaceLock(FM, findLockIter(FM, CapE), Entry);
}
iterator findLockIter(FactManager &FM, const CapabilityExpr &CapE) {
@@ -863,18 +875,30 @@ static void findBlockLocations(CFG *CFGraph,
namespace {
-class LockableFactEntry : public FactEntry {
+class LockableFactEntry final : public FactEntry {
private:
/// Reentrancy depth: incremented when a capability has been acquired
/// reentrantly (after initial acquisition). Always 0 for non-reentrant
/// capabilities.
unsigned int ReentrancyDepth = 0;
-public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
- SourceKind Src = Acquired)
+ SourceKind Src)
: FactEntry(Lockable, CE, LK, Loc, Src) {}
+public:
+ static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
+ const LockableFactEntry &Other) {
+ return new (Alloc) LockableFactEntry(Other);
+ }
+
+ static LockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
+ const CapabilityExpr &CE, LockKind LK,
+ SourceLocation Loc,
+ SourceKind Src = Acquired) {
+ return new (Alloc) LockableFactEntry(CE, LK, Loc, Src);
+ }
+
unsigned int getReentrancyDepth() const { return ReentrancyDepth; }
void
@@ -889,9 +913,9 @@ class LockableFactEntry : public FactEntry {
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
ThreadSafetyHandler &Handler) const override {
- if (std::unique_ptr<FactEntry> RFact = tryReenter(entry.kind())) {
+ if (const FactEntry *RFact = tryReenter(FactMan, entry.kind())) {
// This capability has been reentrantly acquired.
- FSet.replaceLock(FactMan, entry, std::move(RFact));
+ FSet.replaceLock(FactMan, entry, RFact);
} else {
Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
entry.loc());
@@ -904,34 +928,35 @@ class LockableFactEntry : public FactEntry {
ThreadSafetyHandler &Handler) const override {
FSet.removeLock(FactMan, Cp);
- if (std::unique_ptr<FactEntry> RFact = leaveReentrant()) {
+ if (const FactEntry *RFact = leaveReentrant(FactMan)) {
// This capability remains reentrantly acquired.
- FSet.addLock(FactMan, std::move(RFact));
+ FSet.addLock(FactMan, RFact);
} else if (!Cp.negative()) {
- FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
+ FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(
!Cp, LK_Exclusive, UnlockLoc));
}
}
// Return an updated FactEntry if we can acquire this capability reentrant,
// nullptr otherwise.
- std::unique_ptr<LockableFactEntry> tryReenter(LockKind ReenterKind) const {
+ const FactEntry *tryReenter(FactManager &FactMan,
+ LockKind ReenterKind) const {
if (!reentrant())
return nullptr;
if (kind() != ReenterKind)
return nullptr;
- auto NewFact = std::make_unique<LockableFactEntry>(*this);
+ auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
NewFact->ReentrancyDepth++;
return NewFact;
}
// Return an updated FactEntry if we are releasing a capability previously
// acquired reentrant, nullptr otherwise.
- std::unique_ptr<LockableFactEntry> leaveReentrant() const {
+ const FactEntry *leaveReentrant(FactManager &FactMan) const {
if (!ReentrancyDepth)
return nullptr;
assert(reentrant());
- auto NewFact = std::make_unique<LockableFactEntry>(*this);
+ auto *NewFact = FactMan.createFact<LockableFactEntry>(*this);
NewFact->ReentrancyDepth--;
return NewFact;
}
@@ -941,43 +966,68 @@ class LockableFactEntry : public FactEntry {
}
};
-class ScopedLockableFactEntry : public FactEntry {
+enum UnderlyingCapabilityKind {
+ UCK_Acquired, ///< Any kind of acquired capability.
+ UCK_ReleasedShared, ///< Shared capability that was released.
+ UCK_ReleasedExclusive, ///< Exclusive capability that was released.
+};
+
+struct UnderlyingCapability {
+ CapabilityExpr Cap;
+ UnderlyingCapabilityKind Kind;
+};
+
+class ScopedLockableFactEntry final
+ : public FactEntry,
+ private llvm::TrailingObjects<ScopedLockableFactEntry,
+ UnderlyingCapability> {
+ friend TrailingObjects;
+
private:
- enum UnderlyingCapabilityKind {
- UCK_Acquired, ///< Any kind of acquired capability.
- UCK_ReleasedShared, ///< Shared capability that was released.
- UCK_ReleasedExclusive, ///< Exclusive capability that was released.
- };
+ const unsigned ManagedCapacity;
+ unsigned ManagedSize = 0;
- struct UnderlyingCapability {
- CapabilityExpr Cap;
- UnderlyingCapabilityKind Kind;
- };
+ ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
+ SourceKind Src, unsigned ManagedCapacity)
+ : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src),
+ ManagedCapacity(ManagedCapacity) {}
- SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;
+ void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
+ assert(ManagedSize < ManagedCapacity);
+ new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
+ ++ManagedSize;
+ }
+
+ ArrayRef<UnderlyingCapability> getManaged() const {
+ return getTrailingObjects(ManagedSize);
+ }
public:
- ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc,
- SourceKind Src)
- : FactEntry(ScopedLockable, CE, LK_Exclusive, Loc, Src) {}
+ static ScopedLockableFactEntry *create(llvm::BumpPtrAllocator &Alloc,
+ const CapabilityExpr &CE,
+ SourceLocation Loc, SourceKind Src,
+ unsigned ManagedCapacity) {
+ void *Storage =
+ Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(ManagedCapacity),
+ alignof(ScopedLockableFactEntry));
+ return new (Storage) ScopedLockableFactEntry(CE, Loc, Src, ManagedCapacity);
+ }
CapExprSet getUnderlyingMutexes() const {
CapExprSet UnderlyingMutexesSet;
- for (const UnderlyingCapability &UnderlyingMutex : UnderlyingMutexes)
+ for (const UnderlyingCapability &UnderlyingMutex : getManaged())
UnderlyingMutexesSet.push_back(UnderlyingMutex.Cap);
return UnderlyingMutexesSet;
}
- void addLock(const CapabilityExpr &M) {
- UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
- }
+ void addLock(const CapabilityExpr &M) { addManaged(M, UCK_Acquired); }
void addExclusiveUnlock(const CapabilityExpr &M) {
- UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
+ addManaged(M, UCK_ReleasedExclusive);
}
void addSharedUnlock(const CapabilityExpr &M) {
- UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
+ addManaged(M, UCK_ReleasedShared);
}
void
@@ -987,7 +1037,7 @@ class ScopedLockableFactEntry : public FactEntry {
if (LEK == LEK_LockedAtEndOfFunction || LEK == LEK_NotLockedAtEndOfFunction)
return;
- for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto &UnderlyingMutex : getManaged()) {
const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
(UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
@@ -1002,7 +1052,7 @@ class ScopedLockableFactEntry : public FactEntry {
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
ThreadSafetyHandler &Handler) const override {
- for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto &UnderlyingMutex : getManaged()) {
if (UnderlyingMutex.Kind == UCK_Acquired)
lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
&Handler);
@@ -1016,7 +1066,7 @@ class ScopedLockableFactEntry : public FactEntry {
bool FullyRemove,
ThreadSafetyHandler &Handler) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
- for (const auto &UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto &UnderlyingMutex : getManaged()) {
// Remove/lock the underlying mutex if it exists/is still unlocked; warn
// on double unlocking/locking if we're not destroying the scoped object.
ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
@@ -1043,16 +1093,16 @@ class ScopedLockableFactEntry : public FactEntry {
ThreadSafetyHandler *Handler) const {
if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
- if (std::unique_ptr<FactEntry> RFact = Fact.tryReenter(kind)) {
+ if (const FactEntry *RFact = Fact.tryReenter(FactMan, kind)) {
// This capability has been reentrantly acquired.
- FSet.replaceLock(FactMan, It, std::move(RFact));
+ FSet.replaceLock(FactMan, It, RFact);
} else if (Handler) {
Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact.loc(), loc);
}
} else {
FSet.removeLock(FactMan, !Cp);
- FSet.addLock(FactMan,
- std::make_unique<LockableFactEntry>(Cp, kind, loc, Managed));
+ FSet.addLock(FactMan, FactMan.createFact<LockableFactEntry>(Cp, kind, loc,
+ Managed));
}
}
@@ -1060,15 +1110,15 @@ class ScopedLockableFactEntry : public FactEntry {
SourceLocation loc, ThreadSafetyHandler *Handler) const {
if (const auto It = FSet.findLockIter(FactMan, Cp); It != FSet.end()) {
const auto &Fact = cast<LockableFactEntry>(FactMan[*It]);
- if (std::unique_ptr<FactEntry> RFact = Fact.leaveReentrant()) {
+ if (const FactEntry *RFact = Fact.leaveReentrant(FactMan)) {
// This capability remains reentrantly acquired.
- FSet.replaceLock(FactMan, It, std::move(RFact));
+ FSet.replaceLock(FactMan, It, RFact);
return;
}
FSet.replaceLock(
FactMan, It,
- std::make_unique<LockableFactEntry>(!Cp, LK_Exclusive, loc));
+ FactMan.createFact<LockableFactEntry>(!Cp, LK_Exclusive, loc));
} else if (Handler) {
SourceLocation PrevLoc;
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
@@ -1098,13 +1148,13 @@ class ThreadSafetyAnalyzer {
BeforeSet *GlobalBeforeSet;
public:
- ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
- : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
+ ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet *Bset)
+ : Arena(&Bpa), SxBuilder(Arena), Handler(H), FactMan(Bpa),
+ GlobalBeforeSet(Bset) {}
bool inCurrentScope(const CapabilityExpr &CapE);
- void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
- bool ReqAttr = false);
+ void addLock(FactSet &FSet, const FactEntry *Entry, bool ReqAttr = false);
void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
@@ -1317,8 +1367,7 @@ bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
-void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
- std::unique_ptr<FactEntry> Entry,
+void ThreadSafetyAnalyzer::addLock(FactSet &FSet, const FactEntry *Entry,
bool ReqAttr) {
if (Entry->shouldIgnore())
return;
@@ -1348,7 +1397,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
if (!Entry->asserted())
Cp->handleLock(FSet, FactMan, *Entry, Handler);
} else {
- FSet.addLock(FactMan, std::move(Entry));
+ FSet.addLock(FactMan, Entry);
}
}
@@ -1563,11 +1612,11 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
// Add and remove locks.
SourceLocation Loc = Exp->getExprLoc();
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
- addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
- LK_Exclusive, Loc));
+ addLock(Result, FactMan.createFact<LockableFactEntry>(ExclusiveLockToAdd,
+ LK_Exclusive, Loc));
for (const auto &SharedLockToAdd : SharedLocksToAdd)
- addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
- LK_Shared, Loc));
+ addLock(Result, FactMan.createFact<LockableFactEntry>(SharedLockToAdd,
+ LK_Shared, Loc));
}
namespace {
@@ -1903,10 +1952,10 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, Self);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
- AssertLock,
- A->isShared() ? LK_Shared : LK_Exclusive,
- Loc, FactEntry::Asserted));
+ Analyzer->addLock(
+ FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
+ AssertLock, A->isShared() ? LK_Shared : LK_Exclusive,
+ Loc, FactEntry::Asserted));
break;
}
@@ -2063,16 +2112,19 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
FactEntry::SourceKind Source =
!Scp.shouldIgnore() ? FactEntry::Managed : FactEntry::Acquired;
for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive,
- Loc, Source));
+ Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
+ M, LK_Exclusive, Loc, Source));
for (const auto &M : SharedLocksToAdd)
- Analyzer->addLock(
- FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source));
+ Analyzer->addLock(FSet, Analyzer->FactMan.createFact<LockableFactEntry>(
+ M, LK_Shared, Loc, Source));
if (!Scp.shouldIgnore()) {
// Add the managing object as a dummy mutex, mapped to the underlying mutex.
- auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(
- Scp, Loc, FactEntry::Acquired);
+ auto *ScopedEntry = Analyzer->FactMan.createFact<ScopedLockableFactEntry>(
+ Scp, Loc, FactEntry::Acquired,
+ ExclusiveLocksToAdd.size() + SharedLocksToAdd.size() +
+ ScopedReqsAndExcludes.size() + ExclusiveLocksToRemove.size() +
+ SharedLocksToRemove.size());
for (const auto &M : ExclusiveLocksToAdd)
ScopedEntry->addLock(M);
for (const auto &M : SharedLocksToAdd)
@@ -2083,7 +2135,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
ScopedEntry->addExclusiveUnlock(M);
for (const auto &M : SharedLocksToRemove)
ScopedEntry->addSharedUnlock(M);
- Analyzer->addLock(FSet, std::move(ScopedEntry));
+ Analyzer->addLock(FSet, ScopedEntry);
}
}
@@ -2547,23 +2599,24 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
continue;
CapabilityExpr Cp(SxBuilder.createVariable(Param), StringRef(),
/*Neg=*/false, /*Reentrant=*/false);
- auto ScopedEntry = std::make_unique<ScopedLockableFactEntry>(
- Cp, Param->getLocation(), FactEntry::Declared);
+ auto *ScopedEntry = FactMan.createFact<ScopedLockableFactEntry>(
+ Cp, Param->getLocation(), FactEntry::Declared,
+ UnderlyingLocks.size());
for (const CapabilityExpr &M : UnderlyingLocks)
ScopedEntry->addLock(M);
- addLock(InitialLockset, std::move(ScopedEntry), true);
+ addLock(InitialLockset, ScopedEntry, true);
}
// FIXME -- Loc can be wrong here.
for (const auto &Mu : ExclusiveLocksToAdd) {
- auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
- FactEntry::Declared);
- addLock(InitialLockset, std::move(Entry), true);
+ const auto *Entry = FactMan.createFact<LockableFactEntry>(
+ Mu, LK_Exclusive, Loc, FactEntry::Declared);
+ addLock(InitialLockset, Entry, true);
}
for (const auto &Mu : SharedLocksToAdd) {
- auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
- FactEntry::Declared);
- addLock(InitialLockset, std::move(Entry), true);
+ const auto *Entry = FactMan.createFact<LockableFactEntry>(
+ Mu, LK_Shared, Loc, FactEntry::Declared);
+ addLock(InitialLockset, Entry, true);
}
}
@@ -2577,12 +2630,12 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
...
[truncated]
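To make the core pattern easier to see outside the diff, here is a minimal standalone sketch of arena allocation combined with TrailingObjects. The `Node` class is hypothetical, standing in for the FactEntry hierarchy; only the LLVM support headers and their APIs are real:

```c++
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/TrailingObjects.h"
#include <new>

// Hypothetical class with a variable-length trailing array of ints.
class Node final : private llvm::TrailingObjects<Node, int> {
  friend TrailingObjects;

  unsigned NumElems;

  explicit Node(unsigned N) : NumElems(N) {}

public:
  // The constructor is private; all allocation goes through create(), which
  // reserves space for the object plus its trailing array in the arena.
  static Node *create(llvm::BumpPtrAllocator &Alloc, unsigned N) {
    void *Mem = Alloc.Allocate(totalSizeToAlloc<int>(N), alignof(Node));
    return new (Mem) Node(N);
  }

  llvm::MutableArrayRef<int> elems() {
    return {getTrailingObjects<int>(), NumElems};
  }
};

void demo() {
  llvm::BumpPtrAllocator Arena;
  Node *N = Node::create(Arena, /*N=*/4);
  for (int &E : N->elems())
    E = 0;
  // No delete: Node is trivially destructible, and the arena releases all
  // memory at once when it goes out of scope.
}
```

This mirrors what the patch does for LockableFactEntry and ScopedLockableFactEntry: `create` performs one arena allocation for the object and its trailing storage, and nothing is ever freed individually.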
SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;
void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
  assert(ManagedSize < ManagedCapacity);
I feel this should not just be an assert, but a runtime check that also aborts in release builds. This bounds check is too critical, and it looks like the caller getting the capacity right is critical.
Either that, or somehow designing this new API in a way that does not require passing ManagedCapacity (unclear how; perhaps an API that takes a parameter pack of CapabilityExprs to be added, but that seems non-trivial when wanting to distinguish Exclusive/Shared/etc.).
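For concreteness, the release-mode check suggested here might look something like the following. This is a sketch of the suggestion, not what the patch does (the patch uses a plain assert); `report_fatal_error` comes from ErrorHandling.h, which the file already includes:

```c++
// Sketch: an unconditional bounds check that aborts in release builds too.
void addManaged(const CapabilityExpr &M, UnderlyingCapabilityKind UCK) {
  if (ManagedSize >= ManagedCapacity)
    llvm::report_fatal_error("ScopedLockableFactEntry: capacity exceeded");
  new (getTrailingObjects() + ManagedSize) UnderlyingCapability{M, UCK};
  ++ManagedSize;
}
```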
For a general API, I get your point. But this is used in two places. Also, FactEntrys are made immutable pretty soon: they're only modified before being added to the FactManager. So with good test coverage you should notice any discrepancy via tests.
In principle a pack is a nice idea, but the different types of managed capabilities complicate this somewhat. I'm not sure if this would still be readable.
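To illustrate the trade-off: one hypothetical shape for such an API would pass tagged groups of capabilities, so that `create` derives the capacity itself. `ManagedGroup` and this overload are invented for illustration; the patch keeps the explicit ManagedCapacity parameter:

```c++
// Hypothetical alternative, not part of the patch.
struct ManagedGroup {
  UnderlyingCapabilityKind Kind;
  ArrayRef<CapabilityExpr> Caps;
};

static ScopedLockableFactEntry *
create(llvm::BumpPtrAllocator &Alloc, const CapabilityExpr &CE,
       SourceLocation Loc, SourceKind Src,
       std::initializer_list<ManagedGroup> Groups) {
  // Derive the capacity from the arguments instead of trusting the caller.
  unsigned Capacity = 0;
  for (const ManagedGroup &G : Groups)
    Capacity += G.Caps.size();
  void *Storage =
      Alloc.Allocate(totalSizeToAlloc<UnderlyingCapability>(Capacity),
                     alignof(ScopedLockableFactEntry));
  auto *F = new (Storage) ScopedLockableFactEntry(CE, Loc, Src, Capacity);
  for (const ManagedGroup &G : Groups)
    for (const CapabilityExpr &M : G.Caps)
      F->addManaged(M, G.Kind);
  return F;
}
```

A call site would then pass something like `{{UCK_Acquired, ExclusiveLocksToAdd}, {UCK_ReleasedShared, SharedLocksToRemove}}`, which removes the capacity footgun but, as noted above, is arguably not more readable.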
Fair points. At the very least, add more comments above ScopedLockableFactEntry::create and addManaged, and wherever else needed, to make the danger here clear.
You're right, an explicit comment on the add* methods makes sense.
Does this have measurable performance impacts? Or was there some other benefit driving the change?
Thread Safety Analysis is not very expensive overall. I just benchmarked compilation of […]. But the real motivation was conceptual: […]
Accidentally left assertions on. Without them it's 277,111,134 versus 276,855,186, but this is also 0.1%, and Thread Safety Analysis is still roughly 1% of the overall compilation time.
Saving 0.1% in a large build cluster is still a nice saving, so in general, I'm in favour.
LGTM in general, but this needs more comments around the new dangerous internal API.
Force-pushed from e4715a1 to 1b1d311:
The FactManager managing the FactEntrys stays alive for the analysis of a single function. We are already using that by allocating TIL S-expressions via BumpPtrAllocator. We can do the same with FactEntrys.
If we allocate the facts in an arena, we won't get destructor calls for them, so they had better be trivially destructible. This required replacing the SmallVector member of ScopedLockableFactEntry with TrailingObjects.
FactEntrys are now passed around by plain pointer. However, to hide the allocation of TrailingObjects from users, we introduce `create` methods, and this allows us to make the constructors private.