sanitizer_common: add thread safety annotations

Enable clang Thread Safety Analysis for sanitizers:
https://clang.llvm.org/docs/ThreadSafetyAnalysis.html

Thread Safety Analysis can detect inconsistent locking,
deadlocks, and data races. Without GUARDED_BY annotations
its value is limited, but this change does the heavy lifting
needed to enable the analysis and lets GUARDED_BY annotations
be added incrementally.
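
For illustration (a hedged sketch, not part of this commit; ExampleCache
and its members are hypothetical names), a GUARDED_BY field is checked
against the locks actually held at each access, using the wrapper macros
introduced in sanitizer_thread_safety.h together with the existing
SpinMutex/SpinMutexLock types:

  class ExampleCache {
   public:
    void Add(uptr p) {
      SpinMutexLock l(&mtx_);  // Scoped acquire/release of mtx_.
      size_++;                 // OK: mtx_ is held here.
    }
    uptr Size() {
      SpinMutexLock l(&mtx_);
      return SizeLocked();
    }
   private:
    uptr SizeLocked() REQUIRES(mtx_) { return size_; }  // Caller must hold mtx_.
    SpinMutex mtx_;
    uptr size_ GUARDED_BY(mtx_) = 0;  // Unlocked accesses are reported.
  };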

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D105716

Author: Dmitry Vyukov
Date:   2021-07-09 19:29:41 +02:00
Parent: 41ba96f531
Commit: 0da172b176

23 changed files with 167 additions and 132 deletions

View File

@@ -359,6 +359,14 @@ endif()
append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)
+if(CMAKE_CXX_COMPILER_ID MATCHES Clang)
+list(APPEND SANITIZER_COMMON_CFLAGS
+"-Werror=thread-safety"
+"-Werror=thread-safety-reference"
+"-Werror=thread-safety-beta"
+)
+endif()
# If we're using MSVC,
# always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
if (NOT MSVC)

View File

@@ -852,12 +852,12 @@ struct Allocator {
quarantine.PrintStats();
}
-void ForceLock() {
+void ForceLock() ACQUIRE(fallback_mutex) {
allocator.ForceLock();
fallback_mutex.Lock();
}
-void ForceUnlock() {
+void ForceUnlock() RELEASE(fallback_mutex) {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}
@@ -1081,11 +1081,9 @@ uptr asan_mz_size(const void *ptr) {
return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
}
-void asan_mz_force_lock() {
-instance.ForceLock();
-}
+void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
-void asan_mz_force_unlock() {
+void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
instance.ForceUnlock();
}

View File

@@ -322,14 +322,14 @@ void InitShadow() {
THREADLOCAL int in_loader;
BlockingMutex shadow_update_lock(LINKER_INITIALIZED);
-void EnterLoader() {
+void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
if (in_loader == 0) {
shadow_update_lock.Lock();
}
++in_loader;
}
-void ExitLoader() {
+void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
CHECK(in_loader > 0);
--in_loader;
UpdateShadow();

View File

@@ -221,8 +221,8 @@ void UnlockAllocator();
// Returns true if [addr, addr + sizeof(void *)) is poisoned.
bool WordIsPoisoned(uptr addr);
// Wrappers for ThreadRegistry access.
-void LockThreadRegistry();
-void UnlockThreadRegistry();
+void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
ThreadRegistry *GetThreadRegistryLocked();
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
uptr *tls_begin, uptr *tls_end, uptr *cache_begin,

View File

@@ -736,12 +736,12 @@ struct Allocator {
void PrintStats() { allocator.PrintStats(); }
-void ForceLock() {
+void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
allocator.ForceLock();
fallback_mutex.Lock();
}
-void ForceUnlock() {
+void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
fallback_mutex.Unlock();
allocator.ForceUnlock();
}

View File

@@ -185,6 +185,7 @@ set(SANITIZER_IMPL_HEADERS
sanitizer_syscall_linux_riscv64.inc
sanitizer_syscalls_netbsd.inc
sanitizer_thread_registry.h
+sanitizer_thread_safety.h
sanitizer_tls_get_addr.h
sanitizer_vector.h
sanitizer_win.h

View File

@@ -162,8 +162,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) {
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
uptr addr = h->addr_;
uptr hash = calcHash(addr);
Bucket *b = &table_[hash];
@@ -289,57 +289,57 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
h->addidx_ = i;
h->cell_ = c;
}
}
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) {
-if (!h->cell_)
-return;
-Bucket *b = h->bucket_;
-Cell *c = h->cell_;
-uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
-if (h->created_) {
-// Denote completion of insertion.
-CHECK_EQ(addr1, 0);
-// After the following store, the element becomes available
-// for lock-free reads.
-atomic_store(&c->addr, h->addr_, memory_order_release);
-b->mtx.Unlock();
-} else if (h->remove_) {
-// Denote that the cell is empty now.
-CHECK_EQ(addr1, h->addr_);
-atomic_store(&c->addr, 0, memory_order_release);
-// See if we need to compact the bucket.
-AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
-if (h->addidx_ == -1U) {
-// Removed from embed array, move an add element into the freed cell.
-if (add && add->size != 0) {
-uptr last = --add->size;
-Cell *c1 = &add->cells[last];
-c->val = c1->val;
-uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
-atomic_store(&c->addr, addr1, memory_order_release);
-atomic_store(&c1->addr, 0, memory_order_release);
-}
-} else {
-// Removed from add array, compact it.
-uptr last = --add->size;
-Cell *c1 = &add->cells[last];
-if (c != c1) {
-*c = *c1;
-atomic_store(&c1->addr, 0, memory_order_relaxed);
-}
-}
-if (add && add->size == 0) {
-// FIXME(dvyukov): free add?
-}
-b->mtx.Unlock();
-} else {
-CHECK_EQ(addr1, h->addr_);
-if (h->addidx_ != -1U)
-b->mtx.ReadUnlock();
-}
-}
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+if (!h->cell_)
+return;
+Bucket *b = h->bucket_;
+Cell *c = h->cell_;
+uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+if (h->created_) {
+// Denote completion of insertion.
+CHECK_EQ(addr1, 0);
+// After the following store, the element becomes available
+// for lock-free reads.
+atomic_store(&c->addr, h->addr_, memory_order_release);
+b->mtx.Unlock();
+} else if (h->remove_) {
+// Denote that the cell is empty now.
+CHECK_EQ(addr1, h->addr_);
+atomic_store(&c->addr, 0, memory_order_release);
+// See if we need to compact the bucket.
+AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
+if (h->addidx_ == -1U) {
+// Removed from embed array, move an add element into the freed cell.
+if (add && add->size != 0) {
+uptr last = --add->size;
+Cell *c1 = &add->cells[last];
+c->val = c1->val;
+uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+atomic_store(&c->addr, addr1, memory_order_release);
+atomic_store(&c1->addr, 0, memory_order_release);
+}
+} else {
+// Removed from add array, compact it.
+uptr last = --add->size;
+Cell *c1 = &add->cells[last];
+if (c != c1) {
+*c = *c1;
+atomic_store(&c1->addr, 0, memory_order_relaxed);
+}
+}
+if (add && add->size == 0) {
+// FIXME(dvyukov): free add?
+}
+b->mtx.Unlock();
+} else {
+CHECK_EQ(addr1, h->addr_);
+if (h->addidx_ != -1U)
+b->mtx.ReadUnlock();
+}
+}
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {

View File

@@ -177,12 +177,12 @@ class CombinedAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
-void ForceLock() {
+void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
primary_.ForceLock();
secondary_.ForceLock();
}
-void ForceUnlock() {
+void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
secondary_.ForceUnlock();
primary_.ForceUnlock();
}

View File

@@ -312,13 +312,13 @@ class SizeClassAllocator64 {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
-void ForceLock() {
+void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
for (uptr i = 0; i < kNumClasses; i++) {
GetRegionInfo(i)->mutex.Lock();
}
}
-void ForceUnlock() {
+void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
for (int i = (int)kNumClasses - 1; i >= 0; i--) {
GetRegionInfo(i)->mutex.Unlock();
}

View File

@@ -267,13 +267,9 @@ class LargeMmapAllocator {
// ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
// introspection API.
-void ForceLock() {
-mutex_.Lock();
-}
+void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
-void ForceUnlock() {
-mutex_.Unlock();
-}
+void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
// Iterate over all existing chunks.
// The allocator must be locked when calling this function.

View File

@@ -136,8 +136,8 @@ void BlockingMutex::Unlock() {
}
}
-void BlockingMutex::CheckLocked() {
-atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

View File

@@ -677,11 +677,11 @@ void BlockingMutex::Unlock() {
}
}
-void BlockingMutex::CheckLocked() {
-atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
-#endif // !SANITIZER_SOLARIS
+# endif // !SANITIZER_SOLARIS
// ----------------- sanitizer_linux.h
// The actual size of this structure is specified by d_reclen.

View File

@@ -524,7 +524,7 @@ void BlockingMutex::Unlock() {
OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
-void BlockingMutex::CheckLocked() {
+void BlockingMutex::CheckLocked() const {
CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
}

View File

@@ -16,30 +16,29 @@
#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"
namespace __sanitizer {
-class StaticSpinMutex {
+class MUTEX StaticSpinMutex {
public:
void Init() {
atomic_store(&state_, 0, memory_order_relaxed);
}
-void Lock() {
+void Lock() ACQUIRE() {
if (TryLock())
return;
LockSlow();
}
-bool TryLock() {
+bool TryLock() TRY_ACQUIRE(true) {
return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
}
-void Unlock() {
-atomic_store(&state_, 0, memory_order_release);
-}
+void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
-void CheckLocked() {
+void CheckLocked() const CHECK_LOCKED {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
}
@@ -59,7 +58,7 @@ class StaticSpinMutex {
}
};
-class SpinMutex : public StaticSpinMutex {
+class MUTEX SpinMutex : public StaticSpinMutex {
public:
SpinMutex() {
Init();
@@ -70,13 +69,13 @@ class SpinMutex : public StaticSpinMutex {
void operator=(const SpinMutex &) = delete;
};
-class BlockingMutex {
+class MUTEX BlockingMutex {
public:
explicit constexpr BlockingMutex(LinkerInitialized)
: opaque_storage_ {0, }, owner_ {0} {}
BlockingMutex();
-void Lock();
-void Unlock();
+void Lock() ACQUIRE();
+void Unlock() RELEASE();
// This function does not guarantee an explicit check that the calling thread
// is the thread which owns the mutex. This behavior, while more strictly
@@ -85,7 +84,7 @@ class BlockingMutex {
// maintaining complex state to work around those situations, the check only
// checks that the mutex is owned, and assumes callers to be generally
// well-behaved.
-void CheckLocked();
+void CheckLocked() const CHECK_LOCKED;
private:
// Solaris mutex_t has a member that requires 64-bit alignment.
@@ -94,7 +93,7 @@ class BlockingMutex {
};
// Reader-writer spin mutex.
-class RWMutex {
+class MUTEX RWMutex {
public:
RWMutex() {
atomic_store(&state_, kUnlocked, memory_order_relaxed);
@@ -104,7 +103,7 @@ class RWMutex {
CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
-void Lock() {
+void Lock() ACQUIRE() {
u32 cmp = kUnlocked;
if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
memory_order_acquire))
@@ -112,27 +111,27 @@ class RWMutex {
LockSlow();
}
-void Unlock() {
+void Unlock() RELEASE() {
u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
DCHECK_NE(prev & kWriteLock, 0);
(void)prev;
}
-void ReadLock() {
+void ReadLock() ACQUIRE_SHARED() {
u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
if ((prev & kWriteLock) == 0)
return;
ReadLockSlow();
}
-void ReadUnlock() {
+void ReadUnlock() RELEASE_SHARED() {
u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
DCHECK_EQ(prev & kWriteLock, 0);
DCHECK_GT(prev & ~kWriteLock, 0);
(void)prev;
}
-void CheckLocked() {
+void CheckLocked() const CHECK_LOCKED {
CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
}
@@ -175,17 +174,14 @@ class RWMutex {
void operator=(const RWMutex &) = delete;
};
-template<typename MutexType>
-class GenericScopedLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedLock {
public:
-explicit GenericScopedLock(MutexType *mu)
-: mu_(mu) {
+explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
mu_->Lock();
}
-~GenericScopedLock() {
-mu_->Unlock();
-}
+~GenericScopedLock() RELEASE() { mu_->Unlock(); }
private:
MutexType *mu_;
@@ -194,17 +190,14 @@ class GenericScopedLock {
void operator=(const GenericScopedLock &) = delete;
};
-template<typename MutexType>
-class GenericScopedReadLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedReadLock {
public:
-explicit GenericScopedReadLock(MutexType *mu)
-: mu_(mu) {
+explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
mu_->ReadLock();
}
-~GenericScopedReadLock() {
-mu_->ReadUnlock();
-}
+~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
private:
MutexType *mu_;

View File

@@ -149,7 +149,8 @@ class Quarantine {
Cache cache_;
char pad2_[kCacheLineSize];
-void NOINLINE Recycle(uptr min_size, Callback cb) {
+void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
+RELEASE(recycle_mutex_) {
Cache tmp;
{
SpinMutexLock l(&cache_mutex_);

View File

@@ -231,9 +231,7 @@ void BlockingMutex::Unlock() {
CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
}
-void BlockingMutex::CheckLocked() {
-CHECK_EQ((uptr)thr_self(), owner_);
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
} // namespace __sanitizer

View File

@@ -85,7 +85,7 @@ class ThreadContextBase {
typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
-class ThreadRegistry {
+class MUTEX ThreadRegistry {
public:
ThreadRegistry(ThreadContextFactory factory);
ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -94,9 +94,9 @@ class ThreadRegistry {
uptr *alive = nullptr);
uptr GetMaxAliveThreads();
-void Lock() { mtx_.Lock(); }
-void CheckLocked() { mtx_.CheckLocked(); }
-void Unlock() { mtx_.Unlock(); }
+void Lock() ACQUIRE() { mtx_.Lock(); }
+void CheckLocked() const CHECK_LOCKED { mtx_.CheckLocked(); }
+void Unlock() RELEASE() { mtx_.Unlock(); }
// Should be guarded by ThreadRegistryLock.
ThreadContextBase *GetThreadLocked(u32 tid) {

View File

@@ -0,0 +1,42 @@
//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between sanitizer tools.
//
// Wrappers around thread safety annotations.
// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_THREAD_SAFETY_H
#define SANITIZER_THREAD_SAFETY_H
#if defined(__clang__)
# define THREAD_ANNOTATION(x) __attribute__((x))
#else
# define THREAD_ANNOTATION(x)
#endif
#define MUTEX THREAD_ANNOTATION(capability("mutex"))
#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) \
THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) \
THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) \
THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
#define CHECK_LOCKED THREAD_ANNOTATION(assert_capability(this))
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
#endif
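
As used throughout this change: ACQUIRE/RELEASE annotate functions that
leave a mutex held on return or release one held on entry, while
NO_THREAD_SAFETY_ANALYSIS opts out code whose locking is conditional or
intentionally unbalanced. A hedged sketch using these wrappers (the names
below are hypothetical, mirroring the EnterLoader/ExitLoader pattern above):

  BlockingMutex example_mtx(LINKER_INITIALIZED);
  int example_data GUARDED_BY(example_mtx);

  void LockExample() ACQUIRE(example_mtx) { example_mtx.Lock(); }
  void UnlockExample() RELEASE(example_mtx) { example_mtx.Unlock(); }

  // Conditional locking confuses the analysis, so this function opts out.
  void MaybeTouch(bool already_locked) NO_THREAD_SAFETY_ANALYSIS {
    if (!already_locked)
      example_mtx.Lock();
    example_data++;
    if (!already_locked)
      example_mtx.Unlock();
  }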

View File

@@ -832,9 +832,7 @@ void BlockingMutex::Unlock() {
ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
}
-void BlockingMutex::CheckLocked() {
-CHECK_EQ(owner_, GetThreadSelf());
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
uptr GetTlsSize() {
return 0;

View File

@@ -300,7 +300,7 @@ struct Allocator {
// Allocates a chunk.
void *allocate(uptr Size, uptr Alignment, AllocType Type,
-bool ForceZeroContents = false) {
+bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
initThreadMaybe();
if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -405,7 +405,7 @@ struct Allocator {
// a zero-sized quarantine, or if the size of the chunk is greater than the
// quarantine chunk size threshold.
void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
-uptr Size) {
+uptr Size) NO_THREAD_SAFETY_ANALYSIS {
const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
if (BypassQuarantine) {
UnpackedHeader NewHeader = *Header;

View File

@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
void init();
void commitBack();
-inline bool tryLock() {
+inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
if (Mutex.TryLock()) {
atomic_store_relaxed(&Precedence, 0);
return true;
@@ -40,12 +40,12 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
return false;
}
-inline void lock() {
+inline void lock() ACQUIRE(Mutex) {
atomic_store_relaxed(&Precedence, 0);
Mutex.Lock();
}
-inline void unlock() { Mutex.Unlock(); }
+inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }

View File

@@ -34,7 +34,7 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
initThread(MinimalInit);
}
-ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
FallbackTSD.lock();
*UnlockRequired = true;

View File

@@ -531,7 +531,7 @@ int Finalize(ThreadState *thr) {
}
#if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) {
+void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
ctx->thread_registry->Lock();
ctx->report_mtx.Lock();
// Suppress all reports in the pthread_atfork callbacks.
@@ -545,14 +545,14 @@ void ForkBefore(ThreadState *thr, uptr pc) {
thr->ignore_interceptors++;
}
-void ForkParentAfter(ThreadState *thr, uptr pc) {
+void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
ctx->report_mtx.Unlock();
ctx->thread_registry->Unlock();
}
-void ForkChildAfter(ThreadState *thr, uptr pc) {
+void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
thr->suppress_reports--; // Enabled in ForkBefore.
thr->ignore_interceptors--;
ctx->report_mtx.Unlock();