TestCases/Misc/Linux/sigaction.cpp fails because dlsym() may call malloc on failure. And then the wrapped malloc appears to access thread local storage using global dynamic accesses, thus calling ___interceptor___tls_get_addr, before REAL(__tls_get_addr) has been set, so we get a crash inside ___interceptor___tls_get_addr. For example, this can happen when looking up __isoc23_scanf which might not exist in some libcs. Fix this by marking the thread local variable accessed inside the debug checks as "initial-exec", which does not require __tls_get_addr. This is probably a better alternative to https://github.com/llvm/llvm-project/pull/83886. This fixes a different crash but is related to https://github.com/llvm/llvm-project/issues/46204. Backtrace: ``` #0 0x0000000000000000 in ?? () #1 0x00007ffff6a9d89e in ___interceptor___tls_get_addr (arg=0x7ffff6b27be8) at /path/to/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:2759 #2 0x00007ffff6a46bc6 in __sanitizer::CheckedMutex::LockImpl (this=0x7ffff6b27be8, pc=140737331846066) at /path/to/llvm/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp:218 #3 0x00007ffff6a448b2 in __sanitizer::CheckedMutex::Lock (this=0x7ffff6b27be8, this@entry=0x730000000580) at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_mutex.h:129 #4 __sanitizer::Mutex::Lock (this=0x7ffff6b27be8, this@entry=0x730000000580) at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_mutex.h:167 #5 0x00007ffff6abdbb2 in __sanitizer::GenericScopedLock<__sanitizer::Mutex>::GenericScopedLock (mu=0x730000000580, this=<optimized out>) at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_mutex.h:383 #6 __sanitizer::SizeClassAllocator64<__tsan::AP64>::GetFromAllocator (this=0x7ffff7487dc0 <__tsan::allocator_placeholder>, stat=stat@entry=0x7ffff570db68, class_id=11, chunks=chunks@entry=0x7ffff5702cc8, n_chunks=n_chunks@entry=128) at 
/path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_allocator_primary64.h:207 #7 0x00007ffff6abdaa0 in __sanitizer::SizeClassAllocator64LocalCache<__sanitizer::SizeClassAllocator64<__tsan::AP64> >::Refill (this=<optimized out>, c=c@entry=0x7ffff5702cb8, allocator=<optimized out>, class_id=<optimized out>) at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_allocator_local_cache.h:103 #8 0x00007ffff6abd731 in __sanitizer::SizeClassAllocator64LocalCache<__sanitizer::SizeClassAllocator64<__tsan::AP64> >::Allocate (this=0x7ffff6b27be8, allocator=0x7ffff5702cc8, class_id=140737311157448) at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_allocator_local_cache.h:39 #9 0x00007ffff6abc397 in __sanitizer::CombinedAllocator<__sanitizer::SizeClassAllocator64<__tsan::AP64>, __sanitizer::LargeMmapAllocatorPtrArrayDynamic>::Allocate (this=0x7ffff5702cc8, cache=0x7ffff6b27be8, size=<optimized out>, size@entry=175, alignment=alignment@entry=16) at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_allocator_combined.h:69 #10 0x00007ffff6abaa6a in __tsan::user_alloc_internal (thr=0x7ffff7ebd980, pc=140737331499943, sz=sz@entry=175, align=align@entry=16, signal=true) at /path/to/llvm/compiler-rt/lib/tsan/rtl/tsan_mman.cpp:198 #11 0x00007ffff6abb0d1 in __tsan::user_alloc (thr=0x7ffff6b27be8, pc=140737331846066, sz=11, sz@entry=175) at /path/to/llvm/compiler-rt/lib/tsan/rtl/tsan_mman.cpp:223 #12 0x00007ffff6a693b5 in ___interceptor_malloc (size=175) at /path/to/llvm/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp:666 #13 0x00007ffff7fce7f2 in malloc (size=175) at ../include/rtld-malloc.h:56 #14 __GI__dl_exception_create_format (exception=exception@entry=0x7fffffffd0d0, objname=0x7ffff7fc3550 "/path/to/llvm/compiler-rt/cmake-build-all-sanitizers/lib/linux/libclang_rt.tsan-x86_64.so", fmt=fmt@entry=0x7ffff7ff2db9 "undefined symbol: %s%s%s") at ./elf/dl-exception.c:157 #15 0x00007ffff7fd50e8 in 
_dl_lookup_symbol_x (undef_name=0x7ffff6af868b "__isoc23_scanf", undef_map=<optimized out>, ref=0x7fffffffd148, symbol_scope=<optimized out>, version=<optimized out>, type_class=0, flags=2, skip_map=0x7ffff7fc35e0) at ./elf/dl-lookup.c:793 --Type <RET> for more, q to quit, c to continue without paging-- #16 0x00007ffff656d6ed in do_sym (handle=<optimized out>, name=0x7ffff6af868b "__isoc23_scanf", who=0x7ffff6a3bb84 <__interception::InterceptFunction(char const*, unsigned long*, unsigned long, unsigned long)+36>, vers=vers@entry=0x0, flags=flags@entry=2) at ./elf/dl-sym.c:146 #17 0x00007ffff656d9dd in _dl_sym (handle=<optimized out>, name=<optimized out>, who=<optimized out>) at ./elf/dl-sym.c:195 #18 0x00007ffff64a2854 in dlsym_doit (a=a@entry=0x7fffffffd3b0) at ./dlfcn/dlsym.c:40 #19 0x00007ffff7fcc489 in __GI__dl_catch_exception (exception=exception@entry=0x7fffffffd310, operate=0x7ffff64a2840 <dlsym_doit>, args=0x7fffffffd3b0) at ./elf/dl-catch.c:237 #20 0x00007ffff7fcc5af in _dl_catch_error (objname=0x7fffffffd368, errstring=0x7fffffffd370, mallocedp=0x7fffffffd367, operate=<optimized out>, args=<optimized out>) at ./elf/dl-catch.c:256 #21 0x00007ffff64a2257 in _dlerror_run (operate=operate@entry=0x7ffff64a2840 <dlsym_doit>, args=args@entry=0x7fffffffd3b0) at ./dlfcn/dlerror.c:138 #22 0x00007ffff64a28e5 in dlsym_implementation (dl_caller=<optimized out>, name=<optimized out>, handle=<optimized out>) at ./dlfcn/dlsym.c:54 #23 ___dlsym (handle=<optimized out>, name=<optimized out>) at ./dlfcn/dlsym.c:68 #24 0x00007ffff6a3bb84 in __interception::GetFuncAddr (name=0x7ffff6af868b "__isoc23_scanf", trampoline=140737311157448) at /path/to/llvm/compiler-rt/lib/interception/interception_linux.cpp:42 #25 __interception::InterceptFunction (name=0x7ffff6af868b "__isoc23_scanf", ptr_to_real=0x7ffff74850e8 <__interception::real___isoc23_scanf>, func=11, trampoline=140737311157448) at /path/to/llvm/compiler-rt/lib/interception/interception_linux.cpp:61 #26 0x00007ffff6a9f2d9 
in InitializeCommonInterceptors () at /path/to/llvm/compiler-rt/lib/tsan/rtl/../../sanitizer_common/sanitizer_common_interceptors.inc:10315 ``` Reviewed By: vitalybuka, MaskRay Pull Request: https://github.com/llvm/llvm-project/pull/83890
228 lines
7.1 KiB
C++
//===-- sanitizer_mutex.cpp -----------------------------------------------===//
|
|
//
|
|
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
// See https://llvm.org/LICENSE.txt for license information.
|
|
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
|
//
|
|
//===----------------------------------------------------------------------===//
|
|
//
|
|
// This file is shared between AddressSanitizer and ThreadSanitizer
|
|
// run-time libraries.
|
|
//===----------------------------------------------------------------------===//
|
|
|
|
#include "sanitizer_mutex.h"
|
|
|
|
#include "sanitizer_common.h"
|
|
|
|
namespace __sanitizer {
|
|
|
|
void StaticSpinMutex::LockSlow() {
|
|
for (int i = 0;; i++) {
|
|
if (i < 100)
|
|
proc_yield(1);
|
|
else
|
|
internal_sched_yield();
|
|
if (atomic_load(&state_, memory_order_relaxed) == 0 &&
|
|
atomic_exchange(&state_, 1, memory_order_acquire) == 0)
|
|
return;
|
|
}
|
|
}
|
|
|
|
void Semaphore::Wait() {
|
|
u32 count = atomic_load(&state_, memory_order_relaxed);
|
|
for (;;) {
|
|
if (count == 0) {
|
|
FutexWait(&state_, 0);
|
|
count = atomic_load(&state_, memory_order_relaxed);
|
|
continue;
|
|
}
|
|
if (atomic_compare_exchange_weak(&state_, &count, count - 1,
|
|
memory_order_acquire))
|
|
break;
|
|
}
|
|
}
|
|
|
|
// Makes `count` permits available and wakes up to `count` waiters.
// The permits must be published (release add) before the wake so that a
// woken waiter's acquire CAS in Wait() observes them.
void Semaphore::Post(u32 count) {
  CHECK_NE(count, 0);
  atomic_fetch_add(&state_, count, memory_order_release);
  FutexWake(&state_, count);
}
|
|
|
|
#if SANITIZER_CHECK_DEADLOCKS
// An empty mutex meta table, it effectively disables deadlock detection.
// Each tool can override the table to define its own mutex hierarchy and
// enable deadlock detection.
// The table defines a static mutex type hierarchy (what mutex types can be locked
// under what mutex types). This table is checked to be acyclic and then
// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
// The checking happens on mutex types rather than on individual mutex instances
// because doing it on mutex instances will both significantly complicate
// the implementation, worsen performance and memory overhead and is mostly
// unnecessary (we almost never lock multiple mutexes of the same type recursively).
static constexpr int kMutexTypeMax = 20;
SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
// Serializes the lazy one-time DebugMutexInit() call (see
// InternalDeadlockDetector::Initialize).
static StaticSpinMutex mutex_meta_mtx;
// Number of mutex types defined in mutex_meta; -1 until DebugMutexInit()
// has run.
static int mutex_type_count = -1;
// Adjacency matrix of what mutexes can be locked under what mutexes.
static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
// Mutex types with MutexMulti mark.
static bool mutex_multi[kMutexTypeMax];
|
|
|
|
// Builds the lock-order tables (mutex_can_lock, mutex_multi) from the
// (weakly overridable) mutex_meta table and verifies that the resulting
// can-lock graph is acyclic. Called once, lazily, under mutex_meta_mtx.
void DebugMutexInit() {
  // Build adjacency matrix.
  bool leaf[kMutexTypeMax];
  internal_memset(&leaf, 0, sizeof(leaf));
  int cnt[kMutexTypeMax];
  internal_memset(&cnt, 0, sizeof(cnt));
  for (int t = 0; t < kMutexTypeMax; t++) {
    // Set before the break below so mutex_type_count ends up at the index
    // of the first unnamed (terminator) entry.
    mutex_type_count = t;
    if (!mutex_meta[t].name)
      break;
    CHECK_EQ(t, mutex_meta[t].type);
    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
      MutexType z = mutex_meta[t].can_lock[j];
      if (z == MutexInvalid)
        break;  // end of this type's can_lock list
      if (z == MutexLeaf) {
        CHECK(!leaf[t]);
        leaf[t] = true;
        continue;
      }
      if (z == MutexMulti) {
        mutex_multi[t] = true;
        continue;
      }
      CHECK_LT(z, kMutexTypeMax);
      CHECK(!mutex_can_lock[t][z]);
      mutex_can_lock[t][z] = true;
      cnt[t]++;
    }
  }
  // Indicates the array is not properly terminated.
  CHECK_LT(mutex_type_count, kMutexTypeMax);
  // Add leaf mutexes: a leaf type may be locked under any non-leaf type,
  // but must not itself declare anything lockable under it (cnt == 0).
  for (int t = 0; t < mutex_type_count; t++) {
    if (!leaf[t])
      continue;
    CHECK_EQ(cnt[t], 0);
    for (int z = 0; z < mutex_type_count; z++) {
      if (z == MutexInvalid || t == z || leaf[z])
        continue;
      CHECK(!mutex_can_lock[z][t]);
      mutex_can_lock[z][t] = true;
    }
  }
  // Build the transitive closure and check that the graph is acyclic.
  // trans[i] is a bitset of all types reachable (lockable, transitively)
  // from type i.
  u32 trans[kMutexTypeMax];
  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
                "kMutexTypeMax does not fit into u32, switch to u64");
  internal_memset(&trans, 0, sizeof(trans));
  for (int i = 0; i < mutex_type_count; i++) {
    for (int j = 0; j < mutex_type_count; j++)
      if (mutex_can_lock[i][j])
        trans[i] |= 1 << j;
  }
  // Warshall-style closure: anything reachable from k is reachable from any
  // i that reaches k.
  for (int k = 0; k < mutex_type_count; k++) {
    for (int i = 0; i < mutex_type_count; i++) {
      if (trans[i] & (1 << k))
        trans[i] |= trans[k];
    }
  }
  // A type reachable from itself means a cycle in the declared hierarchy.
  for (int i = 0; i < mutex_type_count; i++) {
    if (trans[i] & (1 << i)) {
      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
      Die();
    }
  }
}
|
|
|
|
// Per-thread lock-order tracker. Records which checked mutex types the
// current thread currently holds and validates every new acquisition
// against the static mutex_can_lock hierarchy built by DebugMutexInit().
struct InternalDeadlockDetector {
  struct LockDesc {
    u64 seq;        // 0 = not held; otherwise acquisition-order stamp
    uptr pc;        // PC of the acquisition, for diagnostics
    int recursion;  // recursive lock depth (types marked MutexMulti)
  };
  // 0 = not yet initialized, >0 = checking enabled, <0 = checking disabled.
  int initialized;
  u64 sequence;  // monotonically increasing stamp generator for LockDesc::seq
  LockDesc locked[kMutexTypeMax];

  // Called on every checked mutex lock; aborts on a hierarchy violation.
  void Lock(MutexType type, uptr pc) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    // Find the last locked mutex type.
    // This is the type we will use for hierarchy checks.
    u64 max_seq = 0;
    MutexType max_idx = MutexInvalid;
    for (int i = 0; i != mutex_type_count; i++) {
      if (locked[i].seq == 0)
        continue;
      CHECK_NE(locked[i].seq, max_seq);
      if (max_seq < locked[i].seq) {
        max_seq = locked[i].seq;
        max_idx = (MutexType)i;
      }
    }
    if (max_idx == type && mutex_multi[type]) {
      // Recursive lock of the same type.
      CHECK_EQ(locked[type].seq, max_seq);
      CHECK(locked[type].pc);
      locked[type].recursion++;
      return;
    }
    // Locking `type` while holding `max_idx` must be allowed by the table.
    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
      Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
             mutex_meta[type].name, mutex_meta[max_idx].name);
      PrintMutexPC(locked[max_idx].pc);
      CHECK(0);
    }
    locked[type].seq = ++sequence;
    locked[type].pc = pc;
    locked[type].recursion = 1;
  }

  // Called on every checked mutex unlock; clears the slot once the
  // recursion count drops to zero.
  void Unlock(MutexType type) {
    if (!Initialize(type))
      return;
    CHECK_LT(type, mutex_type_count);
    CHECK(locked[type].seq);
    CHECK_GT(locked[type].recursion, 0);
    if (--locked[type].recursion)
      return;  // still held recursively
    locked[type].seq = 0;
    locked[type].pc = 0;
  }

  // Asserts the current thread holds no checked mutexes.
  void CheckNoLocks() {
    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
  }

  // Lazily triggers DebugMutexInit() and reports whether checking is
  // enabled for `type`. Returns false for unchecked/invalid types or when
  // the tool defines no mutex hierarchy (empty mutex_meta).
  bool Initialize(MutexType type) {
    if (type == MutexUnchecked || type == MutexInvalid)
      return false;
    CHECK_GT(type, MutexInvalid);
    if (initialized != 0)
      return initialized > 0;
    // Mark as disabled before taking the lock so that mutexes locked during
    // initialization (mutex_meta_mtx itself) are not checked re-entrantly.
    initialized = -1;
    SpinMutexLock lock(&mutex_meta_mtx);
    if (mutex_type_count < 0)
      DebugMutexInit();
    initialized = mutex_type_count ? 1 : -1;
    return initialized > 0;
  }
};
|
|
// This variable is used by the __tls_get_addr interceptor, so cannot use the
// global-dynamic TLS model, as that would result in crashes.
// (initial-exec accesses resolve through a fixed TLS offset and never call
// __tls_get_addr, so they are safe before REAL(__tls_get_addr) is set up,
// e.g. when dlsym() calls the intercepted malloc during interceptor init.)
__attribute__((tls_model("initial-exec"))) static THREADLOCAL
InternalDeadlockDetector deadlock_detector;
|
|
|
|
// Records acquisition of this mutex in the per-thread detector.
void CheckedMutex::LockImpl(uptr pc) {
  deadlock_detector.Lock(type_, pc);
}
|
|
|
|
// Records release of this mutex in the per-thread detector.
void CheckedMutex::UnlockImpl() {
  deadlock_detector.Unlock(type_);
}
|
|
|
|
// Asserts the calling thread holds no checked mutexes.
void CheckedMutex::CheckNoLocksImpl() {
  deadlock_detector.CheckNoLocks();
}
|
|
#endif
|
|
|
|
} // namespace __sanitizer
|