diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 795e05394d71a..860e23630558d 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_platform_interceptors.h"
 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -2141,13 +2142,29 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
   // StackTrace::GetNestInstructionPc(pc) is used because return address is
   // expected, OutputReport() will undo this.
   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeErrnoInSignal);
-  rep.SetSigNum(sig);
-  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
-    rep.AddStack(stack, true);
-    OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  bool suppressed;
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeErrnoInSignal);
+    rep->SetSigNum(sig);
+    suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack);
+    if (!suppressed)
+      rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks before writing report
+#endif
+  if (!suppressed)
+    OutputReport(thr, *rep);
+#if !SANITIZER_APPLE
   }
+#endif
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
 
 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index befd6a369026d..9ec34506a562b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -437,16 +437,29 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
 }
 
 static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeMutexHeldWrongContext);
-  for (uptr i = 0; i < thr->mset.Size(); ++i) {
-    MutexSet::Desc desc = thr->mset.Get(i);
-    rep.AddMutex(desc.addr, desc.stack_id);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeMutexHeldWrongContext);
+    for (uptr i = 0; i < thr->mset.Size(); ++i) {
+      MutexSet::Desc desc = thr->mset.Get(i);
+      rep->AddMutex(desc.addr, desc.stack_id);
+    }
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep->AddStack(trace, true);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+  OutputReport(thr, *rep);
+#else
+  OutputReport(thr, *rep);
   }
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-  OutputReport(thr, rep);
+#endif
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
 
 INTERFACE_ATTRIBUTE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 0ea83fb3b5982..37faacb56d7ff 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -182,10 +182,23 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   ObtainCurrentStack(thr, pc, &stack);
   if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeSignalUnsafe);
-  rep.AddStack(stack, true);
-  OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeSignalUnsafe);
+    rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+  OutputReport(thr, *rep);
+#else
+  OutputReport(thr, *rep);
+  }
+#endif
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 487fa490636eb..02719beac9f07 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -419,6 +419,11 @@ NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
 
 ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
                                      uptr size, AccessType typ) {
+#if SANITIZER_APPLE && !SANITIZER_GO
+  // Swift symbolizer can be intercepted and deadlock without this
+  if (thr->in_symbolizer)
+    return;
+#endif
   RawShadow* shadow_mem = MemToShadow(addr);
   UNUSED char memBuf[4][64];
   DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 2a2bf42c92ecb..70a8cc8ce3674 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -11,14 +11,15 @@
 //===----------------------------------------------------------------------===//
 
 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_placement_new.h>
 #include <sanitizer_common/sanitizer_stackdepot.h>
 
-#include "tsan_rtl.h"
 #include "tsan_flags.h"
-#include "tsan_sync.h"
+#include "tsan_platform.h"
 #include "tsan_report.h"
+#include "tsan_rtl.h"
 #include "tsan_symbolize.h"
-#include "tsan_platform.h"
+#include "tsan_sync.h"
 
 namespace __tsan {
 
@@ -55,14 +56,27 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
     return;
   if (!ShouldReport(thr, typ))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(typ);
-  rep.AddMutex(addr, creation_stack_id);
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-  rep.AddLocation(addr, 1);
-  OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(typ);
+    rep->AddMutex(addr, creation_stack_id);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep->AddStack(trace, true);
+    rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+  OutputReport(thr, *rep);
+#else
+  OutputReport(thr, *rep);
+  }
+#endif
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
 
 static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
@@ -528,53 +542,79 @@ void AfterSleep(ThreadState *thr, uptr pc) {
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
   if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeDeadlock);
-  for (int i = 0; i < r->n; i++) {
-    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
-    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
-    rep.AddThread((int)r->loop[i].thr_ctx);
-  }
-  uptr dummy_pc = 0x42;
-  for (int i = 0; i < r->n; i++) {
-    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
-      u32 stk = r->loop[i].stk[j];
-      StackTrace stack;
-      if (stk && stk != kInvalidStackID) {
-        stack = StackDepotGet(stk);
-      } else {
-        // Sometimes we fail to extract the stack trace (FIXME: investigate),
-        // but we should still produce some stack trace in the report.
-        stack = StackTrace(&dummy_pc, 1);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (rep) ScopedReport(ReportTypeDeadlock);
+    for (int i = 0; i < r->n; i++) {
+      rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
+      rep->AddUniqueTid((int)r->loop[i].thr_ctx);
+      rep->AddThread((int)r->loop[i].thr_ctx);
+    }
+    uptr dummy_pc = 0x42;
+    for (int i = 0; i < r->n; i++) {
+      for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+        u32 stk = r->loop[i].stk[j];
+        StackTrace stack;
+        if (stk && stk != kInvalidStackID) {
+          stack = StackDepotGet(stk);
+        } else {
+          // Sometimes we fail to extract the stack trace (FIXME: investigate),
+          // but we should still produce some stack trace in the report.
+          stack = StackTrace(&dummy_pc, 1);
+        }
+        rep->AddStack(stack, true);
       }
-      rep.AddStack(stack, true);
     }
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+  OutputReport(thr, *rep);
+#else
+  OutputReport(thr, *rep);
   }
-  OutputReport(thr, rep);
+#endif
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
 
 void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                          FastState last_lock, StackID creation_stack_id) {
-  // We need to lock the slot during RestoreStack because it protects
-  // the slot journal.
-  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
-  ThreadRegistryLock l0(&ctx->thread_registry);
-  Lock slots_lock(&ctx->slot_mtx);
-  ScopedReport rep(ReportTypeMutexDestroyLocked);
-  rep.AddMutex(addr, creation_stack_id);
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-
-  Tid tid;
-  DynamicMutexSet mset;
-  uptr tag;
-  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
-                    0, kAccessWrite, &tid, &trace, mset, &tag))
-    return;
-  rep.AddStack(trace, true);
-  rep.AddLocation(addr, 1);
-  OutputReport(thr, rep);
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    // We need to lock the slot during RestoreStack because it protects
+    // the slot journal.
+    Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+    ThreadRegistryLock l0(&ctx->thread_registry);
+    Lock slots_lock(&ctx->slot_mtx);
+    new (rep) ScopedReport(ReportTypeMutexDestroyLocked);
+    rep->AddMutex(addr, creation_stack_id);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep->AddStack(trace, true);
+
+    Tid tid;
+    DynamicMutexSet mset;
+    uptr tag;
+    if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
+                      addr, 0, kAccessWrite, &tid, &trace, mset, &tag))
+      return;
+    rep->AddStack(trace, true);
+    rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+  OutputReport(thr, *rep);
+#else
+  OutputReport(thr, *rep);
+  }
+#endif
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
 
 }  // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index e6f0fda9c72af..41009a50f12d7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -16,6 +16,7 @@
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
 #include "tsan_fd.h"
 #include "tsan_flags.h"
 #include "tsan_mman.h"
@@ -806,65 +807,80 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
   DynamicMutexSet mset1;
   MutexSet *mset[kMop] = {&thr->mset, mset1};
 
-  // We need to lock the slot during RestoreStack because it protects
-  // the slot journal.
-  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
-  ThreadRegistryLock l0(&ctx->thread_registry);
-  Lock slots_lock(&ctx->slot_mtx);
-  if (SpuriousRace(old))
-    return;
-  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
-                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
-    StoreShadow(&ctx->last_spurious_race, old.raw());
-    return;
-  }
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the below locks released
+  // before symbolizing in order to avoid a deadlock
+  {
+    // We need to lock the slot during RestoreStack because it protects
+    // the slot journal.
+    Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+    ThreadRegistryLock l0(&ctx->thread_registry);
+    Lock slots_lock(&ctx->slot_mtx);
+    if (SpuriousRace(old))
+      return;
+    if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+                      size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+      StoreShadow(&ctx->last_spurious_race, old.raw());
+      return;
+    }
 
-  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
-    return;
+    if (IsFiredSuppression(ctx, rep_typ, traces[1]))
+      return;
 
-  if (HandleRacyStacks(thr, traces))
-    return;
+    if (HandleRacyStacks(thr, traces))
+      return;
 
-  // If any of the accesses has a tag, treat this as an "external" race.
-  uptr tag = kExternalTagNone;
-  for (uptr i = 0; i < kMop; i++) {
-    if (tags[i] != kExternalTagNone) {
-      rep_typ = ReportTypeExternalRace;
-      tag = tags[i];
-      break;
+    // If any of the accesses has a tag, treat this as an "external" race.
+    uptr tag = kExternalTagNone;
+    for (uptr i = 0; i < kMop; i++) {
+      if (tags[i] != kExternalTagNone) {
+        rep_typ = ReportTypeExternalRace;
+        tag = tags[i];
+        break;
+      }
     }
-  }
-  ScopedReport rep(rep_typ, tag);
-  for (uptr i = 0; i < kMop; i++)
-    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
+    new (rep) ScopedReport(rep_typ, tag);
+    for (uptr i = 0; i < kMop; i++)
+      rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
 
-  for (uptr i = 0; i < kMop; i++) {
-    ThreadContext *tctx = static_cast<ThreadContext *>(
-        ctx->thread_registry.GetThreadLocked(tids[i]));
-    rep.AddThread(tctx);
-  }
+    for (uptr i = 0; i < kMop; i++) {
+      ThreadContext *tctx = static_cast<ThreadContext *>(
+          ctx->thread_registry.GetThreadLocked(tids[i]));
+      rep->AddThread(tctx);
+    }
 
-  rep.AddLocation(addr_min, addr_max - addr_min);
-
-  if (flags()->print_full_thread_history) {
-    const ReportDesc *rep_desc = rep.GetReport();
-    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
-      Tid parent_tid = rep_desc->threads[i]->parent_tid;
-      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
-        continue;
-      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
-          ctx->thread_registry.GetThreadLocked(parent_tid));
-      rep.AddThread(parent_tctx);
+    rep->AddLocation(addr_min, addr_max - addr_min);
+
+    if (flags()->print_full_thread_history) {
+      const ReportDesc *rep_desc = rep->GetReport();
+      for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+        Tid parent_tid = rep_desc->threads[i]->parent_tid;
+        if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+          continue;
+        ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+            ctx->thread_registry.GetThreadLocked(parent_tid));
+        rep->AddThread(parent_tctx);
+      }
     }
-  }
 #if !SANITIZER_GO
-  if (!((typ0 | typ1) & kAccessFree) &&
-      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
-    rep.AddSleep(thr->last_sleep_stack_id);
+    if (!((typ0 | typ1) & kAccessFree) &&
+        s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+      rep->AddSleep(thr->last_sleep_stack_id);
+#endif
+
+#if SANITIZER_APPLE
+  }  // Close this scope to release the locks
+  OutputReport(thr, *rep);
+#else
+  OutputReport(thr, *rep);
+  }
 #endif
-  OutputReport(thr, rep);
+
+  // Need to manually destroy this because we used placement new to allocate
+  rep->~ScopedReport();
 }
 
 void PrintCurrentStack(ThreadState *thr, uptr pc) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index c6a8fd2acb6a8..7a20348016946 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -88,15 +88,30 @@ void ThreadFinalize(ThreadState *thr) {
 #if !SANITIZER_GO
   if (!ShouldReport(thr, ReportTypeThreadLeak))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
   Vector<ThreadLeak> leaks;
-  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
-                                                      &leaks);
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+                                                        &leaks);
+  }
+
+  // Use alloca, because malloc during signal handling deadlocks
+  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
   for (uptr i = 0; i < leaks.Size(); i++) {
-    ScopedReport rep(ReportTypeThreadLeak);
-    rep.AddThread(leaks[i].tctx, true);
-    rep.SetCount(leaks[i].count);
-    OutputReport(thr, rep);
+    {
+      ThreadRegistryLock l(&ctx->thread_registry);
+      new (rep) ScopedReport(ReportTypeThreadLeak);
+      rep->AddThread(leaks[i].tctx, true);
+      rep->SetCount(leaks[i].count);
+#  if SANITIZER_APPLE
+    }  // Close this scope to release the locks
+    OutputReport(thr, *rep);
+#  else
+    OutputReport(thr, *rep);
+    }
+#  endif
+    // Need to manually destroy this because we used placement new to allocate
+    rep->~ScopedReport();
   }
 #endif
 }
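
Note for reviewers: every report site in this patch repeats the same three-step pattern, so a minimal standalone sketch of it may help. All names below (`Report`, `EmitReport`, `g_registry_mtx`) are illustrative stand-ins, not TSan's real API, and an aligned char buffer stands in for `__builtin_alloca` so the example stays portable:

```cpp
// Sketch of the pattern: construct the report into stack storage with
// placement new (no heap allocation, which can deadlock inside a signal
// handler), release the registry lock before the slow output/symbolization
// step, and destroy the object manually since no automatic destructor runs.
#include <cstdio>
#include <mutex>
#include <new>  // placement new

struct Report {  // stand-in for ScopedReport
  explicit Report(int t) : typ(t) {}
  ~Report() { std::puts("report destroyed"); }
  int typ;
};

static std::mutex g_registry_mtx;  // stand-in for ctx->thread_registry

static void EmitReport(int typ) {
  // Stack storage instead of malloc; the patch uses alloca, a buffer here.
  alignas(Report) unsigned char buf[sizeof(Report)];
  Report *rep;
  {
    std::lock_guard<std::mutex> l(g_registry_mtx);
    rep = new (buf) Report(typ);  // constructed while the lock is held
  }  // lock released here -- the SANITIZER_APPLE path in the patch
  std::printf("output report type %d\n", rep->typ);  // symbolize unlocked
  rep->~Report();  // manual destroy: placement new skips automatic cleanup
}

int main() { EmitReport(42); }
```

The non-Apple branches deliberately keep `OutputReport` inside the scope, with the locks still held, which is why each site carries the `#if SANITIZER_APPLE` / `#else` pair around the closing brace instead of a single unconditional scope.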