#ifndef TSAN_RTL_H
#define TSAN_RTL_H
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector_clock.h"
#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif
namespace __tsan {
#if !SANITIZER_GO
struct MapUnmapCallback;
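// Platforms with a small or fragmented virtual address space (MIPS64,
// AArch64, LoongArch, PowerPC, RISC-V 64) use the 32-bit-style size-class
// allocator parameterized by AP32; everything else uses the 64-bit one (AP64).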
# if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
     defined(__powerpc__) || SANITIZER_RISCV64
struct AP32 {
static const uptr kSpaceBeg = 0;
static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
static const uptr kMetadataSize = 0;
typedef __sanitizer::CompactSizeClassMap SizeClassMap;
static const uptr kRegionSizeLog = 20;
using AddressSpaceView = LocalAddressSpaceView;
typedef __tsan::MapUnmapCallback MapUnmapCallback;
static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 { … };
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif
struct ThreadSignalContext;
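// State saved by the setjmp interceptor so that a later longjmp can restore
// the shadow stack and signal context to a consistent point (fields elided).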
struct JmpBuf { … };
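// A Processor represents a physical thread, or a P for Go. It holds resources
// that are not tied to race detection logic (e.g. the allocator cache) and is
// invisible to the end user. In C/C++ one is wired to each OS thread.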
struct Processor { … };
#if !SANITIZER_GO
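// Temporarily sets up a global Processor in the current thread if it has
// none. Intended for interceptors that can run at the very end of a thread's
// life, when its own Processor has already been destroyed.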
struct ScopedGlobalProcessor { … };
#endif
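// A (tid, epoch) pair and the cache-line-aligned slot structure used to
// multiplex application threads onto the runtime's fixed set of slots.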
struct TidEpoch { … };
struct alignas(SANITIZER_CACHE_LINE_SIZE) TidSlot { … };
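// Per-thread state, stored in TLS and aligned to a cache line to avoid
// false sharing between threads.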
struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState { … };
#if !SANITIZER_GO
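// Access to the current thread's ThreadState. Apple and Android cannot rely
// on ordinary TLS here, so they go through out-of-line accessors; other
// platforms keep the state in a THREADLOCAL placeholder with the
// initial-exec TLS model for cheap access.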
# if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
# else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() { … }
inline ThreadState *cur_thread_init() { … }
inline void set_cur_thread(ThreadState *thr) { … }
inline void cur_thread_finalize() { … }
# endif
#endif
class ThreadContext final : public ThreadContextBase { … };
struct RacyStacks { … };
struct RacyAddress { … };
struct FiredSuppression { … };
struct Context { … };
extern Context *ctx;
ALWAYS_INLINE Flags *flags() { … }
struct ScopedIgnoreInterceptors { … };
const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);
class ScopedReportBase { … };
class ScopedReport : public ScopedReportBase { … };
bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
template<typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) { … }
template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
uptr *tag = nullptr) { … }
#define GET_STACK_TRACE_FATAL(thr, pc) …
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();
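// fork() support: ForkBefore acquires the global runtime locks so the child
// does not inherit them in a locked state; the *After hooks release them and,
// in the child, reset runtime state as needed.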
void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);
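// Debug tracing, compiled in only when TSAN_DEBUG_OUTPUT is defined:
// level >= 1 enables DPrintf, level >= 2 additionally enables DPrintf2.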
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
#define DPrintf …
#else
#define DPrintf(...) …
#endif
#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
#define DPrintf2 …
#else
#define DPrintf2(...) …
#endif
StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);
MBlock *JavaHeapBlock(uptr addr, uptr *start);
void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);
void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
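// Hot-path memory access checking, called from compiler instrumentation.
// MemoryAccess handles a single aligned access of 1-8 bytes; the Unaligned
// and Range variants cover the remaining cases. AccessType flags encode
// read/write, atomicity, and related properties of the access.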
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
template <bool is_read>
void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
ALWAYS_INLINE
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
bool is_write) { … }
void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
uptr size);
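// Ignore annotations. Each Begin bumps a per-thread counter and must be
// paired with the matching End; while the counter is non-zero, memory
// accesses (or sync operations, for the Sync variants) are not tracked.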
void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);
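// Thread lifecycle. ThreadCreate runs in the creating thread and returns the
// new TSan Tid; ThreadStart runs in the created thread itself; Join, Detach,
// and Finish mirror the corresponding pthread semantics.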
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);
Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
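// Mutex annotation interface. Interceptors bracket the real operation with
// Pre/Post calls; roughly (a sketch for an intercepted pthread_mutex_lock):
//   MutexPreLock(thr, pc, (uptr)m);          // before blocking; feeds the
//                                            // deadlock detector
//   int res = REAL(pthread_mutex_lock)(m);   // the real lock
//   MutexPostLock(thr, pc, (uptr)m);         // records acquisition and the
//                                            // happens-before edge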
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
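// Happens-before annotations: the Release* functions publish the current
// thread's vector clock via the sync object at addr, and Acquire imports it.
// ReleaseStore overwrites the sync clock instead of joining with it.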
void Acquire(ThreadState *thr, uptr pc, uptr addr);
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);
#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() { … }
#endif
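// Thread-slot management. The SANITIZER_ACQUIRE/RELEASE thread-safety
// annotations document which functions enter and leave with thr->slot->mtx
// held.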
void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();
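// Fiber support, backing the __tsan_*_fiber interface: each fiber gets its
// own ThreadState, and FiberSwitch re-points the current thread to it.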
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
enum FiberSwitchFlags { … };
class SlotLocker { … };
class SlotUnlocker { … };
ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) { … }
extern bool is_initialized;
ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) { … }
void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
MutexSet *pmset, uptr *ptag);
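// Two-phase trace event emission: TraceAcquire reserves space for one event
// and returns false when the current trace part is full (the caller then
// restarts via TraceSwitchPart); TraceRelease publishes the event. A typical
// caller, sketched:
//   EventT *ev;
//   if (UNLIKELY(!TraceAcquire(thr, &ev)))
//     return false;   // switch trace part, then retry
//   // ... fill in *ev ...
//   TraceRelease(thr, ev);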
template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
EventT **ev) { … }
template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) { … }
template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) { … }
ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
uptr pc = 0) { … }
WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);
void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);
void GrowShadowStack(ThreadState *thr);
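// Shadow-stack maintenance, called by compiler instrumentation on function
// entry/exit; GrowShadowStack above is the out-of-line slow path used when
// the preallocated stack is exhausted.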
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) { … }
ALWAYS_INLINE
void FuncExit(ThreadState *thr) { … }
#if !SANITIZER_GO
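// Optional hooks, set by tests or embedders, invoked during Initialize() and
// Finalize() when non-null.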
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif
}  // namespace __tsan
#endif  // TSAN_RTL_H