// llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp

//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_fd.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_sync.h"

namespace __tsan {

usingnamespace__sanitizer;

// Forward declaration; the definition appears later in this file.
static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
// Returns true if the report should be considered suppressed.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  (void)suppressed;
  // Default weak implementation: keep the caller's suppression decision
  // unchanged. (The original empty body fell off the end of a non-void
  // function, which is undefined behavior.)
  return suppressed;
}
#endif

// Weak hook invoked for every finished report; applications/tests may
// override it to observe reports. The default implementation is a no-op.
SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {}

// Presumably trims frames below main()/thread start from a symbolized stack.
// NOTE(review): the body is empty in this copy of the file — it appears to
// have been stripped; confirm against the upstream implementation.
static void StackStripMain(SymbolizedStack *frames) {}

// Symbolizes the stack stored in the stack depot under stack_id.
// NOTE(review): the original body appears to have been stripped from this
// copy; as written, a non-void function that flows off its end is undefined
// behavior. Return null ("no stack") so the stub is at least well-formed.
// TODO(review): restore the real depot-lookup-and-symbolize implementation.
ReportStack *SymbolizeStackId(u32 stack_id) { return nullptr; }

// Symbolizes a raw stack trace into a ReportStack.
// NOTE(review): the original body appears stripped; an empty body in a
// non-void function is undefined behavior. Return null ("no stack") so the
// stub is well-formed. TODO(review): restore the real implementation.
static ReportStack *SymbolizeStack(StackTrace trace) { return nullptr; }

// Decides whether a report of the given type should be produced for thr.
// NOTE(review): the original body appears stripped; an empty body in a
// non-void function is undefined behavior. Returning false ("do not report")
// keeps the stub well-formed and side-effect free.
// TODO(review): restore the real flag/suppression-based logic.
bool ShouldReport(ThreadState *thr, ReportType typ) { return false; }

// Begins construction of a report of the given type/tag.
// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {}

// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
ScopedReportBase::~ScopedReportBase() {}

// Attaches a stack trace to the report being built; suppressable marks it as
// eligible for suppression matching. NOTE(review): empty stub in this copy.
void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {}

// Records one memory access (address, shadow state, thread, stack, and held
// mutex set) in the report. NOTE(review): empty stub in this copy — appears
// stripped; confirm against upstream.
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {}

// Records a unique thread id in the report. NOTE(review): empty stub.
void ScopedReportBase::AddUniqueTid(Tid unique_tid) {}

// Records a thread (by its context) in the report. NOTE(review): empty stub.
void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {}

#if !SANITIZER_GO
// Looks up a thread context by tid (the "Locked" suffix suggests the caller
// must hold the thread registry lock — confirm upstream).
// NOTE(review): all three bodies below appear stripped in this copy; empty
// bodies in non-void functions are undefined behavior. Conservative
// "not found" returns keep the stubs well-formed.
// TODO(review): restore the real implementations.
static ThreadContext *FindThreadByTidLocked(Tid tid) { return nullptr; }

// Registry-scan predicate: whether addr (passed via arg) lies in the
// thread's stack or TLS region.
static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  return false;
}

// Finds the thread whose stack or TLS contains addr; on success *is_stack
// distinguishes stack from TLS.
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  return nullptr;
}
#endif

// Records a thread (by tid) in the report. NOTE(review): empty stub.
void ScopedReportBase::AddThread(Tid tid, bool suppressable) {}

// Records a mutex in the report and returns its index within the report.
// NOTE(review): the original body appears stripped; an empty body in a
// non-void function is undefined behavior. Returning 0 (first/unknown index)
// keeps the stub well-formed. TODO(review): restore the real implementation.
int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  return 0;
}

// Records the memory location (heap block, global, stack, fd, ...) that the
// report refers to. NOTE(review): empty stub in this copy.
void ScopedReportBase::AddLocation(uptr addr, uptr size) {}

#if !SANITIZER_GO
// Records the sleep stack (pthread_cond_wait etc.) associated with the
// report. NOTE(review): empty stub in this copy.
void ScopedReportBase::AddSleep(StackID stack_id) {}
#endif

// Sets the occurrence count for the report. NOTE(review): empty stub.
void ScopedReportBase::SetCount(int count) {}

// Sets the signal number associated with the report. NOTE(review): empty stub.
void ScopedReportBase::SetSigNum(int sig) {}

// Returns the accumulated report description.
// NOTE(review): the original body appears stripped; an empty body in a
// non-void function is undefined behavior. Returning null keeps the stub
// well-formed. TODO(review): upstream returns the internal report member.
const ReportDesc *ScopedReportBase::GetReport() const { return nullptr; }

// ScopedReport is a thin RAII wrapper: forward construction to the base,
// whose constructor takes the same (ReportType, uptr) pair (see
// ScopedReportBase above). The original line ended in a bare ":{}", which is
// a syntax error — the member-initializer list was evidently lost.
ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
ScopedReport::~ScopedReport() {}

// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {}

// Helper for RestoreStack: presumably copies the current replayed
// stack/mutex set into the output parameters and sets *found.
// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
// (The original body was empty, which is undefined behavior for a
// value-returning constexpr function; the predicate below implements exactly
// the contract stated above.)
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
// NOTE(review): the original body appears stripped; an empty body in a
// non-void function is undefined behavior. Per the contract above, false is
// the documented "event not found" result, so it is the safe stub return.
// TODO(review): restore the real trace-replay implementation.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  return false;
}

// Equality of two racy-stack fingerprints (used for report deduplication).
// NOTE(review): the original body appears stripped; an empty body in a
// non-void function is undefined behavior. Returning false ("never equal")
// merely disables deduplication — conservative for a stub.
// TODO(review): restore the real hash comparison.
bool RacyStacks::operator==(const RacyStacks &other) const { return false; }

// Whether a racy-stack fingerprint was already seen (for deduplication).
// NOTE(review): body appears stripped; empty non-void body is UB. Returning
// false ("not seen before") is the conservative stub default.
// TODO(review): restore the real lookup.
static bool FindRacyStacks(const RacyStacks &hash) { return false; }

// Records the pair of racy stacks and reports whether this race was already
// handled (deduplicated). NOTE(review): body appears stripped; empty
// non-void body is UB. Returning false ("not a duplicate") keeps the stub
// well-formed. TODO(review): restore the real implementation.
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  return false;
}

// Symbolizes, suppression-checks and prints the report; returns whether the
// report was actually emitted. NOTE(review): body appears stripped; empty
// non-void body is UB. Returning false ("not emitted") keeps the stub
// well-formed and side-effect free. TODO(review): restore upstream logic
// (suppressions, OnReport hook, PrintReport, flag handling).
bool OutputReport(ThreadState *thr, const ScopedReport &srep) { return false; }

// Whether a previously-fired suppression matches this report type and stack.
// NOTE(review): body appears stripped; empty non-void body is UB. Returning
// false ("no fired suppression") keeps the stub well-formed.
// TODO(review): restore the real fired-suppressions scan.
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  return false;
}

// Address-based overload of the fired-suppression check above.
// NOTE(review): body appears stripped; empty non-void body is UB. Returning
// false ("no fired suppression") keeps the stub well-formed.
// TODO(review): restore the real fired-suppressions scan.
static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  return false;
}

// Whether the old shadow value indicates a spurious (non-)race that should
// be ignored. NOTE(review): body appears stripped; empty non-void body is
// UB. Returning false ("not spurious") keeps the stub well-formed.
// TODO(review): restore the real check.
static bool SpuriousRace(Shadow old) { return false; }

// Entry point for reporting a data race between the current access (cur) and
// a previous one (old) on the given shadow cell.
// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {}

// Prints the current thread's stack trace. NOTE(review): empty stub in this
// copy — appears stripped; confirm against upstream.
void PrintCurrentStack(ThreadState *thr, uptr pc) {}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwinded stack, but
// tail-call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
// NOTE(review): empty body in this copy — appears stripped; confirm upstream.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {}

}  // namespace __tsan

usingnamespace__tsan;

extern "C" {
// Public sanitizer interface: prints the calling thread's stack trace.
// NOTE(review): empty stub in this copy — upstream presumably forwards to
// PrintCurrentStackSlow; confirm before relying on it.
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {}
}  // extern "C"