llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp

//===-- tsan_rtl_access.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Definitions of memory access and function entry/exit entry points.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

namespace __tsan {

ALWAYS_INLINE USED bool TryTraceMemoryAccess(ThreadState* thr, uptr pc,
                                             uptr addr, uptr size,
                                             AccessType typ) {}

ALWAYS_INLINE
bool TryTraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
                               AccessType typ) {}

void TraceMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr, uptr size,
                            AccessType typ) {
  if (LIKELY(TryTraceMemoryAccessRange(thr, pc, addr, size, typ)))
    return;
  // The current trace part is full: switch to a new part and retry.
  // After a switch tracing must succeed.
  TraceSwitchPart(thr);
  UNUSED bool res = TryTraceMemoryAccessRange(thr, pc, addr, size, typ);
  DCHECK(res);
}

void TraceFunc(ThreadState* thr, uptr pc) {
  if (LIKELY(TryTraceFunc(thr, pc)))
    return;
  TraceSwitchPart(thr);
  UNUSED bool res = TryTraceFunc(thr, pc);
  DCHECK(res);
}

NOINLINE void TraceRestartFuncEntry(ThreadState* thr, uptr pc) {
  // Slow path: open a new trace part, then recurse into the callback
  // (see the comment before TraceRestartMemoryAccess below).
  TraceSwitchPart(thr);
  FuncEntry(thr, pc);
}

NOINLINE void TraceRestartFuncExit(ThreadState* thr) {
  TraceSwitchPart(thr);
  FuncExit(thr);
}

void TraceMutexLock(ThreadState* thr, EventType type, uptr pc, uptr addr,
                    StackID stk) {}

void TraceMutexUnlock(ThreadState* thr, uptr addr) {}

void TraceTime(ThreadState* thr) {}

NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                           Shadow old,
                           AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {}

#if !TSAN_VECTORIZE
ALWAYS_INLINE
bool ContainsSameAccess(RawShadow* s, Shadow cur, int unused0, int unused1,
                        AccessType typ) {
  for (uptr i = 0; i < kShadowCnt; i++) {
    auto old = LoadShadow(&s[i]);
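    // For a write the stored shadow value must match the current one exactly.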
    if (!(typ & kAccessRead)) {
      if (old == cur.raw())
        return true;
      continue;
    }
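    // For a read, also match an otherwise-identical write: set the is-read
    // bit (Shadow::kRodata is exactly that bit) on the stored value before
    // comparing.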
    auto masked = static_cast<RawShadow>(static_cast<u32>(old) |
                                         static_cast<u32>(Shadow::kRodata));
    if (masked == cur.raw())
      return true;
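    // Reads of .rodata are marked with a dedicated shadow value and can
    // never race (this marker is not used by the Go runtime).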
    if (!(typ & kAccessNoRodata) && !SANITIZER_GO) {
      if (old == Shadow::kRodata)
        return true;
    }
  }
  return false;
}

ALWAYS_INLINE
bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                int unused0, int unused1, AccessType typ) {
  bool stored = false;
  for (uptr idx = 0; idx < kShadowCnt; idx++) {
    RawShadow* sp = &shadow_mem[idx];
    Shadow old(LoadShadow(sp));
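    // An empty slot means there are no more stored accesses to check.
    // Store the current access (unless it's check-only or already stored)
    // and we are done.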
    if (LIKELY(old.raw() == Shadow::kEmpty)) {
      if (!(typ & kAccessCheckOnly) && !stored)
        StoreShadow(sp, cur.raw());
      return false;
    }
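    // The accesses touch disjoint bytes of the 8-byte shadow cell:
    // they cannot race.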
    if (LIKELY(!(cur.access() & old.access())))
      continue;
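    // Same slot id, i.e. the same thread: never a race, but refresh the
    // stored access if the current one covers the same bytes and is at
    // least as strong, so that no information is lost.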
    if (LIKELY(cur.sid() == old.sid())) {
      if (!(typ & kAccessCheckOnly) &&
          LIKELY(cur.access() == old.access() && old.IsRWWeakerOrEqual(typ))) {
        StoreShadow(sp, cur.raw());
        stored = true;
      }
      continue;
    }
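    // Two reads never race, and neither do two atomic accesses.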
    if (LIKELY(old.IsBothReadsOrAtomic(typ)))
      continue;
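    // The current thread has already synchronized with that epoch of the
    // other slot (happens-before), so this is not a race.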
    if (LIKELY(thr->clock.Get(old.sid()) >= old.epoch()))
      continue;
    DoReportRace(thr, shadow_mem, cur, old, typ);
    return true;
  }
  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(stored))
    return false;
  // Choose a pseudo-random candidate slot and replace it (the current
  // trace position provides a cheap source of randomness).
  uptr index =
      atomic_load_relaxed(&thr->trace_pos) / sizeof(Event) % kShadowCnt;
  StoreShadow(&shadow_mem[index], cur.raw());
  return false;
}

// In the non-vectorized build the shadow/access values are not used
// (see the unused0/unused1 parameters above), so the macro only provides
// dummies for the call sites to pass.
#define LOAD_CURRENT_SHADOW(cur, shadow_mem) UNUSED int access = 0, shadow = 0

#else /* !TSAN_VECTORIZE */

ALWAYS_INLINE
bool ContainsSameAccess(RawShadow* unused0, Shadow unused1, m128 shadow,
                        m128 access, AccessType typ) {}

NOINLINE void DoReportRaceV(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                            u32 race_mask, m128 shadow, AccessType typ) {}

ALWAYS_INLINE
bool CheckRaces(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                m128 shadow, m128 access, AccessType typ) {}

#define LOAD_CURRENT_SHADOW(cur, shadow_mem)
#endif

char* DumpShadow(char* buf, RawShadow raw) {}

// The TryTrace* and TraceRestart* functions make it possible to turn the
// memory access and function entry/exit callbacks into leaf functions, with
// all the associated performance benefits. These hottest callbacks make only
// two slow-path calls: race reporting and trace part switching. Race
// reporting is easy to turn into a tail call: we simply always return from
// the runtime after reporting a race. Trace part switching is harder because
// it happens in the middle of the callbacks. To turn it into a tail call as
// well, we immediately return after calling a TraceRestart* function, and the
// TraceRestart* function itself recurses into the callback after switching
// the trace part. As a result the hottest callbacks contain only tail calls,
// which effectively makes them leaf functions (they can use all registers,
// need no frame setup, etc).
NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
                                       uptr size, AccessType typ) {
  TraceSwitchPart(thr);
  MemoryAccess(thr, pc, addr, size, typ);
}
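
// A minimal sketch of the hot-path side of this pattern (illustrative
// excerpt only; the real body of MemoryAccess is elided in this listing):
//
//   ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
//                                        uptr size, AccessType typ) {
//     ...
//     // Tail-calling the NOINLINE restart function keeps this a leaf.
//     if (UNLIKELY(!TryTraceMemoryAccess(thr, pc, addr, size, typ)))
//       return TraceRestartMemoryAccess(thr, pc, addr, size, typ);
//     ...
//   }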

ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
                                     uptr size, AccessType typ) {}

void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr, AccessType typ);

NOINLINE
void RestartMemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
                           AccessType typ) {
  TraceSwitchPart(thr);
  MemoryAccess16(thr, pc, addr, typ);
}

ALWAYS_INLINE USED void MemoryAccess16(ThreadState* thr, uptr pc, uptr addr,
                                       AccessType typ) {}

NOINLINE
void RestartUnalignedMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
                                  uptr size, AccessType typ) {
  TraceSwitchPart(thr);
  UnalignedMemoryAccess(thr, pc, addr, size, typ);
}

ALWAYS_INLINE USED void UnalignedMemoryAccess(ThreadState* thr, uptr pc,
                                              uptr addr, uptr size,
                                              AccessType typ) {}

void ShadowSet(RawShadow* p, RawShadow* end, RawShadow v) {}

static void MemoryRangeSet(uptr addr, uptr size, RawShadow val) {}

void MemoryResetRange(ThreadState* thr, uptr pc, uptr addr, uptr size) {}

void MemoryRangeFreed(ThreadState* thr, uptr pc, uptr addr, uptr size) {}

void MemoryRangeImitateWrite(ThreadState* thr, uptr pc, uptr addr, uptr size) {}

void MemoryRangeImitateWriteOrResetRange(ThreadState* thr, uptr pc, uptr addr,
                                         uptr size) {}

ALWAYS_INLINE
bool MemoryAccessRangeOne(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                          AccessType typ) {
  LOAD_CURRENT_SHADOW(cur, shadow_mem);
  if (LIKELY(ContainsSameAccess(shadow_mem, cur, shadow, access, typ)))
    return false;
  return CheckRaces(thr, shadow_mem, cur, shadow, access, typ);
}

template <bool is_read>
NOINLINE void RestartMemoryAccessRange(ThreadState* thr, uptr pc, uptr addr,
                                       uptr size) {
  TraceSwitchPart(thr);
  MemoryAccessRangeT<is_read>(thr, pc, addr, size);
}

template <bool is_read>
void MemoryAccessRangeT(ThreadState* thr, uptr pc, uptr addr, uptr size) {}

template void MemoryAccessRangeT<true>(ThreadState* thr, uptr pc, uptr addr,
                                       uptr size);
template void MemoryAccessRangeT<false>(ThreadState* thr, uptr pc, uptr addr,
                                        uptr size);

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#  include "tsan_interface.inc"
#endif