#include <google/protobuf/arenaz_sampler.h>
#include <atomic>
#include <cstdint>
#include <limits>
#include <google/protobuf/port_def.inc>
namespace google {
namespace protobuf {
namespace internal {
// Accessor for the process-wide sampler instance (body elided in this view).
ThreadSafeArenazSampler& GlobalThreadSafeArenazSampler() { … }
// Presumably unregisters `info` from the global sampler when an arena stops
// being sampled — body not visible here; confirm against the full file.
void UnsampleSlow(ThreadSafeArenaStats* info) { … }
#if defined(PROTOBUF_ARENAZ_SAMPLE)
namespace {
// Global on/off switch for arena sampling; read with relaxed loads in
// SampleSlow(), written with a release store in SetThreadSafeArenazEnabled().
PROTOBUF_CONSTINIT std::atomic<bool> g_arenaz_enabled{true};
// Sampling stride parameter fed to ExponentialBiased::GetStride() in
// SampleSlow(); defaults to 1024.
PROTOBUF_CONSTINIT std::atomic<int32_t> g_arenaz_sample_parameter{1 << 10};
// Per-thread PRNG used to draw each thread's next sampling stride.
PROTOBUF_THREAD_LOCAL absl::profiling_internal::ExponentialBiased
    g_exponential_biased_generator;
}
// Per-thread countdown until the next sampled arena; starts at the same
// value as the default sample parameter above.
PROTOBUF_THREAD_LOCAL int64_t global_next_sample = 1LL << 10;
// A freshly constructed stats object starts zeroed with the current stack
// trace captured (see PrepareForSampling()).
ThreadSafeArenaStats::ThreadSafeArenaStats() { PrepareForSampling(); }
ThreadSafeArenaStats::~ThreadSafeArenaStats() = default;
// Resets every counter to zero and records the current stack trace.
// Called from the constructor; relaxed stores suffice because readers of
// these stats only need a best-effort snapshot, not ordering guarantees.
void ThreadSafeArenaStats::PrepareForSampling() {
  num_allocations.store(0, std::memory_order_relaxed);
  num_resets.store(0, std::memory_order_relaxed);
  bytes_requested.store(0, std::memory_order_relaxed);
  bytes_allocated.store(0, std::memory_order_relaxed);
  bytes_wasted.store(0, std::memory_order_relaxed);
  max_bytes_allocated.store(0, std::memory_order_relaxed);
  thread_ids.store(0, std::memory_order_relaxed);
  // Capture the creation stack; `depth` is the number of frames filled.
  depth = absl::GetStackTrace(stack, kMaxStackDepth, 0);
}
// Folds an arena reset into `info`: raises the high-water mark of allocated
// bytes if the arena grew past it, zeroes the per-use byte counters, and
// bumps the reset count. num_allocations / bytes_wasted are cumulative
// across resets and are deliberately left untouched.
void RecordResetSlow(ThreadSafeArenaStats* info) {
  const size_t max_bytes =
      info->max_bytes_allocated.load(std::memory_order_relaxed);
  const size_t allocated_bytes =
      info->bytes_allocated.load(std::memory_order_relaxed);
  if (max_bytes < allocated_bytes) {
    // Relaxed, matching every other access to these stats; the original
    // store here silently defaulted to seq_cst for no apparent reason.
    info->max_bytes_allocated.store(allocated_bytes,
                                    std::memory_order_relaxed);
  }
  info->bytes_requested.store(0, std::memory_order_relaxed);
  info->bytes_allocated.store(0, std::memory_order_relaxed);
  // NOTE(review): the previous code performed fetch_add(0, relaxed) on
  // bytes_wasted and num_allocations — observable no-ops (adds zero,
  // discards the returned value) — so those calls were removed.
  info->num_resets.fetch_add(1, std::memory_order_relaxed);
}
// Accumulates one allocation into `info` and marks the calling thread in the
// `thread_ids` bitmap (an approximate thread set: each thread hashes onto one
// of 63 bits via `% 63`, so the count of set bits underestimates on
// collision).
void RecordAllocateSlow(ThreadSafeArenaStats* info, size_t requested,
                        size_t allocated, size_t wasted) {
  info->bytes_requested.fetch_add(requested, std::memory_order_relaxed);
  info->bytes_allocated.fetch_add(allocated, std::memory_order_relaxed);
  info->bytes_wasted.fetch_add(wasted, std::memory_order_relaxed);
  info->num_allocations.fetch_add(1, std::memory_order_relaxed);
  // `% 63` keeps the original bit mapping (bit 63 unused).
  const uint64_t tid = (1ULL << (GetCachedTID() % 63));
  // Cheap relaxed load first so the common already-recorded case avoids an
  // atomic RMW; when the bit is missing, fetch_or sets it without losing
  // bits set concurrently by other threads. The original load-then-plain-
  // store could clobber a concurrent thread's update (lost-update race).
  const uint64_t thread_ids = info->thread_ids.load(std::memory_order_relaxed);
  if (!(thread_ids & tid)) {
    info->thread_ids.fetch_or(tid, std::memory_order_relaxed);
  }
}
// Draws a fresh sampling stride for this thread and decides whether the
// arena being constructed should be sampled. Returns a registered
// ThreadSafeArenaStats* when it is, nullptr otherwise. `*next_sample` is the
// caller's per-thread countdown (see global_next_sample); a negative value
// is treated as the first draw on this thread.
ThreadSafeArenaStats* SampleSlow(int64_t* next_sample) {
  bool first = *next_sample < 0;
  // Replace the exhausted countdown with a new exponentially distributed
  // stride centered on g_arenaz_sample_parameter.
  *next_sample = g_exponential_biased_generator.GetStride(
      g_arenaz_sample_parameter.load(std::memory_order_relaxed));
  ABSL_ASSERT(*next_sample >= 1);
  // Even when disabled we already consumed a stride above, so the countdown
  // stays warm for when sampling is re-enabled.
  if (!g_arenaz_enabled.load(std::memory_order_relaxed)) return nullptr;
  if (first) {
    // First draw on this thread: burn the stride instead of sampling
    // immediately, avoiding a bias toward sampling the earliest arenas.
    if (PROTOBUF_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
    // Stride already hit zero: redo the draw, this time eligible to sample.
    return SampleSlow(next_sample);
  }
  return GlobalThreadSafeArenazSampler().Register();
}
// Turns global arena sampling on or off. The store uses release semantics,
// but readers in SampleSlow() use relaxed loads, so propagation to other
// threads is best-effort.
void SetThreadSafeArenazEnabled(bool enabled) {
  g_arenaz_enabled.store(enabled, std::memory_order_release);
}
// Sets the mean sampling stride used by SampleSlow(). Non-positive rates are
// invalid: they are rejected and logged, leaving the current value in place.
void SetThreadSafeArenazSampleParameter(int32_t rate) {
  if (rate <= 0) {
    ABSL_RAW_LOG(ERROR, "Invalid thread safe arenaz sample rate: %lld",
                 static_cast<long long>(rate));
    return;
  }
  g_arenaz_sample_parameter.store(rate, std::memory_order_release);
}
// Caps how many samples the global sampler retains at once. Non-positive
// values are invalid: they are rejected and logged, leaving the cap as-is.
void SetThreadSafeArenazMaxSamples(int32_t max) {
  if (max <= 0) {
    ABSL_RAW_LOG(ERROR, "Invalid thread safe arenaz max samples: %lld",
                 static_cast<long long>(max));
    return;
  }
  GlobalThreadSafeArenazSampler().SetMaxSamples(max);
}
// Overrides the calling thread's countdown to its next sample. Negative
// values are invalid: they are rejected and logged, leaving the countdown
// unchanged. (Zero is accepted and forces the next construction to sample.)
void SetThreadSafeArenazGlobalNextSample(int64_t next_sample) {
  if (next_sample < 0) {
    ABSL_RAW_LOG(ERROR, "Invalid thread safe arenaz next sample: %lld",
                 static_cast<long long>(next_sample));
    return;
  }
  global_next_sample = next_sample;
}
#else
// Fallback definitions used when PROTOBUF_ARENAZ_SAMPLE is not defined —
// presumably no-op stubs, but the bodies are elided in this view; confirm
// against the full file.
ThreadSafeArenaStats* SampleSlow(int64_t* next_sample) { … }
void SetThreadSafeArenazEnabled(bool enabled) { … }
void SetThreadSafeArenazSampleParameter(int32_t rate) { … }
void SetThreadSafeArenazMaxSamples(int32_t max) { … }
void SetThreadSafeArenazGlobalNextSample(int64_t next_sample) { … }
#endif
}
}
}