chromium/third_party/abseil-cpp/absl/base/internal/low_level_alloc.cc

// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// A low-level allocator that can be used by other low-level
// modules without introducing dependency cycles.
// This allocator is slow and wasteful of memory;
// it should not be used when performance is key.

#include "absl/base/internal/low_level_alloc.h"

#include <type_traits>

#include "absl/base/call_once.h"
#include "absl/base/config.h"
#include "absl/base/internal/direct_mmap.h"
#include "absl/base/internal/scheduling_mode.h"
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"

// LowLevelAlloc requires that the platform support low-level
// allocation of virtual memory. Platforms lacking this cannot use
// LowLevelAlloc.
#ifndef ABSL_LOW_LEVEL_ALLOC_MISSING

#ifndef _WIN32
#include <pthread.h>
#include <signal.h>
#include <sys/mman.h>
#include <unistd.h>
#else
#include <windows.h>
#endif

#ifdef __linux__
#include <sys/prctl.h>
#endif

#include <string.h>

#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstddef>
#include <new>  // for placement-new

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/internal/spinlock.h"

#if defined(MAP_ANON) && !defined(MAP_ANONYMOUS)
#define MAP_ANONYMOUS MAP_ANON
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace base_internal {

// A first-fit allocator with amortized logarithmic free() time.

// ---------------------------------------------------------------------------
static const int kMaxLevel = 30;

namespace {
// This struct describes one allocated block, or one free block.
struct AllocList {};
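
// The real members are elided above. For illustration only: the surrounding
// comments (Magic() takes an AllocList::Header*, callers set "e->levels",
// and "next[level-1] will fit in the node") imply a layout roughly like the
// hypothetical sketch below; the exact fields are an assumption, not the
// verified upstream definition.
struct AllocListSketch {
  struct Header {
    uintptr_t size;               // size of entire region, including header
    uintptr_t magic;              // kMagicAllocated or kMagicUnallocated,
                                  // mixed with this header's address
    LowLevelAlloc::Arena *arena;  // owning arena
  } header;

  // In a free block the rest of the node holds the skiplist links; in an
  // allocated block this space is overlaid by client data.
  int levels;                        // skiplist levels used by this node
  AllocListSketch *next[kMaxLevel];  // only the first "levels" are valid
};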
}  // namespace

// ---------------------------------------------------------------------------
// A trivial skiplist implementation.  This is used to keep the freelist
// in address order while taking only logarithmic time per insert and delete.

// An integer approximation of log2(size/base)
// Requires size >= base.
static int IntLog2(size_t size, size_t base) {}
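
// A minimal sketch of the approximation described above, assuming the
// obvious halving loop (the real body is elided): repeatedly halve size
// until it no longer exceeds base, counting the halvings.
static int IntLog2Sketch(size_t size, size_t base) {
  int result = 0;
  for (size_t i = size; i > base; i >>= 1) {  // i ~= size / 2**result
    result++;
  }
  return result;  // roughly log2(size / base)
}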

// Return a random integer n:  p(n)=1/(2**n) if 1 <= n; p(n)=0 if n < 1.
static int Random(uint32_t *state) {}
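
// A sketch of one generator with the distribution described above: step a
// linear congruential generator and count draws until a high-order bit is
// set, so n takes value k with probability 1/2**k. The LCG constants are
// the classic ANSI C values and are an assumption of this sketch.
static int RandomSketch(uint32_t *state) {
  uint32_t r = *state;
  int result = 1;
  while ((((r = r * 1103515245 + 12345) >> 30) & 1) == 0) {
    result++;  // each additional level occurs with probability 1/2
  }
  *state = r;
  return result;
}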

// Return a number of skiplist levels for a node of size bytes, where
// base is the minimum node size.  Compute level=log2(size / base)+n
// where n is 1 if random is false and otherwise a random number generated with
// the standard distribution for a skiplist:  See Random() above.
// Bigger nodes tend to have more skiplist levels due to the log2(size / base)
// term, so first-fit searches touch fewer nodes.  "level" is clipped so
// level<kMaxLevel and next[level-1] will fit in the node.
// 0 < LLA_SkiplistLevels(x,y,false) <= LLA_SkiplistLevels(x,y,true) < kMaxLevel
static int LLA_SkiplistLevels(size_t size, size_t base, uint32_t *random) {}
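
// A sketch of the computation spelled out in the comment above, built on
// the hypothetical IntLog2Sketch()/RandomSketch()/AllocListSketch: start
// from log2(size/base), add a random increment (or 1 when random ==
// nullptr), then clip so next[level-1] fits in the node and level stays
// below kMaxLevel.
static int LLA_SkiplistLevelsSketch(size_t size, size_t base,
                                    uint32_t *random) {
  // Maximum number of next[] pointers that fit in a node of this size.
  size_t max_fit =
      (size - offsetof(AllocListSketch, next)) / sizeof(AllocListSketch *);
  int level = IntLog2Sketch(size, base) +
              (random != nullptr ? RandomSketch(random) : 1);
  if (static_cast<size_t>(level) > max_fit) level = static_cast<int>(max_fit);
  if (level > kMaxLevel - 1) level = kMaxLevel - 1;
  return level;  // 0 < level < kMaxLevel
}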

// Return "atleast", the first element of AllocList *head s.t. *atleast >= *e.
// For 0 <= i < head->levels, set prev[i] to "no_greater", where no_greater
// points to the last element at level i in the AllocList less than *e, or is
// head if no such element exists.
static AllocList *LLA_SkiplistSearch(AllocList *head, AllocList *e,
                                     AllocList **prev) {}
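
// A sketch of the standard skiplist search specified above, over the
// hypothetical AllocListSketch: walk each level from the top, advancing
// while the next node is still less than *e, recording the last node
// visited per level in prev[].
static AllocListSketch *LLA_SkiplistSearchSketch(AllocListSketch *head,
                                                 AllocListSketch *e,
                                                 AllocListSketch **prev) {
  AllocListSketch *p = head;
  for (int level = head->levels - 1; level >= 0; level--) {
    for (AllocListSketch *n; (n = p->next[level]) != nullptr && n < e;
         p = n) {
    }
    prev[level] = p;  // last element at this level that is < *e
  }
  return (head->levels == 0) ? nullptr : prev[0]->next[0];
}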

// Insert element *e into AllocList *head.  Set prev[] as LLA_SkiplistSearch.
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistInsert(AllocList *head, AllocList *e,
                               AllocList **prev) {}
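
// A sketch of insertion using the hypothetical helpers above: fill prev[]
// by searching, extend head->levels if e is taller than the list, then
// splice e in at each of its levels.
static void LLA_SkiplistInsertSketch(AllocListSketch *head, AllocListSketch *e,
                                     AllocListSketch **prev) {
  LLA_SkiplistSearchSketch(head, e, prev);
  for (; head->levels < e->levels; head->levels++) {
    prev[head->levels] = head;  // levels the list did not previously have
  }
  for (int i = 0; i != e->levels; i++) {  // splice in at each level
    e->next[i] = prev[i]->next[i];
    prev[i]->next[i] = e;
  }
}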

// Remove element *e from AllocList *head.  Set prev[] as LLA_SkiplistSearch().
// Requires that e->levels be previously set by the caller (using
// LLA_SkiplistLevels())
static void LLA_SkiplistDelete(AllocList *head, AllocList *e,
                               AllocList **prev) {}
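
// A sketch of deletion using the hypothetical helpers above: fill prev[]
// by searching, unlink e wherever it is linked, then shrink head->levels
// while the topmost level is empty.
static void LLA_SkiplistDeleteSketch(AllocListSketch *head, AllocListSketch *e,
                                     AllocListSketch **prev) {
  LLA_SkiplistSearchSketch(head, e, prev);
  for (int i = 0; i != e->levels && prev[i]->next[i] == e; i++) {
    prev[i]->next[i] = e->next[i];
  }
  while (head->levels > 0 && head->next[head->levels - 1] == nullptr) {
    head->levels--;  // drop now-empty top levels
  }
}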

// ---------------------------------------------------------------------------
// Arena implementation

// Metadata for a LowLevelAlloc arena instance.
struct LowLevelAlloc::Arena {};
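
// The real members are elided above. As a hypothetical sketch, the
// surrounding code implies at least this state: the "L < arena->mu"
// annotations require a lock, the freelist comments require an AllocList
// head kept in address order, Random() needs per-arena PRNG state, and
// DeleteArena() presumably needs a count of live allocations. All names
// below are assumptions.
struct ArenaSketch {
  SpinLock mu;               // protects the fields below
  AllocListSketch freelist;  // head of free list, kept in address order
  int32_t allocation_count;  // live blocks; must reach zero for DeleteArena()
  uint32_t flags;            // flags passed to NewArena()
  size_t pagesize;           // cached GetPageSize()
  size_t round_up;           // granularity for rounding block sizes
  size_t min_size;           // smallest allocation block size
  uint32_t random;           // state for Random()
};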

namespace {
// Static storage space for the lazily-constructed, default global arena
// instances.  We require this space because the whole point of LowLevelAlloc
// is to avoid relying on malloc/new.
alignas(LowLevelAlloc::Arena) unsigned char default_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
alignas(LowLevelAlloc::Arena) unsigned char unhooked_arena_storage[sizeof(
    LowLevelAlloc::Arena)];
#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
alignas(
    LowLevelAlloc::Arena) unsigned char unhooked_async_sig_safe_arena_storage
    [sizeof(LowLevelAlloc::Arena)];
#endif

// We must use LowLevelCallOnce here to construct the global arenas, rather than
// using function-level statics, to avoid recursively invoking the scheduler.
absl::once_flag create_globals_once;

void CreateGlobalArenas() {}
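
// A sketch of what the lazy construction above likely does: placement-new
// (hence the <new> include) of an Arena into each static buffer. The exact
// flags passed are an assumption of this sketch.
void CreateGlobalArenasSketch() {
  new (&default_arena_storage)
      LowLevelAlloc::Arena(LowLevelAlloc::kCallMallocHook);
  new (&unhooked_arena_storage) LowLevelAlloc::Arena(0);
}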

// Returns a global arena that does not call into hooks.  Used by NewArena()
// when kCallMallocHook is not set.
LowLevelAlloc::Arena *UnhookedArena() {}

#ifndef ABSL_LOW_LEVEL_ALLOC_ASYNC_SIGNAL_SAFE_MISSING
// Returns a global arena that is async-signal safe.  Used by NewArena() when
// kAsyncSignalSafe is set.
LowLevelAlloc::Arena *UnhookedAsyncSigSafeArena() {}
#endif

}  // namespace

// Returns the default arena, as used by LowLevelAlloc::Alloc() and friends.
LowLevelAlloc::Arena *LowLevelAlloc::DefaultArena() {}

// magic numbers to identify allocated and unallocated blocks
static const uintptr_t kMagicAllocated = 0x4c833e95U;
static const uintptr_t kMagicUnallocated = ~kMagicAllocated;

namespace {
class ABSL_SCOPED_LOCKABLE ArenaLock {};
}  // namespace

// create an appropriate magic number for an object at "ptr"
// "magic" should be kMagicAllocated or kMagicUnallocated
inline static uintptr_t Magic(uintptr_t magic, AllocList::Header *ptr) {}
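
// A minimal sketch of one way to build the per-object magic word the
// comment describes: fold the header's own address into the base constant
// so that a header copied or overwritten at a different address fails
// validation. An illustrative assumption, not the verified upstream
// formula.
inline static uintptr_t MagicSketch(uintptr_t magic,
                                    const AllocListSketch::Header *ptr) {
  return magic ^ reinterpret_cast<uintptr_t>(ptr);
}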

namespace {
size_t GetPageSize() {}
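
// A sketch of the usual per-platform page-size query, matching the headers
// included above (<windows.h> vs. <unistd.h>); the real body is elided, so
// treat this as an assumption.
size_t GetPageSizeSketch() {
#ifdef _WIN32
  SYSTEM_INFO system_info;
  GetSystemInfo(&system_info);
  return std::max(system_info.dwPageSize, system_info.dwAllocationGranularity);
#else
  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
#endif
}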

size_t RoundedUpBlockSize() {}
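
// A sketch of the rounding granularity suggested by the name above: the
// smallest power of two no smaller than the block header, so every block
// can carry its own metadata. An assumption, since the real body is elided.
size_t RoundedUpBlockSizeSketch() {
  size_t round_up = 16;
  while (round_up < sizeof(AllocListSketch::Header)) {
    round_up += round_up;  // double until the header fits
  }
  return round_up;
}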

}  // namespace

LowLevelAlloc::Arena::Arena(uint32_t flags_value) {}

// L < meta_data_arena->mu
LowLevelAlloc::Arena *LowLevelAlloc::NewArena(uint32_t flags) {}

// L < arena->mu, L < arena->arena->mu
bool LowLevelAlloc::DeleteArena(Arena *arena) {}

// ---------------------------------------------------------------------------

// Addition, checking for overflow.  The intent is to die if an external client
// manages to push through a request that would cause arithmetic to fail.
static inline uintptr_t CheckedAdd(uintptr_t a, uintptr_t b) {}
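
// A minimal sketch of the check described above: unsigned addition wraps
// modulo 2**N, so the sum overflowed exactly when it is smaller than an
// operand. Crashing via ABSL_RAW_CHECK is this sketch's assumption for
// "die".
static inline uintptr_t CheckedAddSketch(uintptr_t a, uintptr_t b) {
  uintptr_t sum = a + b;
  ABSL_RAW_CHECK(sum >= a, "LowLevelAlloc arithmetic overflow");
  return sum;
}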

// Return value rounded up to next multiple of align.
// align must be a power of two.
static inline uintptr_t RoundUp(uintptr_t addr, uintptr_t align) {}
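
// A sketch of power-of-two rounding with the overflow check folded in: add
// align-1 (checked), then clear the low bits. Uses the hypothetical
// CheckedAddSketch() above.
static inline uintptr_t RoundUpSketch(uintptr_t addr, uintptr_t align) {
  return CheckedAddSketch(addr, align - 1) & ~(align - 1);
}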

// Equivalent to "return prev->next[i]" but with sanity checking
// that the freelist is in the correct order, that it
// consists of regions marked "unallocated", and that no two regions
// are adjacent in memory (they should have been coalesced).
// L >= arena->mu
static AllocList *Next(int i, AllocList *prev, LowLevelAlloc::Arena *arena) {}
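
// A sketch of the checked traversal described above, over the hypothetical
// sketch types: return prev->next[i] after verifying the "unallocated"
// magic, address order, and non-adjacency (adjacent free blocks should
// already have been coalesced).
static AllocListSketch *NextSketch(int i, AllocListSketch *prev,
                                   ArenaSketch *arena) {
  AllocListSketch *next = prev->next[i];
  if (next != nullptr) {
    ABSL_RAW_CHECK(
        next->header.magic == MagicSketch(kMagicUnallocated, &next->header),
        "bad magic number in Next()");
    if (prev != &arena->freelist) {
      ABSL_RAW_CHECK(prev < next, "unordered freelist");
      ABSL_RAW_CHECK(reinterpret_cast<char *>(prev) + prev->header.size <
                         reinterpret_cast<char *>(next),
                     "malformed freelist");  // adjacent => not coalesced
    }
  }
  return next;
}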

// Coalesce list item "a" with its successor if they are adjacent.
static void Coalesce(AllocList *a) {}
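
// A sketch of the adjacency logic behind coalescing: block "a" and its
// address-order successor touch exactly when a's size carries it to the
// successor's address. Only the size merge is shown; re-leveling the
// merged node and fixing up the skiplist links is omitted here.
static void CoalesceSketch(AllocListSketch *a) {
  AllocListSketch *n = a->next[0];
  if (n != nullptr && reinterpret_cast<char *>(a) + a->header.size ==
                          reinterpret_cast<char *>(n)) {
    a->header.size += n->header.size;  // absorb the successor's region
    // ...then delete n from the skiplist and re-insert a with a level
    // recomputed for its new, larger size.
  }
}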

// Adds block at location "v" to the free list
// L >= arena->mu
static void AddToFreelist(void *v, LowLevelAlloc::Arena *arena) {}
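
// A sketch of the free-list insertion path under the sketch types: step
// back from the client pointer to the header, validate the "allocated"
// magic, pick a skiplist level for the block's size, insert it, flip the
// magic to "unallocated", and try to coalesce with both neighbors. All
// helper names here are hypothetical.
static void AddToFreelistSketch(void *v, ArenaSketch *arena) {
  AllocListSketch *f = reinterpret_cast<AllocListSketch *>(
      reinterpret_cast<char *>(v) - sizeof(AllocListSketch::Header));
  ABSL_RAW_CHECK(f->header.magic == MagicSketch(kMagicAllocated, &f->header),
                 "bad magic number in AddToFreelist()");
  f->levels = LLA_SkiplistLevelsSketch(f->header.size, arena->min_size,
                                       &arena->random);
  AllocListSketch *prev[kMaxLevel];
  LLA_SkiplistInsertSketch(&arena->freelist, f, prev);
  f->header.magic = MagicSketch(kMagicUnallocated, &f->header);
  CoalesceSketch(f);        // maybe absorb the successor
  CoalesceSketch(prev[0]);  // maybe let the predecessor absorb f
}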

// Frees storage allocated by LowLevelAlloc::Alloc().
// L < arena->mu
void LowLevelAlloc::Free(void *v) {}

// allocates and returns a block of size bytes, to be freed with Free()
// L < arena->mu
static void *DoAllocWithArena(size_t request, LowLevelAlloc::Arena *arena) {}
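
// A condensed sketch of a first-fit allocation path under the sketch
// types: round the request up so it also covers the header, take the first
// (lowest-address) free block that fits, split off the unused tail when it
// is big enough to stand alone, and stamp the block "allocated". The
// skiplist-guided search and growing the arena via mmap/VirtualAlloc when
// nothing fits are omitted; helper names are hypothetical.
static void *DoAllocWithArenaSketch(size_t request, ArenaSketch *arena) {
  uintptr_t req_rnd = RoundUpSketch(
      CheckedAddSketch(request, sizeof(AllocListSketch::Header)),
      arena->round_up);
  AllocListSketch *s = arena->freelist.next[0];  // linear first fit
  while (s != nullptr && s->header.size < req_rnd) s = s->next[0];
  if (s == nullptr) return nullptr;  // the real code grows the arena here
  AllocListSketch *prev[kMaxLevel];
  LLA_SkiplistDeleteSketch(&arena->freelist, s, prev);
  // Split off the tail as a new free block if the remainder is usable.
  if (CheckedAddSketch(req_rnd, arena->min_size) <= s->header.size) {
    AllocListSketch *n = reinterpret_cast<AllocListSketch *>(
        reinterpret_cast<char *>(s) + req_rnd);
    n->header.size = s->header.size - req_rnd;
    n->header.magic = MagicSketch(kMagicUnallocated, &n->header);
    n->header.arena = nullptr;  // sketch: real code records the owning arena
    n->levels = LLA_SkiplistLevelsSketch(n->header.size, arena->min_size,
                                         &arena->random);
    LLA_SkiplistInsertSketch(&arena->freelist, n, prev);
    s->header.size = req_rnd;
  }
  s->header.magic = MagicSketch(kMagicAllocated, &s->header);
  arena->allocation_count++;
  return reinterpret_cast<char *>(s) + sizeof(AllocListSketch::Header);
}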

void *LowLevelAlloc::Alloc(size_t request) {}

void *LowLevelAlloc::AllocWithArena(size_t request, Arena *arena) {}

}  // namespace base_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_LOW_LEVEL_ALLOC_MISSING