chromium/base/allocator/partition_allocator/src/partition_alloc/partition_root.cc

// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/partition_root.h"

#include <cstdint>

#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/in_slot_metadata.h"
#include "partition_alloc/oom.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc-inl.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_bucket.h"
#include "partition_alloc/partition_cookie.h"
#include "partition_alloc/partition_oom.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/tagging.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"

#if PA_BUILDFLAG(IS_MAC)
#include "partition_alloc/partition_alloc_base/mac/mac_util.h"
#endif

#if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#include "partition_alloc/address_pool_manager_bitmap.h"
#endif

#if PA_BUILDFLAG(IS_WIN)
#include <windows.h>

#include "wow64apiset.h"
#endif

#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
#include <pthread.h>
#endif

namespace partition_alloc::internal {

#if PA_BUILDFLAG(RECORD_ALLOC_INFO)
// Even if this were not hidden behind a BUILDFLAG, it would not use any memory
// when recording is disabled, since the zero-initialized array ends up in the
// .bss section.
AllocInfo g_allocs = {};

void RecordAllocOrFree(uintptr_t addr, size_t size) {
  g_allocs.allocs[g_allocs.index.fetch_add(1, std::memory_order_relaxed) %
                  kAllocInfoSize] = {addr, size};
}
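
// Illustrative only (a sketch, not code from this file): a post-mortem reader
// of the ring buffer above. `g_allocs` and `kAllocInfoSize` are the real
// names; `Report()` is hypothetical, and the `addr`/`size` field names are
// assumed from the aggregate initializer above. Entries are overwritten
// modulo kAllocInfoSize, so iterating from the current index yields
// oldest-first:
//
//   size_t next = g_allocs.index.load(std::memory_order_relaxed);
//   for (size_t i = 0; i < kAllocInfoSize; ++i) {
//     const auto& entry = g_allocs.allocs[(next + i) % kAllocInfoSize];
//     Report(entry.addr, entry.size);
//   }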
#endif  // PA_BUILDFLAG(RECORD_ALLOC_INFO)

#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PtrPosWithinAlloc IsPtrWithinSameAlloc(uintptr_t orig_address,
                                       uintptr_t test_address,
                                       size_t type_size) {}
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

}  // namespace partition_alloc::internal

namespace partition_alloc {

#if PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)

namespace {
internal::Lock g_root_enumerator_lock;
}  // namespace

internal::Lock& PartitionRoot::GetEnumeratorLock() {}

namespace internal {

class PartitionRootEnumerator {};

}  // namespace internal

#endif  // PA_CONFIG(USE_PARTITION_ROOT_ENUMERATOR)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

namespace {

#if PA_CONFIG(HAS_ATFORK_HANDLER)

void LockRoot(PartitionRoot* root, bool) PA_NO_THREAD_SAFETY_ANALYSIS {}

// PA_NO_THREAD_SAFETY_ANALYSIS: acquires the lock and doesn't release it, by
// design.
void BeforeForkInParent() PA_NO_THREAD_SAFETY_ANALYSIS {}

template <typename T>
void UnlockOrReinit(T& lock, bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {}

void UnlockOrReinitRoot(PartitionRoot* root,
                        bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {}

void ReleaseLocks(bool in_child) PA_NO_THREAD_SAFETY_ANALYSIS {}

void AfterForkInParent() {}

void AfterForkInChild() {}
#endif  // PA_CONFIG(HAS_ATFORK_HANDLER)

std::atomic<bool> g_global_init_called;
void PartitionAllocMallocInitOnce() {}
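
// A plausible sketch of what the init-once hook wires up on POSIX (an
// assumption; only the guard variable above is visible here). It would use
// g_global_init_called to run once, then register the fork handlers defined
// under PA_CONFIG(HAS_ATFORK_HANDLER):
//
//   if (g_global_init_called.exchange(true)) {
//     return;
//   }
//   #if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
//   pthread_atfork(BeforeForkInParent, AfterForkInParent, AfterForkInChild);
//   #endif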

}  // namespace

#if PA_BUILDFLAG(IS_APPLE)
void PartitionAllocMallocHookOnBeforeForkInParent() {
  BeforeForkInParent();
}

void PartitionAllocMallocHookOnAfterForkInParent() {
  AfterForkInParent();
}

void PartitionAllocMallocHookOnAfterForkInChild() {
  AfterForkInChild();
}
#endif  // PA_BUILDFLAG(IS_APPLE)

#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if PA_CONFIG(ENABLE_SHADOW_METADATA)
namespace {

void MakeSuperPageExtentEntriesShared(PartitionRoot* root,
                                      internal::PoolHandleMask mask)
    PA_NO_THREAD_SAFETY_ANALYSIS {
  PA_DCHECK(root);
  switch (root->ChoosePool()) {
    case internal::kRegularPoolHandle:
      if (!ContainsFlags(mask, internal::PoolHandleMask::kRegular)) {
        return;
      }
      root->settings.shadow_pool_offset_ =
          internal::PartitionAddressSpace::RegularPoolShadowOffset();
      break;
    case internal::kBRPPoolHandle:
      if (!ContainsFlags(mask, internal::PoolHandleMask::kBRP)) {
        return;
      }
      root->settings.shadow_pool_offset_ =
          internal::PartitionAddressSpace::BRPPoolShadowOffset();
      break;
    case internal::kConfigurablePoolHandle:
      if (!ContainsFlags(mask, internal::PoolHandleMask::kConfigurable)) {
        return;
      }
      root->settings.shadow_pool_offset_ =
          internal::PartitionAddressSpace::ConfigurablePoolShadowOffset();
      break;
    default:
      return;
  }

  // For normal-bucketed.
  for (const internal::PartitionSuperPageExtentEntry<MetadataKind::kReadOnly>*
           extent = root->first_extent;
       extent != nullptr; extent = extent->next) {
    // The page that contains the extent is in use and mapped shared.
    uintptr_t super_page = SuperPagesBeginFromExtent(extent);
    for (size_t i = 0; i < extent->number_of_consecutive_super_pages; ++i) {
      internal::PartitionAddressSpace::MapMetadata(super_page,
                                                   /*copy_metadata=*/true);
      super_page += kSuperPageSize;
    }
    PA_DCHECK(extent->root == root);
  }

  // For direct-mapped.
  for (const internal::PartitionDirectMapExtent<MetadataKind::kReadOnly>*
           extent = root->direct_map_list;
       extent != nullptr; extent = extent->next_extent) {
    internal::PartitionAddressSpace::MapMetadata(
        reinterpret_cast<uintptr_t>(extent) & internal::kSuperPageBaseMask,
        /*copy_metadata=*/true);
  }
}
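
// Note on how the offset set above is meant to be consumed (an assumption
// about the shadow-metadata scheme; this function only records the offset):
// a writable alias of a read-only metadata page lives at a fixed displacement
// from it, so conversion is plain pointer arithmetic:
//
//   uintptr_t writable_metadata =
//       read_only_metadata + root->settings.shadow_pool_offset_;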

}  // namespace
#endif  // PA_CONFIG(ENABLE_SHADOW_METADATA)

namespace internal {

namespace {
// 64 was chosen arbitrarily, as it seems like a reasonable trade-off between
// performance and purging opportunity. Higher value (i.e. smaller slots)
// wouldn't necessarily increase chances of purging, but would result in
// more work and larger |slot_usage| array. Lower value would probably decrease
// chances of purging. Not empirically tested.
constexpr size_t kMaxPurgeableSlotsPerSystemPage = 64;
// See above, this will lead to less work getting done, so lower cost, lower
// savings.
constexpr size_t kConservativeMaxPurgeableSlotsPerSystemPage = 2;

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MinPurgeableSlotSize() {
  return SystemPageSize() / kMaxPurgeableSlotsPerSystemPage;
}

PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
MinConservativePurgeableSlotSize() {
  return SystemPageSize() / kConservativeMaxPurgeableSlotsPerSystemPage;
}
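
// Worked example (illustrative; 4096 is an assumed system page size, not a
// value fixed by this file): with SystemPageSize() == 4096,
// MinPurgeableSlotSize() == 4096 / 64 == 64 bytes, while
// MinConservativePurgeableSlotSize() == 4096 / 2 == 2048 bytes, i.e. the
// conservative variant only ever inspects buckets with large slots.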
}  // namespace

// The function attempts to unprovision unused slots and discard unused pages.
// It may also "straighten" the free list.
//
// If `accounting_only` is set to true, no action is performed and the function
// merely returns the number of bytes in the would-be discarded pages.
PA_NOPROFILE
static size_t PartitionPurgeSlotSpan(PartitionRoot* root,
                                     internal::SlotSpanMetadata* slot_span,
                                     bool accounting_only)
    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {}
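
// Illustrative two-pass use of `accounting_only` (a sketch, not code from
// this file; kSomeThreshold is hypothetical), with the root's lock held:
//
//   size_t would_discard =
//       PartitionPurgeSlotSpan(root, slot_span, /*accounting_only=*/true);
//   if (would_discard >= kSomeThreshold) {
//     PartitionPurgeSlotSpan(root, slot_span, /*accounting_only=*/false);
//   }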

PA_NOPROFILE
static void PartitionPurgeBucket(PartitionRoot* root,
                                 internal::PartitionBucket* bucket)
    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {}

static void PartitionDumpSlotSpanStats(PartitionBucketMemoryStats* stats_out,
                                       PartitionRoot* root,
                                       internal::SlotSpanMetadata* slot_span)
    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {}

static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
                                     PartitionRoot* root,
                                     const internal::PartitionBucket* bucket)
    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(root)) {}

#if PA_BUILDFLAG(DCHECKS_ARE_ON)
void DCheckIfManagedByPartitionAllocBRPPool(uintptr_t address) {}
#endif

#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
void PartitionAllocThreadIsolationInit(
    ThreadIsolationOption thread_isolation) {}
#endif  // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)

}  // namespace internal

[[noreturn]] PA_NOINLINE void PartitionRoot::OutOfMemory(size_t size) {}

void PartitionRoot::DecommitEmptySlotSpans() {}

void PartitionRoot::DecommitEmptySlotSpansForTesting() {}

void PartitionRoot::DestructForTesting()
    PA_EXCLUSIVE_LOCKS_REQUIRED(internal::PartitionRootLock(this)) {}

#if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
void PartitionRoot::InitMac11MallocSizeHackUsableSize() {
  settings.mac11_malloc_size_hack_enabled_ = true;

  // Request of 32B will fall into a 48B bucket in the presence of BRP
  // in-slot metadata, yielding |48 - in_slot_metadata_size| of actual usable
  // space.
  PA_DCHECK(settings.in_slot_metadata_size);
  settings.mac11_malloc_size_hack_usable_size_ =
      48 - settings.in_slot_metadata_size;
}
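
// Worked example (illustrative; 8 is an assumed in-slot metadata size, not a
// value from this file): a malloc(32) falls into the 48-byte bucket, so
// mac11_malloc_size_hack_usable_size_ becomes 48 - 8 == 40 usable bytes.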

void PartitionRoot::EnableMac11MallocSizeHackForTesting() {
  InitMac11MallocSizeHackUsableSize();
}

void PartitionRoot::EnableMac11MallocSizeHackIfNeeded() {
  PA_DCHECK(settings.brp_enabled_);
  if (internal::base::mac::MacOSMajorVersion() == 11) {
    InitMac11MallocSizeHackUsableSize();
  }
}
#endif  // PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)

#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
    !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
namespace {
std::atomic<bool> g_reserve_brp_guard_region_called;
// An address constructed by repeating `kQuarantinedByte` should never point
// to valid memory. Preemptively reserve a memory region around that address
// and make it inaccessible. Not needed on 64-bit platforms, where the address
// is guaranteed to be non-canonical. Safe to call multiple times.
void ReserveBackupRefPtrGuardRegionIfNeeded() {
  bool expected = false;
  // No need to block execution for potential concurrent initialization, merely
  // want to make sure this is only called once.
  if (!g_reserve_brp_guard_region_called.compare_exchange_strong(expected,
                                                                 true)) {
    return;
  }

  size_t alignment = internal::PageAllocationGranularity();
  uintptr_t requested_address;
  memset(&requested_address, internal::kQuarantinedByte,
         sizeof(requested_address));
  requested_address = RoundDownToPageAllocationGranularity(requested_address);

  // Request several pages so that even unreasonably large C++ objects stay
  // within the inaccessible region. If some of the pages can't be reserved,
  // it's still preferable to try and reserve the rest.
  for (size_t i = 0; i < 4; ++i) {
    [[maybe_unused]] uintptr_t allocated_address =
        AllocPages(requested_address, alignment, alignment,
                   PageAccessibilityConfiguration(
                       PageAccessibilityConfiguration::kInaccessible),
                   PageTag::kPartitionAlloc);
    requested_address += alignment;
  }
}
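
// Worked example (illustrative; assumes a 32-bit build, 4 KiB allocation
// granularity, and kQuarantinedByte == 0xEF, none of which this file pins
// down): memset() yields requested_address == 0xEFEFEFEF, which rounds down
// to 0xEFEFE000; the loop then requests four consecutive granules covering
// [0xEFEFE000, 0xEFF02000), which contains the quarantined pattern address.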
}  // namespace
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
        // !PA_BUILDFLAG(HAS_64_BIT_POINTERS)

void PartitionRoot::Init(PartitionOptions opts) {}

PartitionRoot::Settings::Settings() = default;

PartitionRoot::PartitionRoot() {}

PartitionRoot::PartitionRoot(PartitionOptions opts) {}

PartitionRoot::~PartitionRoot() {}

void PartitionRoot::EnableThreadCacheIfSupported() {}

bool PartitionRoot::TryReallocInPlaceForDirectMap(
    internal::SlotSpanMetadata* slot_span,
    size_t requested_size) {}

bool PartitionRoot::TryReallocInPlaceForNormalBuckets(
    void* object,
    SlotSpanMetadata* slot_span,
    size_t new_size) {}

void PartitionRoot::PurgeMemory(int flags) {}

void PartitionRoot::ShrinkEmptySlotSpansRing(size_t limit) {}

void PartitionRoot::DumpStats(const char* partition_name,
                              bool is_light_dump,
                              PartitionStatsDumper* dumper) {}

// static
void PartitionRoot::DeleteForTesting(PartitionRoot* partition_root) {}

void PartitionRoot::ResetForTesting(bool allow_leaks) {}

void PartitionRoot::ResetBookkeepingForTesting() {}

void PartitionRoot::SetGlobalEmptySlotSpanRingIndexForTesting(int16_t index) {}

ThreadCache* PartitionRoot::MaybeInitThreadCache() {}

// static
void PartitionRoot::SetStraightenLargerSlotSpanFreeListsMode(
    StraightenLargerSlotSpanFreeListsMode new_value) {}

// static
void PartitionRoot::SetSortSmallerSlotSpanFreeListsEnabled(bool new_value) {}

// static
void PartitionRoot::SetSortActiveSlotSpansEnabled(bool new_value) {}

#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
PA_NOINLINE void PartitionRoot::QuarantineForBrp(
    const SlotSpanMetadata* slot_span,
    void* object) {}
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

// static
#if PA_CONFIG(ENABLE_SHADOW_METADATA)
void PartitionRoot::EnableShadowMetadata(internal::PoolHandleMask mask) {
  internal::ScopedGuard guard(g_root_enumerator_lock);
  // Must lock all PartitionRoot-s and ThreadCache.
  internal::PartitionRootEnumerator::Instance().Enumerate(
      LockRoot, false,
      internal::PartitionRootEnumerator::EnumerateOrder::kNormal);
  {
    internal::ScopedGuard thread_cache_guard(ThreadCacheRegistry::GetLock());
    internal::PartitionAddressSpace::InitShadowMetadata(mask);
    internal::PartitionRootEnumerator::Instance().Enumerate(
        MakeSuperPageExtentEntriesShared, mask,
        internal::PartitionRootEnumerator::EnumerateOrder::kNormal);
  }
  internal::PartitionRootEnumerator::Instance().Enumerate(
      UnlockOrReinitRoot, false,
      internal::PartitionRootEnumerator::EnumerateOrder::kReverse);
}
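
// Illustrative call site (a sketch; combining masks with `|` is assumed to be
// supported, mirroring the ContainsFlags() usage earlier in this file):
//
//   PartitionRoot::EnableShadowMetadata(internal::PoolHandleMask::kRegular |
//                                       internal::PoolHandleMask::kBRP);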
#endif  // PA_CONFIG(ENABLE_SHADOW_METADATA)

// Explicitly define common template instantiations to reduce compile time.
#define EXPORT_TEMPLATE \
  template PA_EXPORT_TEMPLATE_DEFINE(PA_COMPONENT_EXPORT(PARTITION_ALLOC))
EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kNone>(size_t,
                                                              const char*);
EXPORT_TEMPLATE void* PartitionRoot::Alloc<AllocFlags::kReturnNull>(
    size_t,
    const char*);
EXPORT_TEMPLATE void*
PartitionRoot::Realloc<AllocFlags::kNone, FreeFlags::kNone>(void*,
                                                            size_t,
                                                            const char*);
EXPORT_TEMPLATE void*
PartitionRoot::Realloc<AllocFlags::kReturnNull, FreeFlags::kNone>(void*,
                                                                  size_t,
                                                                  const char*);
EXPORT_TEMPLATE void* PartitionRoot::AlignedAlloc<AllocFlags::kNone>(size_t,
                                                                     size_t);
#undef EXPORT_TEMPLATE

// TODO(crbug.com/40940915) Stop ignoring the -Winvalid-offsetof warning.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Winvalid-offsetof"
#endif
static_assert(offsetof(PartitionRoot, sentinel_bucket) ==
                  offsetof(PartitionRoot, buckets) +
                      internal::kNumBuckets * sizeof(PartitionRoot::Bucket),
              "sentinel_bucket must be just after the regular buckets.");

static_assert(
    offsetof(PartitionRoot, lock_) >= 64,
    "The lock should not be on the same cacheline as the read-mostly flags");
#if defined(__clang__)
#pragma clang diagnostic pop
#endif

}  // namespace partition_alloc