chromium/base/allocator/partition_allocator/src/partition_alloc/address_pool_manager.cc

// Copyright 2020 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "partition_alloc/address_pool_manager.h"

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <limits>

#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_alloc_base/notreached.h"
#include "partition_alloc/partition_alloc_check.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/thread_isolation/alignment.h"

#if PA_BUILDFLAG(IS_APPLE) || PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
#include <sys/mman.h>
#endif

namespace partition_alloc::internal {

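// A single, process-wide instance with static storage duration.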
AddressPoolManager AddressPoolManager::singleton_;

// static
AddressPoolManager& AddressPoolManager::GetInstance() {
  return singleton_;
}

namespace {
// Allocations are all performed on behalf of PartitionAlloc.
constexpr PageTag kPageTag = PageTag::kPartitionAlloc;

}  // namespace

#if PA_BUILDFLAG(HAS_64_BIT_POINTERS)

namespace {

// This will crash if the range cannot be decommitted.
void DecommitPages(uintptr_t address, size_t size) {
  // Callers rely on the pages being zero-initialized when recommitting them.
  // |DecommitSystemPages| doesn't guarantee this on all operating systems, in
  // particular on macOS, but |DecommitAndZeroSystemPages| does.
  DecommitAndZeroSystemPages(address, size, kPageTag);
}

}  // namespace

void AddressPoolManager::Add(pool_handle handle,
                             uintptr_t ptr,
                             size_t length) {
  PA_DCHECK(!(ptr & kSuperPageOffsetMask));
  PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
  PA_CHECK(handle > 0 && handle <= kNumPools);

  Pool* pool = GetPool(handle);
  PA_CHECK(!pool->IsInitialized());
  pool->Initialize(ptr, length);
}

void AddressPoolManager::GetPoolUsedSuperPages(
    pool_handle handle,
    std::bitset<kMaxSuperPagesInPool>& used) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return;
  }

  pool->GetUsedSuperPages(used);
}

uintptr_t AddressPoolManager::GetPoolBaseAddress(pool_handle handle) {
  Pool* pool = GetPool(handle);
  if (!pool) {
    return 0;
  }

  return pool->GetBaseAddress();
}

void AddressPoolManager::ResetForTesting() {
  for (size_t i = 0; i < kNumPools; ++i) {
    aligned_pools_.pools_[i].Reset();
  }
}

void AddressPoolManager::Remove(pool_handle handle) {
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  pool->Reset();
}

uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  Pool* pool = GetPool(handle);
  if (!requested_address) {
    return pool->FindChunk(length);
  }

  // Try to honor the requested address first; fall back to a first-fit search
  // if that chunk is unavailable.
  const bool is_available = pool->TryReserveChunk(requested_address, length);
  if (is_available) {
    return requested_address;
  }
  return pool->FindChunk(length);
}

void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(kNullPoolHandle < handle && handle <= kNumPools);
  Pool* pool = GetPool(handle);
  PA_DCHECK(pool->IsInitialized());
  DecommitPages(address, length);
  pool->FreeChunk(address, length);
}

void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
  PA_CHECK(ptr != 0);
  PA_CHECK(!(ptr & kSuperPageOffsetMask));
  PA_CHECK(!(length & kSuperPageOffsetMask));
  address_begin_ = ptr;
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
  address_end_ = ptr + length;
  PA_DCHECK(address_begin_ < address_end_);
#endif

  total_bits_ = length / kSuperPageSize;
  PA_CHECK(total_bits_ <= kMaxSuperPagesInPool);

  ScopedGuard scoped_lock(lock_);
  alloc_bitset_.reset();
  bit_hint_ = 0;
}

bool AddressPoolManager::Pool::IsInitialized() {
  return address_begin_ != 0;
}

void AddressPoolManager::Pool::Reset() {
  address_begin_ = 0;
}

void AddressPoolManager::Pool::GetUsedSuperPages(
    std::bitset<kMaxSuperPagesInPool>& used) {
  ScopedGuard scoped_lock(lock_);
  used = alloc_bitset_;
}

uintptr_t AddressPoolManager::Pool::GetBaseAddress() {
  return address_begin_;
}

uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(requested_size & kSuperPageOffsetMask));
  const size_t need_bits = requested_size / kSuperPageSize;

  // Use first-fit policy to find a chunk from the pool. |bit_hint_| tracks
  // the fully-allocated prefix of the bitset, so the scan can skip it.
  size_t beg_bit = bit_hint_;
  size_t curr_bit = bit_hint_;
  while (true) {
    // |end_bit| points 1 past the last bit that needs to be 0. If it goes
    // past |total_bits_|, return 0 to signal that no free chunk was found.
    size_t end_bit = beg_bit + need_bits;
    if (end_bit > total_bits_) {
      return 0;
    }

    bool found = true;
    for (; curr_bit < end_bit; ++curr_bit) {
      if (alloc_bitset_.test(curr_bit)) {
        // The bit was set, so this chunk isn't entirely free. Restart the
        // search from the bit after it.
        beg_bit = curr_bit + 1;
        found = false;
        if (bit_hint_ == curr_bit) {
          ++bit_hint_;
        }
      }
    }

    // An entire [beg_bit; end_bit) range of zeroes was found. Mark the bits
    // as allocated and return the chunk's address.
    if (found) {
      for (size_t i = beg_bit; i < end_bit; ++i) {
        PA_DCHECK(!alloc_bitset_.test(i));
        alloc_bitset_.set(i);
      }
      if (bit_hint_ == beg_bit) {
        bit_hint_ = end_bit;
      }
      return address_begin_ + beg_bit * kSuperPageSize;
    }
  }
}

bool AddressPoolManager::Pool::TryReserveChunk(uintptr_t address,
                                               size_t requested_size) {
  ScopedGuard scoped_lock(lock_);
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  const size_t begin_bit = (address - address_begin_) / kSuperPageSize;
  const size_t need_bits = requested_size / kSuperPageSize;
  const size_t end_bit = begin_bit + need_bits;
  // Check that the requested address is not too high.
  if (end_bit > total_bits_) {
    return false;
  }
  // Check if any bit of the requested region is set already.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    if (alloc_bitset_.test(i)) {
      return false;
    }
  }
  // Otherwise, set the bits.
  for (size_t i = begin_bit; i < end_bit; ++i) {
    alloc_bitset_.set(i);
  }
  return true;
}

void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
  ScopedGuard scoped_lock(lock_);

  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(free_size & kSuperPageOffsetMask));

  PA_DCHECK(address_begin_ <= address);
#if PA_BUILDFLAG(DCHECKS_ARE_ON)
  PA_DCHECK(address + free_size <= address_end_);
#endif

  const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
  const size_t end_bit = beg_bit + free_size / kSuperPageSize;
  for (size_t i = beg_bit; i < end_bit; ++i) {
    PA_DCHECK(alloc_bitset_.test(i));
    alloc_bitset_.reset(i);
  }
  bit_hint_ = std::min(bit_hint_, beg_bit);
}

void AddressPoolManager::Pool::GetStats(PoolStats* stats) {
  std::bitset<kMaxSuperPagesInPool> pages;
  size_t i;
  {
    ScopedGuard scoped_lock(lock_);
    pages = alloc_bitset_;
    i = bit_hint_;
  }

  stats->usage = pages.count();

  // Find the longest run of clear bits, i.e. the largest contiguous
  // reservation that could still be satisfied.
  size_t largest_run = 0;
  size_t current_run = 0;
  for (; i < total_bits_; ++i) {
    if (!pages[i]) {
      current_run += 1;
      continue;
    }
    if (current_run > largest_run) {
      largest_run = current_run;
    }
    current_run = 0;
  }

  // The loop may end while still inside a run of clear bits; check once more.
  if (current_run > largest_run) {
    largest_run = current_run;
  }
  stats->largest_available_reservation = largest_run;
}

void AddressPoolManager::GetPoolStats(const pool_handle handle,
                                      PoolStats* stats) {
  Pool* pool = GetPool(handle);
  if (!pool->IsInitialized()) {
    return;
  }
  pool->GetStats(stats);
}

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  // Get 64-bit pool stats.
  GetPoolStats(kRegularPoolHandle, &stats->regular_pool_stats);
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  GetPoolStats(kBRPPoolHandle, &stats->brp_pool_stats);
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (IsConfigurablePoolAvailable()) {
    GetPoolStats(kConfigurablePoolHandle, &stats->configurable_pool_stats);
  }
#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
  GetPoolStats(kThreadIsolatedPoolHandle, &stats->thread_isolated_pool_stats);
#endif
  return true;
}

#else  // PA_BUILDFLAG(HAS_64_BIT_POINTERS)

static_assert(
    kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
        0,
    "kSuperPageSize must be a multiple of kBytesPer1BitOfBRPPoolBitmap.");
static_assert(
    kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap > 0,
    "kSuperPageSize must be larger than kBytesPer1BitOfBRPPoolBitmap.");
static_assert(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap >=
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              "kGuardBitsOfBRPPoolBitmap must be larger than or equal to "
              "kGuardOffsetOfBRPPoolBitmap.");

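// Sets |bit_length| bits in |bitmap|, starting at |start_bit|. The bits must
// all be clear beforehand (enforced by PA_DCHECK).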
template <size_t bitsize>
void SetBitmap(std::bitset<bitsize>& bitmap,
               size_t start_bit,
               size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(!bitmap.test(i));
    bitmap.set(i);
  }
}

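// Clears |bit_length| bits in |bitmap|, starting at |start_bit|. The bits
// must all be set beforehand (enforced by PA_DCHECK).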
template <size_t bitsize>
void ResetBitmap(std::bitset<bitsize>& bitmap,
                 size_t start_bit,
                 size_t bit_length) {
  const size_t end_bit = start_bit + bit_length;
  PA_DCHECK(start_bit <= bitsize);
  PA_DCHECK(end_bit <= bitsize);

  for (size_t i = start_bit; i < end_bit; ++i) {
    PA_DCHECK(bitmap.test(i));
    bitmap.reset(i);
  }
}

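// With 32-bit pointers there is no pre-reserved pool: each Reserve() call
// maps fresh, inaccessible address space via AllocPages(), aligned to super
// pages.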
uintptr_t AddressPoolManager::Reserve(pool_handle handle,
                                      uintptr_t requested_address,
                                      size_t length) {
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  uintptr_t address =
      AllocPages(requested_address, length, kSuperPageSize,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kInaccessible),
                 kPageTag);
  return address;
}

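// Counterpart of Reserve() above: the mapping is returned to the system via
// FreePages() instead of being recycled within a pool.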
void AddressPoolManager::UnreserveAndDecommit(pool_handle handle,
                                              uintptr_t address,
                                              size_t length) {
  PA_DCHECK(!(address & kSuperPageOffsetMask));
  PA_DCHECK(!(length & DirectMapAllocationGranularityOffsetMask()));
  FreePages(address, length);
}

void AddressPoolManager::MarkUsed(pool_handle handle,
                                  uintptr_t address,
                                  size_t length) {
  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByPartitionAllocBRPPool() return false when an address
    // inside the first or the last PartitionPageSize()-bytes block is given:
    //
    //          ------+---+---------------+---+----
    // memory   ..... | B | managed by PA | B | ...
    // regions  ------+---+---------------+---+----
    //
    // B: PartitionPageSize()-bytes block. This is used internally by the
    // allocator and is not available for callers.
    //
    // This is required to avoid crash caused by the following code:
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //     // |ptr| may point to an address inside 'B'.
    //   }
    //
    // Suppose that |ptr| points to an address inside B after the loop. If
    // IsManagedByPartitionAllocBRPPool(ptr) were to return true, ~raw_ptr<T>()
    // would crash, since the memory is not allocated by PartitionAlloc.
    SetBitmap(AddressPoolManagerBitmap::brp_pool_bits_,
              (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
                  AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
              (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
                  AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    SetBitmap(AddressPoolManagerBitmap::regular_pool_bits_,
              address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
              length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}

void AddressPoolManager::MarkUnused(pool_handle handle,
                                    uintptr_t address,
                                    size_t length) {
  // Address regions allocated for normal buckets are never released, so this
  // function can only be called for direct-map allocations. However, do not
  // DCHECK on IsManagedByDirectMap(address), because many tests exercise this
  // function using small allocations.

  ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
  // When ENABLE_BACKUP_REF_PTR_SUPPORT is off, BRP pool isn't used.
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  if (handle == kBRPPoolHandle) {
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap) == 0);

    // Make IsManagedByPartitionAllocBRPPool() return false when an address
    // inside the first or the last PartitionPageSize()-bytes block is given.
    // (See the comment in MarkUsed().)
    ResetBitmap(
        AddressPoolManagerBitmap::brp_pool_bits_,
        (address >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) +
            AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap,
        (length >> AddressPoolManagerBitmap::kBitShiftOfBRPPoolBitmap) -
            AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap);
  } else
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  {
    PA_DCHECK(handle == kRegularPoolHandle);
    PA_DCHECK(
        (length % AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap) ==
        0);
    ResetBitmap(
        AddressPoolManagerBitmap::regular_pool_bits_,
        address >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap,
        length >> AddressPoolManagerBitmap::kBitShiftOfRegularPoolBitmap);
  }
}

void AddressPoolManager::ResetForTesting() {
  ScopedGuard guard(AddressPoolManagerBitmap::GetLock());
  AddressPoolManagerBitmap::regular_pool_bits_.reset();
  AddressPoolManagerBitmap::brp_pool_bits_.reset();
}

namespace {

// Counts super pages in use represented by `bitmap`. A super page is
// considered used if any one of its bits is set.
template <size_t bitsize>
size_t CountUsedSuperPages(const std::bitset<bitsize>& bitmap,
                           const size_t bits_per_super_page) {
  size_t count = 0;
  size_t bit_index = 0;

  // Stride over super pages.
  for (size_t super_page_index = 0; bit_index < bitsize; ++super_page_index) {
    // Stride over the bits comprising the super page.
    for (bit_index = super_page_index * bits_per_super_page;
         bit_index < (super_page_index + 1) * bits_per_super_page &&
         bit_index < bitsize;
         ++bit_index) {
      if (bitmap[bit_index]) {
        count += 1;
        // Move on to the next super page.
        break;
      }
    }
  }
  return count;
}

}  // namespace

bool AddressPoolManager::GetStats(AddressSpaceStats* stats) {
  std::bitset<AddressPoolManagerBitmap::kRegularPoolBits> regular_pool_bits;
  std::bitset<AddressPoolManagerBitmap::kBRPPoolBits> brp_pool_bits;
  {
    ScopedGuard scoped_lock(AddressPoolManagerBitmap::GetLock());
    regular_pool_bits = AddressPoolManagerBitmap::regular_pool_bits_;
    brp_pool_bits = AddressPoolManagerBitmap::brp_pool_bits_;
  }  // scoped_lock

  // Pool usage is read out from the address pool bitmaps.
  // The output stats are sized in super pages, so the fine-grained bitmap
  // usage is converted into a count of used super pages.
  static_assert(
      kSuperPageSize %
              AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kRegularPoolBitsPerSuperPage =
      kSuperPageSize /
      AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;

  // Get 32-bit pool usage.
  stats->regular_pool_stats.usage =
      CountUsedSuperPages(regular_pool_bits, kRegularPoolBitsPerSuperPage);
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  static_assert(
      kSuperPageSize % AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap ==
          0,
      "information loss when calculating metrics");
  constexpr size_t kBRPPoolBitsPerSuperPage =
      kSuperPageSize / AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap;
  stats->brp_pool_stats.usage =
      CountUsedSuperPages(brp_pool_bits, kBRPPoolBitsPerSuperPage);

  // Get blocklist size.
  for (const auto& blocked :
       AddressPoolManagerBitmap::brp_forbidden_super_page_map_) {
    if (blocked.load(std::memory_order_relaxed)) {
      stats->blocklist_size += 1;
    }
  }

  // Count failures in finding non-blocklisted addresses.
  stats->blocklist_hit_count =
      AddressPoolManagerBitmap::blocklist_hit_count_.load(
          std::memory_order_relaxed);
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  return true;
}

#endif  // PA_BUILDFLAG(HAS_64_BIT_POINTERS)

void AddressPoolManager::DumpStats(AddressSpaceStatsDumper* dumper) {
  AddressSpaceStats stats{};
  if (GetStats(&stats)) {
    dumper->DumpStats(&stats);
  }
}

#if PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)
// This function just exists to static_assert the layout of the private fields
// in Pool. It is never called.
void AddressPoolManager::AssertThreadIsolatedLayout() {}
#endif  // PA_BUILDFLAG(ENABLE_THREAD_ISOLATION)

}  // namespace partition_alloc::internal