chromium/base/allocator/partition_allocator/src/partition_alloc/partition_alloc_unittest.cc

// Copyright 2013 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
#include <memory>
#include <random>
#include <set>
#include <tuple>
#include <vector>

#include "partition_alloc/address_space_randomization.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/dangling_raw_ptr_checks.h"
#include "partition_alloc/freeslot_bitmap.h"
#include "partition_alloc/in_slot_metadata.h"
#include "partition_alloc/lightweight_quarantine.h"
#include "partition_alloc/memory_reclaimer.h"
#include "partition_alloc/page_allocator_constants.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/cpu.h"
#include "partition_alloc/partition_alloc_base/logging.h"
#include "partition_alloc/partition_alloc_base/numerics/checked_math.h"
#include "partition_alloc/partition_alloc_base/rand_util.h"
#include "partition_alloc/partition_alloc_base/system/sys_info.h"
#include "partition_alloc/partition_alloc_base/test/gtest_util.h"
#include "partition_alloc/partition_alloc_base/thread_annotations.h"
#include "partition_alloc/partition_alloc_base/threading/platform_thread_for_testing.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_for_testing.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/partition_bucket.h"
#include "partition_alloc/partition_cookie.h"
#include "partition_alloc/partition_freelist_entry.h"
#include "partition_alloc/partition_page.h"
#include "partition_alloc/partition_root.h"
#include "partition_alloc/partition_stats.h"
#include "partition_alloc/reservation_offset_table.h"
#include "partition_alloc/tagging.h"
#include "partition_alloc/thread_isolation/thread_isolation.h"
#include "partition_alloc/use_death_tests.h"
#include "testing/gtest/include/gtest/gtest.h"

#if defined(__ARM_FEATURE_MEMORY_TAGGING)
#include <arm_acle.h>
#endif

#if PA_BUILDFLAG(IS_POSIX)
#if PA_BUILDFLAG(IS_LINUX)
// We need PKEY_DISABLE_WRITE in this file; glibc defines it in sys/mman.h but
// it's actually Linux-specific and other Linux libcs define it in linux/mman.h.
// We have to include both to be sure we get the definition.
#include <linux/mman.h>
#endif  // PA_BUILDFLAG(IS_LINUX)
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/time.h>
#endif  // PA_BUILDFLAG(IS_POSIX)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(IS_MAC)
#include <OpenCL/opencl.h>
#endif

#if PA_BUILDFLAG(IS_MAC)
#include "partition_alloc/partition_alloc_base/mac/mac_util.h"
#endif

#if PA_BUILDFLAG(ENABLE_PKEYS)
#include <sys/syscall.h>
#endif

// Headers for the AmountOfPhysicalMemory() function.
#if PA_BUILDFLAG(IS_FUCHSIA)
#include <zircon/syscalls.h>
#elif PA_BUILDFLAG(IS_WIN)
#include <windows.h>
#elif PA_BUILDFLAG(IS_APPLE)
#include <mach/host_info.h>
#include <mach/mach.h>
#elif PA_BUILDFLAG(IS_POSIX)
#include <unistd.h>
#endif

// In the MTE world, the upper bits of a pointer can be decorated with a tag,
// thus allowing many versions of the same pointer to exist. These macros take
// that into account when comparing.
#define PA_EXPECT_PTR_EQ(ptr1, ptr2)
#define PA_EXPECT_PTR_NE(ptr1, ptr2)
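// A minimal sketch of how these could be defined, assuming the UntagPtr()
// helper from tagging.h (included above) strips the MTE tag bits before
// comparison; the actual macro bodies are elided here:
//
//   #define PA_EXPECT_PTR_EQ(ptr1, ptr2) \
//     EXPECT_EQ(UntagPtr(ptr1), UntagPtr(ptr2))
//   #define PA_EXPECT_PTR_NE(ptr1, ptr2) \
//     EXPECT_NE(UntagPtr(ptr1), UntagPtr(ptr2))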

namespace {

// Best effort to get the amount of physical memory available to the system.
// Returns 0 on failure.
uint64_t AmountOfPhysicalMemory() {}
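// A minimal sketch of the POSIX branch, assuming sysconf() from <unistd.h>
// (included above); the elided implementation also covers the Fuchsia,
// Windows, and Apple branches whose headers are included above:
//
//   uint64_t AmountOfPhysicalMemory() {
//     const long pages = sysconf(_SC_PHYS_PAGES);
//     const long page_size = sysconf(_SC_PAGE_SIZE);
//     if (pages < 0 || page_size < 0) {
//       return 0;  // Failure.
//     }
//     return static_cast<uint64_t>(pages) * static_cast<uint64_t>(page_size);
//   }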

bool IsLargeMemoryDevice() {}

bool SetAddressSpaceLimit() {}

bool ClearAddressSpaceLimit() {}
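// A minimal sketch of how the limit helpers could work on POSIX via
// setrlimit() from <sys/resource.h> (included above), using the 6 GB cap
// described in the "return null" death tests below; the elided
// implementations also handle platforms where the limit is not enforced:
//
//   bool SetAddressSpaceLimit() {
//     struct rlimit limit;
//     if (getrlimit(RLIMIT_DATA, &limit) != 0) {
//       return false;
//     }
//     limit.rlim_cur = 6ULL * 1024 * 1024 * 1024;  // 6 GB.
//     return setrlimit(RLIMIT_DATA, &limit) == 0;
//   }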

const size_t kTestSizes[] =;
constexpr size_t kTestSizesCount =;

template <
    partition_alloc::AllocFlags alloc_flags,
    partition_alloc::FreeFlags free_flags = partition_alloc::FreeFlags::kNone>
void AllocateRandomly(partition_alloc::PartitionRoot* root, size_t count) {}
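// A minimal sketch of what AllocateRandomly() could do, assuming kTestSizes
// holds the candidate sizes and base::RandGenerator() from rand_util.h
// (included above) picks an index; the actual body is elided:
//
//   std::vector<void*> allocations(count, nullptr);
//   for (size_t i = 0; i < count; ++i) {
//     const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
//     allocations[i] = root->Alloc<alloc_flags>(size, nullptr);
//   }
//   for (void* allocation : allocations) {
//     if (allocation) {
//       root->Free<free_flags>(allocation);
//     }
//   }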

void HandleOOM(size_t unused_size) {}

int g_dangling_raw_ptr_detected_count =;
int g_dangling_raw_ptr_released_count =;

class CountDanglingRawPtr {};

}  // namespace

// Note: This test exercises interfaces inside the `partition_alloc`
// namespace, but inspects objects inside `partition_alloc::internal`.
// For ease of reading, the tests are placed into the latter namespace.
namespace partition_alloc::internal {

BucketDistribution;
SlotSpan;

const size_t kTestAllocSize =;

constexpr size_t kPointerOffset =;
#if !PA_BUILDFLAG(DCHECKS_ARE_ON)
constexpr size_t kExtraAllocSizeWithoutMetadata = 0ull;
#else
constexpr size_t kExtraAllocSizeWithoutMetadata =;
#endif

const char* type_name =;

void SetDistributionForPartitionRoot(PartitionRoot* root,
                                     BucketDistribution distribution) {}

struct PartitionAllocTestParam {};

const std::vector<PartitionAllocTestParam> GetPartitionAllocTestParams() {}

class PartitionAllocTest
    : public testing::TestWithParam<PartitionAllocTestParam> {};

#if PA_USE_DEATH_TESTS()

class PartitionAllocDeathTest : public PartitionAllocTest {};

INSTANTIATE_TEST_SUITE_P();

#endif

namespace {

void FreeFullSlotSpan(PartitionRoot* root, SlotSpanMetadata* slot_span) {}

#if PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)
bool CheckPageInCore(void* ptr, bool in_core) {}

#define CHECK_PAGE_IN_CORE(ptr, in_core)
#else
#define CHECK_PAGE_IN_CORE(ptr, in_core)
#endif  // PA_BUILDFLAG(IS_LINUX) || PA_BUILDFLAG(IS_CHROMEOS)

class MockPartitionStatsDumper : public PartitionStatsDumper {};

#if PA_BUILDFLAG(IS_APPLE)
// After deallocating the memory, another thread may allocate memory whose
// address region overlaps the deallocated memory's. This sometimes happens
// when running the tests on Apple OSes with PartitionAlloc-Everywhere.
// So if `IsManagedByNormalBuckets(address_to_check)` returns true, we
// will also check whether `allocator.root()` allocated the memory or not.
// Regarding IsManagedByDirectMap(), this rarely happens because of the
// allocation size, but we should still check which root allocated the memory.
bool IsNormalBucketsAllocatedByRoot(uintptr_t address, PartitionRoot* root) {
  partition_alloc::internal::PartitionSuperPageExtentEntry<
      partition_alloc::internal::MetadataKind::kReadOnly>* extent =
      root->first_extent;
  while (extent != nullptr) {
    uintptr_t super_page =
        partition_alloc::internal::SuperPagesBeginFromExtent(extent);
    uintptr_t super_page_end =
        partition_alloc::internal::SuperPagesEndFromExtent(extent);
    if (super_page <= address && address < super_page_end) {
      return true;
    }
    extent = extent->next;
  }
  return false;
}

bool IsDirectMapAllocatedByRoot(uintptr_t address, PartitionRoot* root) {
  ::partition_alloc::internal::ScopedGuard locker{
      partition_alloc::internal::PartitionRootLock(root)};

  partition_alloc::internal::PartitionDirectMapExtent<
      partition_alloc::internal::MetadataKind::kReadOnly>* extent =
      root->direct_map_list;
  while (extent != nullptr) {
    uintptr_t super_page =
        reinterpret_cast<uintptr_t>(extent) & kSuperPageBaseMask;
    uintptr_t super_page_end = super_page + extent->reservation_size;
    if (super_page <= address && address < super_page_end) {
      return true;
    }
    extent = extent->next_extent;
  }
  return false;
}
#endif  // PA_BUILDFLAG(IS_APPLE)

bool IsManagedByNormalBucketsForTesting(uintptr_t address,
                                        [[maybe_unused]] PartitionRoot* root) {}

bool IsManagedByDirectMapForTesting(uintptr_t address,
                                    [[maybe_unused]] PartitionRoot* root) {}

bool IsManagedByNormalBucketsOrDirectMapForTesting(
    uintptr_t address,
    [[maybe_unused]] PartitionRoot* root) {}
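// A minimal sketch of the wrapper pattern these *ForTesting helpers follow,
// combining the global pool check with the per-root check described above
// (actual bodies elided):
//
//   bool IsManagedByNormalBucketsForTesting(
//       uintptr_t address,
//       [[maybe_unused]] PartitionRoot* root) {
//     return IsManagedByNormalBuckets(address)
//   #if PA_BUILDFLAG(IS_APPLE)
//            && IsNormalBucketsAllocatedByRoot(address, root)
//   #endif  // PA_BUILDFLAG(IS_APPLE)
//         ;
//   }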

}  // namespace

INSTANTIATE_TEST_SUITE_P();

// Check that the most basic of allocate / free pairs work.
TEST_P(PartitionAllocTest, Basic) {}

// Test multiple allocations, and freelist handling.
TEST_P(PartitionAllocTest, MultiAlloc) {}

// Test a bucket with multiple slot spans.
TEST_P(PartitionAllocTest, MultiSlotSpans) {}

// Test some finer aspects of internal slot span transitions.
TEST_P(PartitionAllocTest, SlotSpanTransitions) {}

// Test that ExtraAllocSize() is exactly what PA takes away from the slot for
// extras.
TEST_P(PartitionAllocTest, ExtraAllocSize) {}

TEST_P(PartitionAllocTest, PreferSlotSpansWithProvisionedEntries) {}

// Test some corner cases relating to slot span transitions in the internal
// free slot span list metadata bucket.
TEST_P(PartitionAllocTest, FreeSlotSpanListSlotSpanTransitions) {}

// Test a large series of allocations that cross more than one underlying
// super page.
TEST_P(PartitionAllocTest, MultiPageAllocs) {}

// Test the generic allocation functions that can handle arbitrary sizes and
// reallocing etc.
TEST_P(PartitionAllocTest, Alloc) {}

// Test the generic allocation functions can handle some specific sizes of
// interest.
TEST_P(PartitionAllocTest, AllocSizes) {}

// Test that we can fetch the real allocated size after an allocation.
TEST_P(PartitionAllocTest, AllocGetSizeAndStart) {}

#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
TEST_P(PartitionAllocTest, MTEProtectsFreedPtr) {
  // This test checks that Arm's memory tagging extension (MTE) is correctly
  // protecting freed pointers.
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // This test won't pass without MTE support.
    GTEST_SKIP();
  }

  // Create an arbitrarily-sized small allocation.
  size_t alloc_size = 64 - ExtraAllocSize(allocator);
  uint64_t* ptr1 =
      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
  EXPECT_TRUE(ptr1);

  // Invalidate the pointer by freeing it.
  allocator.root()->Free(ptr1);

  // When we immediately reallocate a pointer, we should see the same allocation
  // slot but with a different tag (PA_EXPECT_PTR_EQ ignores the MTE tag).
  uint64_t* ptr2 =
      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
  PA_EXPECT_PTR_EQ(ptr1, ptr2);
  // The different tag bits mean that ptr1 is not the same as ptr2.
  EXPECT_NE(ptr1, ptr2);

  // When we free again, we expect a new tag for that area that's different from
  // ptr1 and ptr2.
  allocator.root()->Free(ptr2);
  uint64_t* ptr3 =
      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
  PA_EXPECT_PTR_EQ(ptr2, ptr3);
  EXPECT_NE(ptr1, ptr3);
  EXPECT_NE(ptr2, ptr3);

  // No further checks on ptr3; just clean it up to avoid DCHECKs.
  allocator.root()->Free(ptr3);
}
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)

#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
TEST_P(PartitionAllocTest, IsPtrWithinSameAlloc) {}

TEST_P(PartitionAllocTest, GetSlotStartMultiplePages) {}
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

// Test the realloc() contract.
TEST_P(PartitionAllocTest, Realloc) {}

TEST_P(PartitionAllocTest, ReallocDirectMapAligned) {}

TEST_P(PartitionAllocTest, ReallocDirectMapAlignedRelocate) {}

// Tests the handing out of freelists for partial slot spans.
TEST_P(PartitionAllocTest, PartialPageFreelists) {}

// Test some of the fragmentation-resistant properties of the allocator.
TEST_P(PartitionAllocTest, SlotSpanRefilling) {}

// Basic tests to ensure that allocations work for partial page buckets.
TEST_P(PartitionAllocTest, PartialPages) {}

// Test correct handling if our mapping collides with another.
TEST_P(PartitionAllocTest, MappingCollision) {}

// Tests that slot spans in the free slot span cache do get freed as
// appropriate.
TEST_P(PartitionAllocTest, FreeCache) {}

// Tests for a bug we had with losing references to free slot spans.
TEST_P(PartitionAllocTest, LostFreeSlotSpansBug) {}

#if PA_USE_DEATH_TESTS()

// Unit tests that check that an allocation fails in "return null" mode,
// that repeating it doesn't crash, and that it still returns null. The tests
// need to stress memory subsystem limits to do so, hence they try to allocate
// 6 GB of memory, each test with a different per-allocation block size.
//
// On 64-bit systems we need to restrict the address space to force allocation
// failure, so these tests run only on POSIX systems that provide setrlimit(),
// and use it to limit the address space to 6 GB.
//
// Disable these tests on Android because, due to their allocation-heavy
// behavior, they tend to get OOM-killed rather than pass.
//
// Disable these tests on Windows, since they run more slowly there and tend
// to time out, causing flakiness.
#if !PA_BUILDFLAG(IS_WIN) &&                                         \
        (!PA_BUILDFLAG(PA_ARCH_CPU_64_BITS) ||                       \
         (PA_BUILDFLAG(IS_POSIX) &&                                  \
          !(PA_BUILDFLAG(IS_APPLE) || PA_BUILDFLAG(IS_ANDROID)))) || \
    PA_BUILDFLAG(IS_FUCHSIA)
#define MAYBE_RepeatedAllocReturnNullDirect
#define MAYBE_RepeatedReallocReturnNullDirect
#else
#define MAYBE_RepeatedAllocReturnNullDirect
#define MAYBE_RepeatedReallocReturnNullDirect
#endif

// The following four tests wrap a called function in an expect death statement
// to perform their test, because they are non-hermetic. Specifically they are
// going to attempt to exhaust the allocatable memory, which leaves the
// allocator in a bad global state.
// Performing them as death tests causes them to be forked into their own
// process, so they won't pollute other tests.
//
// These tests are *very* slow when PA_BUILDFLAG(DCHECKS_ARE_ON), because they
// memset() many GiB of data (see crbug.com/1168168).
// TODO(lizeb): make these tests faster.
TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {}

// Repeating the above test with Realloc.
TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedReallocReturnNullDirect) {}
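// A minimal sketch of the wrapping pattern described above, assuming a
// hypothetical DoReturnNullTest() helper that runs the allocation loop;
// the actual test bodies are elided:
//
//   TEST_P(PartitionAllocDeathTest, MAYBE_RepeatedAllocReturnNullDirect) {
//     // The exhaustion loop runs in the forked death-test child process.
//     EXPECT_DEATH(DoReturnNullTest(32 * 1024 * 1024), "DoReturnNullTest");
//   }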

// TODO(crbug.com/40855174) re-enable the tests below, once the allocator
// actually returns nullptr for non-direct-mapped allocations.
// When doing so, they will need to be made MAYBE_ like those above.
//
// Tests "return null" with a 512 kB block size.
TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedAllocReturnNull) {}

// Repeating the above test with Realloc.
TEST_P(PartitionAllocDeathTest, DISABLED_RepeatedReallocReturnNull) {}

#if PA_BUILDFLAG(HAS_MEMORY_TAGGING)
// Check that Arm's memory tagging extension (MTE) is correctly protecting
// freed pointers. Writes to a free pointer should result in a crash.
TEST_P(PartitionAllocDeathTest, MTEProtectsFreedPtr) {
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // This test won't pass on systems without MTE.
    GTEST_SKIP();
  }

  constexpr uint64_t kCookie = 0x1234567890ABCDEF;
  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;

  // Make an arbitrary-sized small allocation.
  size_t alloc_size = 64 - ExtraAllocSize(allocator);
  uint64_t* ptr =
      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
  EXPECT_TRUE(ptr);

  // Check that the allocation's writable.
  *ptr = kCookie;

  // Invalidate ptr by freeing it.
  allocator.root()->Free(ptr);

  // Writing to ptr after free() should crash.
  EXPECT_EXIT(
      {
        // Should be in synchronous MTE mode for running this test.
        *ptr = kQuarantined;
      },
      testing::KilledBySignal(SIGSEGV), "");
}

// Check that accessing freed memory will not trigger a crash in
// SuspendTagCheckingScope.
TEST_P(PartitionAllocDeathTest, SuspendTagCheckingScope) {
  base::CPU cpu;
  if (!cpu.has_mte()) {
    // This test won't pass on systems without MTE.
    GTEST_SKIP();
  }

  constexpr uint64_t kQuarantined = 0xEFEFEFEFEFEFEFEF;

  // Make an arbitrary-sized small allocation.
  size_t alloc_size = 64 - ExtraAllocSize(allocator);
  uint64_t* ptr =
      static_cast<uint64_t*>(allocator.root()->Alloc(alloc_size, type_name));
  EXPECT_TRUE(ptr);

  // Invalidate ptr by freeing it.
  allocator.root()->Free(ptr);

  // Writing to ptr after free() should usually crash but not in
  // |SuspendTagCheckingScope|.
  {
    partition_alloc::SuspendTagCheckingScope scope;
    *ptr = kQuarantined;
  }
  // Check that access after the scope will crash.
  EXPECT_EXIT(
      {
        // Should be in synchronous MTE mode for running this test.
        *ptr = kQuarantined;
      },
      testing::KilledBySignal(SIGSEGV), "");
}
#endif  // PA_BUILDFLAG(HAS_MEMORY_TAGGING)

// Make sure that malloc(-1) dies.
// In the past, we had an integer overflow that would alias malloc(-1) to
// malloc(0), which is not good.
TEST_P(PartitionAllocDeathTest, LargeAllocs) {}
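// A minimal sketch of the check such a test could perform (actual body
// elided):
//
//   EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name),
//                "");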

// These tests don't work deterministically when BRP is enabled on certain
// architectures. On Free(), BRP's ref-count inside in-slot metadata gets
// overwritten by an encoded freelist pointer. On little-endian 64-bit
// architectures, this always happens to be an even number, which will trigger
// BRP's own CHECK (sic!). On other architectures, it's likely to be an odd
// number >1, which will fool BRP into thinking the memory isn't freed and is
// still referenced, thus making it quarantine the memory and return early,
// before PA_CHECK(slot_start != freelist_head) is reached.
#if !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
    (PA_BUILDFLAG(HAS_64_BIT_POINTERS) &&           \
     PA_BUILDFLAG(PA_ARCH_CPU_LITTLE_ENDIAN))

// Check that our immediate double-free detection works.
TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree) {}
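// A minimal sketch of the scenario this test exercises (actual body elided):
//
//   void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
//   allocator.root()->Free(ptr);
//   EXPECT_DEATH(allocator.root()->Free(ptr), "");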

// As above, but when this isn't the only slot in the span.
TEST_P(PartitionAllocDeathTest, ImmediateDoubleFree2ndSlot) {}

// Check that our double-free detection based on |num_allocated_slots| not
// going below 0 works.
//
// Unlike the ImmediateDoubleFree test, we can't have a 2ndSlot version, as
// this protection wouldn't work when another slot is present in the span;
// that slot would keep |num_allocated_slots| from going below 0.
TEST_P(PartitionAllocDeathTest, NumAllocatedSlotsDoubleFree) {}

#endif  // !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) || \
        // (PA_BUILDFLAG(HAS_64_BIT_POINTERS) && PA_BUILDFLAG(PA_ARCH_CPU_LITTLE_ENDIAN))

// Check that guard pages are present where expected.
TEST_P(PartitionAllocDeathTest, DirectMapGuardPages) {}
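// A minimal sketch of the kind of probe such a test could perform, assuming
// a size large enough to be direct-mapped and a page-aligned allocation end
// (actual body elided):
//
//   const size_t size = 64 * 1024 * 1024;
//   char* ptr = static_cast<char*>(allocator.root()->Alloc(size, type_name));
//   EXPECT_DEATH(*(ptr - 1) = 'A', "");     // Guard region before the mapping.
//   EXPECT_DEATH(*(ptr + size) = 'A', "");  // Guard region after the mapping.
//   allocator.root()->Free(ptr);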

// These tests rely on precise layout. They handle the cookie, not in-slot
// metadata.
#if !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) && \
    PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)

TEST_P(PartitionAllocDeathTest, UseAfterFreeDetection) {
  base::CPU cpu;
  void* data = allocator.root()->Alloc(100);
  allocator.root()->Free(data);

  // Use after free: this doesn't crash here, but the next allocation should
  // crash, since we corrupted the freelist.
  memset(data, 0x42, 100);
  EXPECT_DEATH(allocator.root()->Alloc(100), "");
}

TEST_P(PartitionAllocDeathTest, FreelistCorruption) {
  base::CPU cpu;
  const size_t alloc_size = 2 * sizeof(void*);
  void** fake_freelist_entry =
      static_cast<void**>(allocator.root()->Alloc(alloc_size));
  fake_freelist_entry[0] = nullptr;
  fake_freelist_entry[1] = nullptr;

  void** uaf_data = static_cast<void**>(allocator.root()->Alloc(alloc_size));
  allocator.root()->Free(uaf_data);
  // Try to confuse the allocator. This is still easy to circumvent willingly;
  // one "just" needs to set uaf_data[1] to ~uaf_data[0].
  void* previous_uaf_data = uaf_data[0];
  uaf_data[0] = fake_freelist_entry;
  EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");

  // Restore the freelist entry value, otherwise freelist corruption is detected
  // in TearDown(), crashing this process.
  uaf_data[0] = previous_uaf_data;

  allocator.root()->Free(fake_freelist_entry);
}

// With PA_BUILDFLAG(DCHECKS_ARE_ON), cookie already handles off-by-one
// detection.
#if !PA_BUILDFLAG(DCHECKS_ARE_ON)
TEST_P(PartitionAllocDeathTest, OffByOneDetection) {
  base::CPU cpu;
  const size_t alloc_size = 2 * sizeof(void*);
  char* array = static_cast<char*>(allocator.root()->Alloc(alloc_size));
  if (cpu.has_mte()) {
    EXPECT_DEATH(array[alloc_size] = 'A', "");
  } else {
    char previous_value = array[alloc_size];
    // volatile is required to prevent the compiler from getting too clever and
    // eliding the out-of-bounds write. The root cause is that the PA_MALLOC_FN
    // annotation tells the compiler (among other things) that the returned
    // value cannot alias anything.
    *const_cast<volatile char*>(&array[alloc_size]) = 'A';
    // Crash at the next allocation. This assumes that we are touching a new,
    // non-randomized slot span, where the next slot to be handed over to the
    // application directly follows the current one.
    EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");

    // Restore integrity, otherwise the process will crash in TearDown().
    array[alloc_size] = previous_value;
  }
}

TEST_P(PartitionAllocDeathTest, OffByOneDetectionWithRealisticData) {
  base::CPU cpu;
  const size_t alloc_size = 2 * sizeof(void*);
  void** array = static_cast<void**>(allocator.root()->Alloc(alloc_size));
  char valid;
  if (cpu.has_mte()) {
    EXPECT_DEATH(array[2] = &valid, "");
  } else {
    void* previous_value = array[2];
    // As above, needs volatile to convince the compiler to perform the write.
    *const_cast<void* volatile*>(&array[2]) = &valid;
    // Crash at the next allocation. This assumes that we are touching a new,
    // non-randomized slot span, where the next slot to be handed over to the
    // application directly follows the current one.
    EXPECT_DEATH(allocator.root()->Alloc(alloc_size), "");
    array[2] = previous_value;
  }
}
#endif  // !PA_BUILDFLAG(DCHECKS_ARE_ON)

#endif  // !PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT) &&
        // PA_CONFIG(HAS_FREELIST_SHADOW_ENTRY)

#endif  // PA_USE_DEATH_TESTS()

// Tests that |PartitionDumpStats| runs without crashing and returns non-zero
// values when memory is allocated.
TEST_P(PartitionAllocTest, DumpMemoryStats) {}

// Tests the API to purge freeable memory.
TEST_P(PartitionAllocTest, Purge) {}

// Tests that we prefer to allocate into a non-empty partition page over an
// empty one. This is an important aspect of minimizing memory usage for some
// allocation sizes, particularly larger ones.
TEST_P(PartitionAllocTest, PreferActiveOverEmpty) {}

// Tests the API to purge discardable memory.
TEST_P(PartitionAllocTest, PurgeDiscardableSecondPage) {}

TEST_P(PartitionAllocTest, PurgeDiscardableFirstPage) {}

TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAlloc) {}

TEST_P(PartitionAllocTest, PurgeDiscardableNonPageSizedAllocOnSlotBoundary) {}

TEST_P(PartitionAllocTest, PurgeDiscardableManyPages) {}

TEST_P(PartitionAllocTest, PurgeDiscardableWithFreeListStraightening) {}

TEST_P(PartitionAllocTest, PurgeDiscardableDoubleTruncateFreeList) {}

TEST_P(PartitionAllocTest, PurgeDiscardableSmallSlotsWithTruncate) {}

TEST_P(PartitionAllocTest, ActiveListMaintenance) {}

TEST_P(PartitionAllocTest, ReallocMovesCookie) {}

TEST_P(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {}

TEST_P(PartitionAllocTest, ZeroFill) {}

TEST_P(PartitionAllocTest, SchedulerLoopQuarantine) {}

// Ensures `Free<kSchedulerLoopQuarantine>` works as `Free<kNone>` if disabled.
// See: https://crbug.com/324994233.
TEST_P(PartitionAllocTest, SchedulerLoopQuarantineDisabled) {}

TEST_P(PartitionAllocTest, ZapOnFree) {}

TEST_P(PartitionAllocTest, Bug_897585) {}

TEST_P(PartitionAllocTest, OverrideHooks) {}

TEST_P(PartitionAllocTest, Alignment) {}

TEST_P(PartitionAllocTest, FundamentalAlignment) {}

void VerifyAlignment(PartitionRoot* root, size_t size, size_t alignment) {}

TEST_P(PartitionAllocTest, AlignedAllocations) {}

// Test that the optimized `GetSlotNumber` implementation produces valid
// results.
TEST_P(PartitionAllocTest, OptimizedGetSlotNumber) {}

TEST_P(PartitionAllocTest, GetUsableSizeNull) {}

TEST_P(PartitionAllocTest, GetUsableSize) {}

#if PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)
TEST_P(PartitionAllocTest, GetUsableSizeWithMac11MallocSizeHack) {
  if (internal::base::mac::MacOSMajorVersion() != 11) {
    GTEST_SKIP() << "Skipping because the test is for Mac11.";
  }

  allocator.root()->EnableMac11MallocSizeHackForTesting();
  size_t size = internal::kMac11MallocSizeHackRequestedSize;
  void* ptr = allocator.root()->Alloc(size);
  size_t usable_size = PartitionRoot::GetUsableSize(ptr);
  size_t usable_size_with_hack =
      PartitionRoot::GetUsableSizeWithMac11MallocSizeHack(ptr);
  EXPECT_EQ(usable_size,
            allocator.root()->settings.mac11_malloc_size_hack_usable_size_);
  EXPECT_EQ(usable_size_with_hack, size);

  allocator.root()->Free(ptr);
}
#endif  // PA_CONFIG(MAYBE_ENABLE_MAC11_MALLOC_SIZE_HACK)

TEST_P(PartitionAllocTest, Bookkeeping) {}

#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

TEST_P(PartitionAllocTest, RefCountBasic) {}

void PartitionAllocTest::RunRefCountReallocSubtest(size_t orig_size,
                                                   size_t new_size) {}

TEST_P(PartitionAllocTest, RefCountRealloc) {}

int g_unretained_dangling_raw_ptr_detected_count =;

class UnretainedDanglingRawPtrTest : public PartitionAllocTest {};

INSTANTIATE_TEST_SUITE_P();

TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrNoReport) {}

TEST_P(UnretainedDanglingRawPtrTest, UnretainedDanglingPtrShouldReport) {}

#if !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
TEST_P(PartitionAllocTest, BackupRefPtrGuardRegion) {
  if (!UseBRPPool()) {
    return;
  }

  size_t alignment = internal::PageAllocationGranularity();

  uintptr_t requested_address;
  memset(&requested_address, internal::kQuarantinedByte,
         sizeof(requested_address));
  requested_address = RoundDownToPageAllocationGranularity(requested_address);

  uintptr_t allocated_address =
      AllocPages(requested_address, alignment, alignment,
                 PageAccessibilityConfiguration(
                     PageAccessibilityConfiguration::kReadWrite),
                 PageTag::kPartitionAlloc);
  EXPECT_NE(allocated_address, requested_address);

  if (allocated_address) {
    FreePages(allocated_address, alignment);
  }
}
#endif  // !PA_BUILDFLAG(HAS_64_BIT_POINTERS)
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

#if PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

// Allocate memory, and reference it from 3 raw_ptr. Among them 2 will be
// dangling.
TEST_P(PartitionAllocTest, DanglingPtr) {}

// Allocate memory, and reference it from 3
// raw_ptr<T, DisableDanglingPtrDetection>. Among them 2 will be dangling. This
// doesn't trigger any dangling raw_ptr checks.
TEST_P(PartitionAllocTest, DanglingDanglingPtr) {}

// When 'free' is called, one raw_ptr<> and one
// raw_ptr<T, DisableDanglingPtrDetection> remain. The raw_ptr<> is released
// first.
TEST_P(PartitionAllocTest, DanglingMixedReleaseRawPtrFirst) {}
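// A minimal sketch of the acquire/release choreography these tests follow,
// assuming the in-slot metadata ref-count API (helper name hypothetical;
// actual bodies elided):
//
//   void* ptr = allocator.root()->Alloc(64, type_name);
//   auto* metadata = InSlotMetadataPointerFromObject(ptr);  // Hypothetical.
//   metadata->Acquire();                    // Simulates a raw_ptr<>.
//   metadata->AcquireFromUnprotectedPtr();  // Simulates a dangling-allowed one.
//   allocator.root()->Free(ptr);
//   metadata->Release();                    // raw_ptr<> released first.
//   metadata->ReleaseFromUnprotectedPtr();  // Last reference; memory reclaimed.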

// When 'free' is called, one raw_ptr<> and one
// raw_ptr<T, DisableDanglingPtrDetection> remain.
// The raw_ptr<T, DisableDanglingPtrDetection> is released first. This
// triggers the dangling raw_ptr<> checks.
TEST_P(PartitionAllocTest, DanglingMixedReleaseDanglingPtrFirst) {}

// When 'free' is called, one raw_ptr<T, DisableDanglingPtrDetection>
// remains; it is then used to acquire one dangling raw_ptr<>. Release the
// raw_ptr<> first.
TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtr) {}

// Same as 'DanglingPtrUsedToAcquireNewRawPtr', but release the
// raw_ptr<T, DisableDanglingPtrDetection> before the raw_ptr<>.
TEST_P(PartitionAllocTest, DanglingPtrUsedToAcquireNewRawPtrVariant) {}

// Acquire a raw_ptr<T>, and release it before freeing memory. In the
// background, there is one raw_ptr<T, DisableDanglingPtrDetection>. This
// doesn't trigger any dangling raw_ptr<T> checks.
TEST_P(PartitionAllocTest, RawPtrReleasedBeforeFree) {}

// Similar to `PartitionAllocTest.DanglingPtr`, but using
// `PartitionRoot::Free<FreeFlags::kSchedulerLoopQuarantine>`.
// 1. `PartitionRoot::Free<kSchedulerLoopQuarantine>`
//   - The allocation is owned by Scheduler-Loop Quarantine.
// 2. `InSlotMetadata::Release`
//   - The allocation is still owned by Scheduler-Loop Quarantine.
// 3. The allocation gets purged from Scheduler-Loop Quarantine.
//   - Actual free happens here.
TEST_P(PartitionAllocTest,
       DanglingPtrReleaseBeforeSchedulerLoopQuarantineExit) {}

// Similar to `PartitionAllocTest.DanglingPtr`, but using
// `PartitionRoot::Free<FreeFlags::kSchedulerLoopQuarantine>`.
// 1. `PartitionRoot::Free<kSchedulerLoopQuarantine>`
//   - The allocation is owned by Scheduler-Loop Quarantine.
// 2. The allocation gets purged from Scheduler-Loop Quarantine.
//   - The allocation is now moved to BRP-quarantine.
// 3. `InSlotMetadata::Release`
//   - Actual free happens here.
TEST_P(PartitionAllocTest, DanglingPtrReleaseAfterSchedulerLoopQuarantineExit) {}

#if PA_USE_DEATH_TESTS()
// DCHECK messages are stripped in official builds, which causes death tests
// with matchers to fail.
#if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)

// Acquire() once, Release() twice => CRASH
TEST_P(PartitionAllocDeathTest, ReleaseUnderflowRawPtr) {}

// AcquireFromUnprotectedPtr() once, ReleaseFromUnprotectedPtr() twice => CRASH
TEST_P(PartitionAllocDeathTest, ReleaseUnderflowDanglingPtr) {}

#endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
#endif  // PA_USE_DEATH_TESTS()
#endif  // PA_BUILDFLAG(ENABLE_DANGLING_RAW_PTR_CHECKS)

TEST_P(PartitionAllocTest, ReservationOffset) {}

TEST_P(PartitionAllocTest, GetReservationStart) {}

#if PA_BUILDFLAG(IS_FUCHSIA)
// TODO: https://crbug.com/331366007 - re-enable on Fuchsia once bug is fixed.
TEST_P(PartitionAllocTest, DISABLED_CheckReservationType) {}
#else
TEST_P(PartitionAllocTest, CheckReservationType) {}
#endif  // PA_BUILDFLAG(IS_FUCHSIA)

// Test for crash http://crbug.com/1169003.
TEST_P(PartitionAllocTest, CrossPartitionRootRealloc) {}

TEST_P(PartitionAllocTest, FastPathOrReturnNull) {}

#if PA_USE_DEATH_TESTS()
// DCHECK messages are stripped in official builds, which causes death tests
// with matchers to fail.
#if !defined(OFFICIAL_BUILD) || !defined(NDEBUG)

TEST_P(PartitionAllocDeathTest, CheckTriggered) {}

#endif  // !defined(OFFICIAL_BUILD) || !defined(NDEBUG)
#endif  // PA_USE_DEATH_TESTS()

// Not on chromecast, since gtest considers extra output from itself as a test
// failure:
// https://ci.chromium.org/ui/p/chromium/builders/ci/Cast%20Audio%20Linux/98492/overview
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_USE_DEATH_TESTS() && \
    !PA_BUILDFLAG(IS_CASTOS)

namespace {

PA_NOINLINE void FreeForTest(void* data) {}

class ThreadDelegateForPreforkHandler
    : public base::PlatformThreadForTesting::Delegate {};

}  // namespace

// Disabled because executing it causes Gtest to show a warning in the output,
// which confuses the runner on some platforms, making the test report an
// "UNKNOWN" status even though it succeeded.
TEST_P(PartitionAllocTest, DISABLED_PreforkHandler) {}

#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // PA_USE_DEATH_TESTS() && !PA_BUILDFLAG(IS_CASTOS)

// Checks the bucket index logic.
TEST_P(PartitionAllocTest, GetIndex) {}

// Used to check alignment. If the compiler understands the annotations, the
// zeroing in the constructor uses aligned SIMD instructions.
TEST_P(PartitionAllocTest, MallocFunctionAnnotations) {}
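// A minimal sketch of the kind of annotated type such a test could allocate
// (hypothetical): with malloc-style annotations on Alloc(), the compiler may
// lower the constructor's memset() to aligned SIMD stores, which would fault
// if the returned pointer were misaligned.
//
//   struct ZeroedOnConstruction {
//     ZeroedOnConstruction() { memset(data, 0, sizeof(data)); }
//     char data[64];
//   };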

// Test that the ConfigurablePool works properly.
TEST_P(PartitionAllocTest, ConfigurablePool) {}

TEST_P(PartitionAllocTest, EmptySlotSpanSizeIsCapped) {}

TEST_P(PartitionAllocTest, IncreaseEmptySlotSpanRingSize) {}

#if PA_BUILDFLAG(IS_CAST_ANDROID) && PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
extern "C" {
void* __real_malloc(size_t);
}  // extern "C"

TEST_P(PartitionAllocTest, HandleMixedAllocations) {
  void* ptr = __real_malloc(12);
  // Should not crash, no test assertion.
  free(ptr);
}
#endif

TEST_P(PartitionAllocTest, SortFreelist) {}

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(IS_LINUX) && \
    PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)
TEST_P(PartitionAllocTest, CrashOnUnknownPointer) {}
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // PA_BUILDFLAG(IS_LINUX) && PA_BUILDFLAG(PA_ARCH_CPU_64_BITS)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && PA_BUILDFLAG(IS_MAC)

// Adapted from crashpad tests.
class ScopedOpenCLNoOpKernel {
 public:
  ScopedOpenCLNoOpKernel()
      : context_(nullptr),
        program_(nullptr),
        kernel_(nullptr),
        success_(false) {}

  ScopedOpenCLNoOpKernel(const ScopedOpenCLNoOpKernel&) = delete;
  ScopedOpenCLNoOpKernel& operator=(const ScopedOpenCLNoOpKernel&) = delete;

  ~ScopedOpenCLNoOpKernel() {
    if (kernel_) {
      cl_int rv = clReleaseKernel(kernel_);
      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseKernel";
    }

    if (program_) {
      cl_int rv = clReleaseProgram(program_);
      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseProgram";
    }

    if (context_) {
      cl_int rv = clReleaseContext(context_);
      EXPECT_EQ(rv, CL_SUCCESS) << "clReleaseContext";
    }
  }

  void SetUp() {
    cl_platform_id platform_id;
    cl_int rv = clGetPlatformIDs(1, &platform_id, nullptr);
    ASSERT_EQ(rv, CL_SUCCESS) << "clGetPlatformIDs";
    cl_device_id device_id;
    rv =
        clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, nullptr);
#if PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
    // CL_DEVICE_TYPE_CPU doesn’t seem to work at all on arm64, meaning that
    // these weird OpenCL modules probably don’t show up there at all. Keep this
    // test even on arm64 in case this ever does start working.
    if (rv == CL_INVALID_VALUE) {
      return;
    }
#endif  // PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
    ASSERT_EQ(rv, CL_SUCCESS) << "clGetDeviceIDs";

    context_ = clCreateContext(nullptr, 1, &device_id, nullptr, nullptr, &rv);
    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateContext";

    const char* sources[] = {
        "__kernel void NoOp(void) {barrier(CLK_LOCAL_MEM_FENCE);}",
    };
    const size_t source_lengths[] = {
        strlen(sources[0]),
    };
    static_assert(std::size(sources) == std::size(source_lengths),
                  "arrays must be parallel");

    program_ = clCreateProgramWithSource(context_, std::size(sources), sources,
                                         source_lengths, &rv);
    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateProgramWithSource";

    rv = clBuildProgram(program_, 1, &device_id, "-cl-opt-disable", nullptr,
                        nullptr);
    ASSERT_EQ(rv, CL_SUCCESS) << "clBuildProgram";

    kernel_ = clCreateKernel(program_, "NoOp", &rv);
    ASSERT_EQ(rv, CL_SUCCESS) << "clCreateKernel";

    success_ = true;
  }

  bool success() const { return success_; }

 private:
  cl_context context_;
  cl_program program_;
  cl_kernel kernel_;
  bool success_;
};

// On macOS 10.11, allocations are made with PartitionAlloc, but the pointer
// is incorrectly passed by CoreFoundation to the previous default zone,
// causing crashes. This test is intended to detect regressions of these
// issues in future versions of macOS.
TEST_P(PartitionAllocTest, OpenCL) {
  ScopedOpenCLNoOpKernel kernel;
  kernel.SetUp();
#if !PA_BUILDFLAG(PA_ARCH_CPU_ARM64)
  ASSERT_TRUE(kernel.success());
#endif
}

#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) &&
        // PA_BUILDFLAG(IS_MAC)

TEST_P(PartitionAllocTest, SmallSlotSpanWaste) {}

TEST_P(PartitionAllocTest, SortActiveSlotSpans) {}

#if PA_BUILDFLAG(USE_FREESLOT_BITMAP)
TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsUsedAfterAlloc) {
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr);
}

TEST_P(PartitionAllocTest, FreeSlotBitmapMarkedAsFreeAfterFree) {
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr);
  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
}

TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterDecommit) {
  void* ptr1 = allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name);
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr1);
  allocator.root()->Free(ptr1);

  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
  // Decommit the slot span. Bitmap will be rewritten in Decommit().
  allocator.root()->PurgeMemory(PurgeFlags::kDecommitEmptySlotSpans);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));
}

TEST_P(PartitionAllocTest, FreeSlotBitmapResetAfterPurge) {
  void* ptr1 = allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name);
  char* ptr2 = static_cast<char*>(allocator.root()->Alloc(
      SystemPageSize() - ExtraAllocSize(allocator), type_name));
  uintptr_t slot_start = allocator.root()->ObjectToSlotStart(ptr2);
  allocator.root()->Free(ptr2);

  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
  EXPECT_FALSE(FreeSlotBitmapSlotIsUsed(slot_start));
  // Bitmap will be rewritten in PartitionPurgeSlotSpan().
  allocator.root()->PurgeMemory(PurgeFlags::kDiscardUnusedSystemPages);
  CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
  EXPECT_TRUE(FreeSlotBitmapSlotIsUsed(slot_start));

  allocator.root()->Free(ptr1);
}

#endif  // PA_BUILDFLAG(USE_FREESLOT_BITMAP)

#if PA_BUILDFLAG(USE_LARGE_EMPTY_SLOT_SPAN_RING)
TEST_P(PartitionAllocTest, GlobalEmptySlotSpanRingIndexResets) {
  // Switch to the larger slot span size, and set the
  // global_empty_slot_span_ring_index to one less than max.
  allocator.root()->AdjustForForeground();
  allocator.root()->SetGlobalEmptySlotSpanRingIndexForTesting(
      internal::kMaxFreeableSpans - 1);

  // Switch to the smaller size, allocate, free, and clear the empty cache.
  allocator.root()->AdjustForBackground();
  void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
  allocator.root()->Free(ptr);
  ClearEmptySlotSpanCache();

  // This should result in 0 empty_slot_span_dirty_bytes, and more importantly,
  // not crash.
  EXPECT_EQ(
      0u, PA_TS_UNCHECKED_READ(allocator.root()->empty_slot_spans_dirty_bytes));
}
#endif

TEST_P(PartitionAllocTest, FastReclaim) {}

TEST_P(PartitionAllocTest, FastReclaimEventuallyLooksAtAllBuckets) {}

}  // namespace partition_alloc::internal

#endif  // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)