#include "partition_alloc/address_pool_manager.h"
#include <cstdint>
#include "partition_alloc/address_space_stats.h"
#include "partition_alloc/build_config.h"
#include "partition_alloc/buildflags.h"
#include "partition_alloc/page_allocator.h"
#include "partition_alloc/partition_alloc_base/bits.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace partition_alloc::internal {
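// Records the stats reported through DumpStats() so tests can read back the
// regular pool usage.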
class AddressSpaceStatsDumperForTesting final : public AddressSpaceStatsDumper { … };
#if PA_BUILDFLAG(HAS_64_BIT_POINTERS)
class AddressPoolManagerForTesting : public AddressPoolManager { … };
class PartitionAllocAddressPoolManagerTest : public testing::Test { … };
TEST_F(PartitionAllocAddressPoolManagerTest, TooLargePool) { … }
TEST_F(PartitionAllocAddressPoolManagerTest, ManyPages) { … }
TEST_F(PartitionAllocAddressPoolManagerTest, PagesFragmented) { … }
TEST_F(PartitionAllocAddressPoolManagerTest, GetUsedSuperpages) { … }
TEST_F(PartitionAllocAddressPoolManagerTest, IrregularPattern) { … }
TEST_F(PartitionAllocAddressPoolManagerTest, DecommittedDataIsErased) { … }
TEST_F(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) { … }
#else   // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
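// Without 64-bit pointers, pool membership is tracked by
// AddressPoolManagerBitmap, so exercise the bitmap-backed
// IsManagedByRegularPool()/IsManagedByBRPPool() queries directly.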
TEST(PartitionAllocAddressPoolManagerTest, IsManagedByRegularPool) {
constexpr size_t kAllocCount = 8;
static const size_t kNumPages[kAllocCount] = {1, 4, 7, 8, 13, 16, 31, 60};
uintptr_t addrs[kAllocCount];
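// Reserve super-page-aligned regions of various sizes from the regular pool
// and mark them used.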
for (size_t i = 0; i < kAllocCount; ++i) {
addrs[i] = AddressPoolManager::GetInstance().Reserve(
kRegularPoolHandle, 0,
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
kNumPages[i]);
EXPECT_TRUE(addrs[i]);
EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
AddressPoolManager::GetInstance().MarkUsed(
kRegularPoolHandle, addrs[i],
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
kNumPages[i]);
}
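// Within each reservation, every bitmap granule must be managed by the
// regular pool; the padding up to the next super page boundary must not be,
// and nothing here may appear to be in the BRP pool.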
for (size_t i = 0; i < kAllocCount; ++i) {
uintptr_t address = addrs[i];
size_t num_pages =
base::bits::AlignUp(
kNumPages[i] *
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap,
kSuperPageSize) /
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
for (size_t j = 0; j < num_pages; ++j) {
if (j < kNumPages[i]) {
EXPECT_TRUE(AddressPoolManager::IsManagedByRegularPool(address));
} else {
EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
}
EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
address += AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap;
}
}
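// After MarkUnused() and UnreserveAndDecommit(), the addresses must no longer
// be managed by either pool.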
for (size_t i = 0; i < kAllocCount; ++i) {
AddressPoolManager::GetInstance().MarkUnused(
kRegularPoolHandle, addrs[i],
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
kNumPages[i]);
AddressPoolManager::GetInstance().UnreserveAndDecommit(
kRegularPoolHandle, addrs[i],
AddressPoolManagerBitmap::kBytesPer1BitOfRegularPoolBitmap *
kNumPages[i]);
EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
}
}
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
TEST(PartitionAllocAddressPoolManagerTest, IsManagedByBRPPool) {
constexpr size_t kAllocCount = 4;
static const size_t kNumPages[kAllocCount] = {1, 3, 7, 11};
uintptr_t addrs[kAllocCount];
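// Reserve multi-super-page regions from the BRP pool and mark them used.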
for (size_t i = 0; i < kAllocCount; ++i) {
addrs[i] = AddressPoolManager::GetInstance().Reserve(
kBRPPoolHandle, 0, kSuperPageSize * kNumPages[i]);
EXPECT_TRUE(addrs[i]);
EXPECT_TRUE(!(addrs[i] & kSuperPageOffsetMask));
AddressPoolManager::GetInstance().MarkUsed(kBRPPoolHandle, addrs[i],
kSuperPageSize * kNumPages[i]);
}
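// The first and last portions of each BRP reservation are guard areas that
// the BRP bitmap leaves unmarked.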
constexpr size_t first_guard_size =
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap;
constexpr size_t last_guard_size =
AddressPoolManagerBitmap::kBytesPer1BitOfBRPPoolBitmap *
(AddressPoolManagerBitmap::kGuardBitsOfBRPPoolBitmap -
AddressPoolManagerBitmap::kGuardOffsetOfBRPPoolBitmap);
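// Walk each reservation one system page at a time: only the interior
// (non-guard) pages are managed by the BRP pool, and none belong to the
// regular pool.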
for (size_t i = 0; i < kAllocCount; ++i) {
uintptr_t address = addrs[i];
size_t num_allocated_size = kNumPages[i] * kSuperPageSize;
size_t num_system_pages = num_allocated_size / SystemPageSize();
for (size_t j = 0; j < num_system_pages; ++j) {
size_t offset = address - addrs[i];
if (offset < first_guard_size ||
offset >= (num_allocated_size - last_guard_size)) {
EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(address));
} else {
EXPECT_TRUE(AddressPoolManager::IsManagedByBRPPool(address));
}
EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(address));
address += SystemPageSize();
}
}
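// Releasing the reservations must clear them from both pool bitmaps.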
for (size_t i = 0; i < kAllocCount; ++i) {
AddressPoolManager::GetInstance().MarkUnused(kBRPPoolHandle, addrs[i],
kSuperPageSize * kNumPages[i]);
AddressPoolManager::GetInstance().UnreserveAndDecommit(
kBRPPoolHandle, addrs[i], kSuperPageSize * kNumPages[i]);
EXPECT_FALSE(AddressPoolManager::IsManagedByRegularPool(addrs[i]));
EXPECT_FALSE(AddressPoolManager::IsManagedByBRPPool(addrs[i]));
}
}
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
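// Reserving and releasing a super page should be reflected in the usage that
// DumpStats() reports for the regular pool.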
TEST(PartitionAllocAddressPoolManagerTest, RegularPoolUsageChanges) {
AddressSpaceStatsDumperForTesting dumper{};
AddressPoolManager::GetInstance().DumpStats(&dumper);
const size_t usage_before = dumper.regular_pool_usage_;
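// Reserving a super page and marking it used must increase the reported usage
// above the baseline.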
const uintptr_t address = AddressPoolManager::GetInstance().Reserve(
kRegularPoolHandle, 0, kSuperPageSize);
ASSERT_TRUE(address);
AddressPoolManager::GetInstance().MarkUsed(kRegularPoolHandle, address,
kSuperPageSize);
AddressPoolManager::GetInstance().DumpStats(&dumper);
EXPECT_GT(dumper.regular_pool_usage_, usage_before);
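// Releasing the super page must bring the reported usage back to the
// baseline.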
AddressPoolManager::GetInstance().MarkUnused(kRegularPoolHandle, address,
kSuperPageSize);
AddressPoolManager::GetInstance().UnreserveAndDecommit(
kRegularPoolHandle, address, kSuperPageSize);
AddressPoolManager::GetInstance().DumpStats(&dumper);
EXPECT_EQ(dumper.regular_pool_usage_, usage_before);
}
#endif  // PA_BUILDFLAG(HAS_64_BIT_POINTERS)
}  // namespace partition_alloc::internal