chromium/v8/src/wasm/wasm-code-manager.cc

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/wasm-code-manager.h"

#include <algorithm>
#include <iomanip>
#include <limits>
#include <numeric>
#include <optional>

#include "src/base/atomicops.h"
#include "src/base/build_config.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/small-vector.h"
#include "src/base/string-format.h"
#include "src/base/vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/codegen/macro-assembler.h"
#include "src/common/code-memory-access.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/diagnostics/disassembler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/embedded/embedded-data-inl.h"
#include "src/utils/ostreams.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/compilation-environment.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/names-provider.h"
#include "src/wasm/pgo.h"
#include "src/wasm/std-object-sizes.h"
#include "src/wasm/wasm-builtin-list.h"
#include "src/wasm/wasm-debug.h"
#include "src/wasm/wasm-deopt-data.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-module-sourcemap.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "src/wasm/well-known-imports.h"

#if V8_ENABLE_DRUMBRAKE
#include "src/wasm/interpreter/wasm-interpreter-runtime.h"
#endif  // V8_ENABLE_DRUMBRAKE

#if defined(V8_OS_WIN64)
#include "src/diagnostics/unwinding-info-win64.h"
#endif  // V8_OS_WIN64

#define TRACE_HEAP(...)                                       \
  do {                                                        \
    if (v8_flags.trace_wasm_native_heap) PrintF(__VA_ARGS__); \
  } while (false)

namespace v8 {
namespace internal {
namespace wasm {

using trap_handler::ProtectedInstructionData;

// Check that {WasmCode} objects are sufficiently small. We create many of them,
// often for rather small functions.
// Increase the limit if needed, but first check if the size increase is
// justified.
#ifndef V8_GC_MOLE
static_assert(sizeof(WasmCode) <= 104);  // Exact bound is version-dependent.
#endif

base::AddressRegion DisjointAllocationPool::Merge(
    base::AddressRegion new_region) {}

base::AddressRegion DisjointAllocationPool::Allocate(size_t size) {}

base::AddressRegion DisjointAllocationPool::AllocateInRegion(
    size_t size, base::AddressRegion region) {}

Address WasmCode::constant_pool() const {}

Address WasmCode::handler_table() const {}

int WasmCode::handler_table_size() const {}

Address WasmCode::code_comments() const {}

int WasmCode::code_comments_size() const {}

std::unique_ptr<const uint8_t[]> WasmCode::ConcatenateBytes(
    std::initializer_list<base::Vector<const uint8_t>> vectors) {}

void WasmCode::RegisterTrapHandlerData() {}

bool WasmCode::ShouldBeLogged(Isolate* isolate) {}

std::string WasmCode::DebugName() const {}

void WasmCode::LogCode(Isolate* isolate, const char* source_url,
                       int script_id) const {}

namespace {
bool ProtectedInstructionDataCompare(const ProtectedInstructionData& left,
                                     const ProtectedInstructionData& right) {}
}  // namespace

bool WasmCode::IsProtectedInstruction(Address pc) {}

void WasmCode::Validate() const {}

void WasmCode::MaybePrint() const {}

void WasmCode::Print(const char* name) const {}

void WasmCode::Disassemble(const char* name, std::ostream& os,
                           Address current_pc) const {}

const char* GetWasmCodeKindAsString(WasmCode::Kind kind) {}

WasmCode::~WasmCode() {}

V8_WARN_UNUSED_RESULT bool WasmCode::DecRefOnPotentiallyDeadCode() {}

// static
void WasmCode::DecrementRefCount(base::Vector<WasmCode* const> code_vec) {}

SourcePosition WasmCode::GetSourcePositionBefore(int code_offset) {}

int WasmCode::GetSourceOffsetBefore(int code_offset) {}

std::tuple<int, bool, SourcePosition> WasmCode::GetInliningPosition(
    int inlining_id) const {}

size_t WasmCode::EstimateCurrentMemoryConsumption() const {}

WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
    : async_counters_(std::move(async_counters)) {}

WasmCodeAllocator::~WasmCodeAllocator() {}

void WasmCodeAllocator::Init(VirtualMemory code_space) {}

namespace {
// On Windows, we cannot commit a region that straddles different reservations
// of virtual memory. Because we bump-allocate, and because, if we need more
// memory, we append that memory at the end of the owned_code_space_ list, we
// traverse that list in reverse order to find the reservation(s) that guide how
// to chunk the region to commit.
#if V8_OS_WIN
constexpr bool kNeedsToSplitRangeByReservations = true;
#else
constexpr bool kNeedsToSplitRangeByReservations = false;
#endif

base::SmallVector<base::AddressRegion, 1> SplitRangeByReservationsIfNeeded(
    base::AddressRegion range,
    const std::vector<VirtualMemory>& owned_code_space) {}
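
// A hedged sketch of the splitting described above (not the elided body):
// walk the reservations from newest to oldest and peel sub-regions off the
// end of {range} until it is fully covered. All logic below is illustrative.
base::SmallVector<base::AddressRegion, 1> SketchSplitByReservations(
    base::AddressRegion range, const std::vector<VirtualMemory>& spaces) {
  base::SmallVector<base::AddressRegion, 1> parts;
  if (!kNeedsToSplitRangeByReservations) {
    parts.emplace_back(range);
    return parts;
  }
  Address remaining_end = range.end();
  // New reservations are appended at the end, so iterate in reverse.
  for (auto it = spaces.rbegin();
       it != spaces.rend() && remaining_end > range.begin(); ++it) {
    // Intersect the still-uncovered tail of {range} with this reservation.
    Address begin = std::max(range.begin(), it->address());
    Address end = std::min(remaining_end, it->end());
    if (begin >= end) continue;
    parts.emplace_back(begin, end - begin);
    remaining_end = begin;
  }
  return parts;
}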

int NumWasmFunctionsInFarJumpTable(uint32_t num_declared_functions) {}

// Returns an overapproximation of the code size overhead per new code space
// created by the jump tables.
size_t OverheadPerCodeSpace(uint32_t num_declared_functions) {}
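
// Hedged sketch of what the elided computation above plausibly sums: the
// jump table plus the far jump table that every code space carries. The
// {JumpTableAssembler} size helpers exist upstream, but their exact
// combination here is an assumption.
size_t SketchOverheadPerCodeSpace(uint32_t num_declared_functions) {
  // Near jump table: one slot per declared function.
  size_t overhead = RoundUp<kCodeAlignment>(
      JumpTableAssembler::SizeForNumberOfSlots(num_declared_functions));
  // Far jump table: slots for runtime builtins plus, where far jumps between
  // code spaces are needed, one slot per declared function.
  overhead +=
      RoundUp<kCodeAlignment>(JumpTableAssembler::SizeForNumberOfFarJumpSlots(
          BuiltinLookup::BuiltinCount(),
          NumWasmFunctionsInFarJumpTable(num_declared_functions)));
  return overhead;
}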

// Returns an estimate of how much code space should be reserved. This can be
// smaller than the passed-in {code_size_estimate}, see comments in the code.
size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
                       size_t total_reserved) {}
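
// Hedged sketch of a growth policy matching the comment above: never reserve
// less than the estimated need plus per-space overhead, and grow with the
// total already reserved so the number of code spaces stays small. The
// proportions are illustrative; upstream also bounds the result by the
// maximum size of a single code space.
size_t SketchReservationSize(size_t code_size_estimate,
                             int num_declared_functions,
                             size_t total_reserved) {
  size_t overhead = OverheadPerCodeSpace(num_declared_functions);
  size_t minimum = RoundUp<kCodeAlignment>(code_size_estimate) + overhead;
  // Exponential growth: each new reservation is at least a quarter of what
  // is already reserved, keeping the code space count logarithmic.
  return std::max(minimum, total_reserved / 4);
}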

// Sentinel value passed to {AllocateForCodeInRegion} to indicate that there
// is no restriction on the region to allocate in.
constexpr base::AddressRegion kUnrestrictedRegion{
    kNullAddress, std::numeric_limits<size_t>::max()};

}  // namespace

void WasmCodeAllocator::InitializeCodeRange(NativeModule* native_module,
                                            base::AddressRegion region) {}

base::Vector<uint8_t> WasmCodeAllocator::AllocateForCode(
    NativeModule* native_module, size_t size) {}

base::Vector<uint8_t> WasmCodeAllocator::AllocateForWrapper(size_t size) {}

// {native_module} may be {nullptr} when allocating wrapper code.
base::Vector<uint8_t> WasmCodeAllocator::AllocateForCodeInRegion(
    NativeModule* native_module, size_t size, base::AddressRegion region) {}

void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {}

size_t WasmCodeAllocator::GetNumCodeSpaces() const {}

NativeModule::NativeModule(WasmEnabledFeatures enabled,
                           CompileTimeImports compile_imports,
                           DynamicTiering dynamic_tiering,
                           VirtualMemory code_space,
                           std::shared_ptr<const WasmModule> module,
                           std::shared_ptr<Counters> async_counters,
                           std::shared_ptr<NativeModule>* shared_this)
    /* member-initializer list elided */ {}

void NativeModule::ReserveCodeTableForTesting(uint32_t max_functions) {}

void NativeModule::LogWasmCodes(Isolate* isolate, Tagged<Script> script) {}

WasmCode* NativeModule::AddCodeForTesting(DirectHandle<Code> code) {}

void NativeModule::InitializeJumpTableForLazyCompilation(
    uint32_t num_wasm_functions) {}

void NativeModule::UseLazyStubLocked(uint32_t func_index) {}

std::unique_ptr<WasmCode> NativeModule::AddCode(
    int index, const CodeDesc& desc, int stack_slots, int ool_spill_count,
    uint32_t tagged_parameter_slots,
    base::Vector<const uint8_t> protected_instructions_data,
    base::Vector<const uint8_t> source_position_table,
    base::Vector<const uint8_t> inlining_positions,
    base::Vector<const uint8_t> deopt_data, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging) {}

std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
    int index, const CodeDesc& desc, int stack_slots, int ool_spill_count,
    uint32_t tagged_parameter_slots,
    base::Vector<const uint8_t> protected_instructions_data,
    base::Vector<const uint8_t> source_position_table,
    base::Vector<const uint8_t> inlining_positions,
    base::Vector<const uint8_t> deopt_data, WasmCode::Kind kind,
    ExecutionTier tier, ForDebugging for_debugging,
    bool frame_has_feedback_slot, base::Vector<uint8_t> dst_code_bytes,
    const JumpTablesRef& jump_tables) {}

WasmCode* NativeModule::PublishCode(std::unique_ptr<WasmCode> code,
                                    AssumptionsJournal* assumptions) {}

std::vector<WasmCode*> NativeModule::PublishCode(
    base::Vector<std::unique_ptr<WasmCode>> codes) {}

WasmCode::Kind GetCodeKind(const WasmCompilationResult& result) {}

WasmCode* NativeModule::PublishCodeLocked(
    std::unique_ptr<WasmCode> owned_code) {}

bool NativeModule::should_update_code_table(WasmCode* new_code,
                                            WasmCode* prior_code) const {}

void NativeModule::ReinstallDebugCode(WasmCode* code) {}

std::pair<base::Vector<uint8_t>, NativeModule::JumpTablesRef>
NativeModule::AllocateForDeserializedCode(size_t total_code_size) {}

std::unique_ptr<WasmCode> NativeModule::AddDeserializedCode(
    int index, base::Vector<uint8_t> instructions, int stack_slots,
    int ool_spills, uint32_t tagged_parameter_slots, int safepoint_table_offset,
    int handler_table_offset, int constant_pool_offset,
    int code_comments_offset, int unpadded_binary_size,
    base::Vector<const uint8_t> protected_instructions_data,
    base::Vector<const uint8_t> reloc_info,
    base::Vector<const uint8_t> source_position_table,
    base::Vector<const uint8_t> inlining_positions,
    base::Vector<const uint8_t> deopt_data, WasmCode::Kind kind,
    ExecutionTier tier) {}

std::pair<std::vector<WasmCode*>, std::vector<WellKnownImport>>
NativeModule::SnapshotCodeTable() const {
  base::RecursiveMutexGuard lock(&allocation_mutex_);
  WasmCode** start = code_table_.get();
  WasmCode** end = start + module_->num_declared_functions;
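  // Taking a ref under the lock keeps each snapshotted code object alive for
  // the lifetime of the caller's WasmCodeRefScope, even if newer code is
  // published for the same function in the meantime.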
  for (WasmCode* code : base::VectorOf(start, end - start)) {
    if (code) WasmCodeRefScope::AddRef(code);
  }
  std::vector<WellKnownImport> import_statuses(module_->num_imported_functions);
  for (uint32_t i = 0; i < module_->num_imported_functions; i++) {
    import_statuses[i] = module_->type_feedback.well_known_imports.get(i);
  }
  return {std::vector<WasmCode*>{start, end}, std::move(import_statuses)};
}

std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {}

WasmCode* NativeModule::GetCode(uint32_t index) const {}

bool NativeModule::HasCode(uint32_t index) const {}

bool NativeModule::HasCodeWithTier(uint32_t index, ExecutionTier tier) const {}

void NativeModule::SetWasmSourceMap(
    std::unique_ptr<WasmModuleSourceMap> source_map) {}

WasmModuleSourceMap* NativeModule::GetWasmSourceMap() const {}

WasmCode* NativeModule::CreateEmptyJumpTableLocked(int jump_table_size,
                                                   JumpTableType type) {}

namespace {

ThreadIsolation::JitAllocationType ToAllocationType(
    v8::internal::wasm::NativeModule::JumpTableType type) {}

}  // namespace

WasmCode* NativeModule::CreateEmptyJumpTableInRegionLocked(
    int jump_table_size, base::AddressRegion region, JumpTableType type) {}

void NativeModule::UpdateCodeSize(size_t size, ExecutionTier tier,
                                  ForDebugging for_debugging) {}

void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {}

void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
                                        uint32_t slot_index, Address target) {}

void NativeModule::AddCodeSpaceLocked(base::AddressRegion region) {}

namespace {
class NativeModuleWireBytesStorage final : public WireBytesStorage {};
}  // namespace

void NativeModule::SetWireBytes(base::OwnedVector<const uint8_t> wire_bytes) {}

void NativeModule::AddLazyCompilationTimeSample(int64_t sample_in_micro_sec) {}

void NativeModule::TransferNewOwnedCodeLocked() const {}

WasmCode* NativeModule::Lookup(Address pc) const {}

NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegionLocked(
    base::AddressRegion code_region) const {}

Address NativeModule::GetNearCallTargetForFunction(
    uint32_t func_index, const JumpTablesRef& jump_tables) const {}

Address NativeModule::GetJumpTableEntryForBuiltin(
    Builtin builtin, const JumpTablesRef& jump_tables) const {}

uint32_t NativeModule::GetFunctionIndexFromJumpTableSlot(
    Address slot_address) const {}

Builtin NativeModule::GetBuiltinInJumptableSlot(Address target) const {}

NativeModule::~NativeModule() {}

WasmCodeManager::WasmCodeManager()
    /* member-initializer list elided */ {}

WasmCodeManager::~WasmCodeManager() {}

#if defined(V8_OS_WIN64)
// static
bool WasmCodeManager::CanRegisterUnwindInfoForNonABICompliantCodeRange() {
  return win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
         v8_flags.win64_unwinding_info;
}
#endif  // V8_OS_WIN64

void WasmCodeManager::Commit(base::AddressRegion region) {}

void WasmCodeManager::Decommit(base::AddressRegion region) {}

void WasmCodeManager::AssignRange(base::AddressRegion region,
                                  NativeModule* native_module) {}

VirtualMemory WasmCodeManager::TryAllocate(size_t size) {}

namespace {
// The numbers here are rough estimates, used to calculate the size of the
// initial code reservation and for estimating the amount of external memory
// reported to the GC.
// They do not need to be accurate. Choosing them too small will result in
// separate code spaces being allocated (compile time and runtime overhead),
// choosing them too large results in over-reservation (virtual address space
// only).
// In doubt, choose the numbers slightly too large on 64-bit systems (where
// {kNeedsFarJumpsBetweenCodeSpaces} is {true}). Over-reservation is less
// critical in a 64-bit address space, but separate code spaces cause overhead.
// On 32-bit systems (where {kNeedsFarJumpsBetweenCodeSpaces} is {false}), the
// opposite is true: Multiple code spaces are cheaper, and address space is
// scarce, hence choose numbers slightly too small.
//
// Numbers can be determined by running benchmarks with
// --trace-wasm-compilation-times, and piping the output through
// tools/wasm/code-size-factors.py.
#if V8_TARGET_ARCH_X64
// The listing dropped these values; the numbers below are assumptions chosen
// to be consistent with the "maximum of other architectures" fallback below.
constexpr size_t kTurbofanFunctionOverhead = 24;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 56;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 640;
#elif V8_TARGET_ARCH_IA32
constexpr size_t kTurbofanFunctionOverhead = 20;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 48;
constexpr size_t kLiftoffCodeSizeMultiplier = 3;
constexpr size_t kImportSize = 600;
#elif V8_TARGET_ARCH_ARM
constexpr size_t kTurbofanFunctionOverhead = 44;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 96;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
constexpr size_t kImportSize = 550;
#elif V8_TARGET_ARCH_ARM64
constexpr size_t kTurbofanFunctionOverhead = 40;
constexpr size_t kTurbofanCodeSizeMultiplier = 3;
constexpr size_t kLiftoffFunctionOverhead = 68;
constexpr size_t kLiftoffCodeSizeMultiplier = 4;
constexpr size_t kImportSize = 750;
#else
// Other platforms should add their own estimates for best performance. Numbers
// below are the maximum of other architectures.
constexpr size_t kTurbofanFunctionOverhead = 44;
constexpr size_t kTurbofanCodeSizeMultiplier = 4;
constexpr size_t kLiftoffFunctionOverhead = 96;
constexpr size_t kLiftoffCodeSizeMultiplier = 5;
constexpr size_t kImportSize = 750;
#endif
}  // namespace

// static
size_t WasmCodeManager::EstimateLiftoffCodeSize(int body_size) {}
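
// Hedged sketch of how the per-architecture constants combine for a single
// Liftoff function (the body above is elided): a fixed per-function overhead
// plus a multiple of the wasm body size. The exact upstream formula may add
// alignment slack on top.
size_t SketchLiftoffCodeSize(int body_size) {
  return kLiftoffFunctionOverhead +
         static_cast<size_t>(body_size) * kLiftoffCodeSizeMultiplier;
}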

// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(
    const WasmModule* module, bool include_liftoff,
    DynamicTiering dynamic_tiering) {}

// static
size_t WasmCodeManager::EstimateNativeModuleCodeSize(
    int num_functions, int num_imported_functions, int code_section_length,
    bool include_liftoff, DynamicTiering dynamic_tiering) {}

// static
size_t WasmCodeManager::EstimateNativeModuleMetaDataSize(
    const WasmModule* module) {}

// static
bool WasmCodeManager::HasMemoryProtectionKeySupport() {}

// static
bool WasmCodeManager::MemoryProtectionKeysEnabled() {}

// static
bool WasmCodeManager::MemoryProtectionKeyWritable() {}

std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
    Isolate* isolate, WasmEnabledFeatures enabled,
    CompileTimeImports compile_imports, size_t code_size_estimate,
    std::shared_ptr<const WasmModule> module) {}

void NativeModule::SampleCodeSize(Counters* counters) const {}

std::unique_ptr<WasmCode> NativeModule::AddCompiledCode(
    const WasmCompilationResult& result) {}

std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
    base::Vector<const WasmCompilationResult> results) {}

void NativeModule::SetDebugState(DebugState new_debug_state) {}

namespace {
bool ShouldRemoveCode(WasmCode* code, NativeModule::RemoveFilter filter) {}
}  // namespace

std::pair<size_t, size_t> NativeModule::RemoveCompiledCode(
    RemoveFilter filter) {}

size_t NativeModule::SumLiftoffCodeSizeForTesting() const {}

void NativeModule::FreeCode(base::Vector<WasmCode* const> codes) {}

size_t NativeModule::GetNumberOfCodeSpacesForTesting() const {}

bool NativeModule::HasDebugInfo() const {}

DebugInfo* NativeModule::GetDebugInfo() {}

NamesProvider* NativeModule::GetNamesProvider() {}

size_t NativeModule::EstimateCurrentMemoryConsumption() const {}

void WasmCodeManager::FreeNativeModule(
    base::Vector<VirtualMemory> owned_code_space, size_t committed_size) {}

NativeModule* WasmCodeManager::LookupNativeModule(Address pc) const {}

WasmCode* WasmCodeManager::LookupCode(Address pc) const {}

WasmCode* WasmCodeManager::LookupCode(Isolate* isolate, Address pc) const {}

std::pair<WasmCode*, SafepointEntry> WasmCodeManager::LookupCodeAndSafepoint(
    Isolate* isolate, Address pc) {}

void WasmCodeManager::FlushCodeLookupCache(Isolate* isolate) {}

namespace {
thread_local WasmCodeRefScope* current_code_refs_scope = nullptr;
}  // namespace

WasmCodeRefScope::WasmCodeRefScope()
    : previous_scope_(current_code_refs_scope) {}

WasmCodeRefScope::~WasmCodeRefScope() {}

// static
void WasmCodeRefScope::AddRef(WasmCode* code) {}

void WasmCodeLookupCache::Flush() {}

WasmCodeLookupCache::CacheEntry* WasmCodeLookupCache::GetCacheEntry(
    Address pc) {}
}  // namespace wasm
}  // namespace internal
}  // namespace v8
#undef TRACE_HEAP