// chromium/v8/src/wasm/baseline/liftoff-assembler.cc

// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/wasm/baseline/liftoff-assembler.h"

#include <optional>
#include <sstream>

#include "src/base/platform/memory.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/wasm-compiler.h"
#include "src/utils/ostreams.h"
#include "src/wasm/baseline/liftoff-assembler-inl.h"
#include "src/wasm/baseline/liftoff-register.h"
#include "src/wasm/baseline/parallel-move-inl.h"
#include "src/wasm/object-access.h"
#include "src/wasm/wasm-linkage.h"
#include "src/wasm/wasm-opcodes.h"

namespace v8::internal::wasm {

using VarState = LiftoffAssembler::VarState;
using ValueKindSig = LiftoffAssembler::ValueKindSig;

constexpr ValueKind LiftoffAssembler::kIntPtrKind;
constexpr ValueKind LiftoffAssembler::kSmiKind;

namespace {

class RegisterReuseMap {};

enum MergeKeepStackSlots : bool {
  kKeepStackSlots = true,
  kTurnStackSlotsIntoRegisters = false
};
enum MergeAllowConstants : bool {
  kConstantsAllowed = true,
  kConstantsNotAllowed = false
};
enum MergeAllowRegisters : bool {
  kRegistersAllowed = true,
  kRegistersNotAllowed = false
};
enum ReuseRegisters : bool {
  kReuseRegisters = true,
  kNoReuseRegisters = false
};
// {InitMergeRegion} is a helper used by {MergeIntoNewState} to initialize
// a part of the target stack ([target, target+count]) from [source,
// source+count]. The parameters specify how to initialize the part. The goal is
// to set up the region such that later merges (via {MergeStackWith} /
// {MergeFullStackWith}) can successfully transfer their values to this new
// state.
void InitMergeRegion(LiftoffAssembler::CacheState* target_state,
                     const VarState* source, VarState* target, uint32_t count,
                     MergeKeepStackSlots keep_stack_slots,
                     MergeAllowConstants allow_constants,
                     MergeAllowRegisters allow_registers,
                     ReuseRegisters reuse_registers, LiftoffRegList used_regs,
                     int new_stack_offset, ParallelMove& parallel_move) {}
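
// Illustrative sketch only (not part of the original sources): a hypothetical
// call initializing the locals region of a freshly created merge state. The
// flag choices, variable names, and offsets below are assumptions for
// illustration; see {MergeIntoNewState} for the real call sites.
//
//   ParallelMove parallel_move{asm_};  // {asm_}: the current assembler.
//   LiftoffRegList used_regs;          // Registers already claimed.
//   InitMergeRegion(&target_state, source_begin, target_begin, num_locals,
//                   kKeepStackSlots, kConstantsNotAllowed, kRegistersAllowed,
//                   kNoReuseRegisters, used_regs, /*new_stack_offset=*/0,
//                   parallel_move);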

}  // namespace

LiftoffAssembler::CacheState LiftoffAssembler::MergeIntoNewState(
    uint32_t num_locals, uint32_t arity, uint32_t stack_depth) {}

void LiftoffAssembler::CacheState::Steal(CacheState& source) {}

void LiftoffAssembler::CacheState::Split(const CacheState& source) {}

namespace {
int GetSafepointIndexForStackSlot(const VarState& slot) {}
}  // namespace

void LiftoffAssembler::CacheState::GetTaggedSlotsForOOLCode(
    ZoneVector<int>* slots, LiftoffRegList* spills,
    SpillLocation spill_location) {}

void LiftoffAssembler::CacheState::DefineSafepoint(
    SafepointTableBuilder::Safepoint& safepoint) {}

void LiftoffAssembler::CacheState::DefineSafepointWithCalleeSavedRegisters(
    SafepointTableBuilder::Safepoint& safepoint) {}

int LiftoffAssembler::GetTotalFrameSlotCountForGC() const {}

int LiftoffAssembler::OolSpillCount() const {}

namespace {

AssemblerOptions DefaultLiftoffOptions() {}

}  // namespace

LiftoffAssembler::LiftoffAssembler(Zone* zone,
                                   std::unique_ptr<AssemblerBuffer> buffer)
    {}

LiftoffAssembler::~LiftoffAssembler() {}

LiftoffRegister LiftoffAssembler::LoadToRegister_Slow(VarState slot,
                                                      LiftoffRegList pinned) {}

LiftoffRegister LiftoffAssembler::LoadI64HalfIntoRegister(
    VarState slot, RegPairHalf half, LiftoffRegList pinned) {}

void LiftoffAssembler::DropExceptionValueAtOffset(int offset) {}

void LiftoffAssembler::SpillLoopArgs(int num) {}

void LiftoffAssembler::PrepareForBranch(uint32_t arity, LiftoffRegList pinned) {}

#ifdef DEBUG
namespace {
bool SlotInterference(const VarState& a, const VarState& b) {}

bool SlotInterference(const VarState& a, base::Vector<const VarState> v) {}
}  // namespace
#endif

void LiftoffAssembler::MergeFullStackWith(CacheState& target) {}

void LiftoffAssembler::MergeStackWith(CacheState& target, uint32_t arity,
                                      JumpDirection jump_direction) {}

void LiftoffAssembler::Spill(VarState* slot) {}

void LiftoffAssembler::SpillLocals() {}

void LiftoffAssembler::SpillAllRegisters() {}

void LiftoffAssembler::ClearRegister(
    Register reg, std::initializer_list<Register*> possible_uses,
    LiftoffRegList pinned) {}

namespace {
void PrepareStackTransfers(const ValueKindSig* sig,
                           compiler::CallDescriptor* call_descriptor,
                           const VarState* slots,
                           LiftoffStackSlots* stack_slots,
                           ParallelMove* parallel_move,
                           LiftoffRegList* param_regs) {}

}  // namespace

void LiftoffAssembler::PrepareBuiltinCall(
    const ValueKindSig* sig, compiler::CallDescriptor* call_descriptor,
    std::initializer_list<VarState> params) {}

void LiftoffAssembler::PrepareCall(const ValueKindSig* sig,
                                   compiler::CallDescriptor* call_descriptor,
                                   Register* target,
                                   Register target_instance_data) {}

namespace {
constexpr LiftoffRegList AllReturnRegs() {}
}  // namespace

void LiftoffAssembler::FinishCall(const ValueKindSig* sig,
                                  compiler::CallDescriptor* call_descriptor) {}

void LiftoffAssembler::Move(LiftoffRegister dst, LiftoffRegister src,
                            ValueKind kind) {}

void LiftoffAssembler::ParallelRegisterMove(
    base::Vector<const ParallelRegisterMoveTuple> tuples) {}

void LiftoffAssembler::MoveToReturnLocations(
    const FunctionSig* sig, compiler::CallDescriptor* descriptor) {}

void LiftoffAssembler::MoveToReturnLocationsMultiReturn(
    const FunctionSig* sig, compiler::CallDescriptor* descriptor) {}

#if DEBUG
void LiftoffRegList::Print() const {}
#endif

#ifdef ENABLE_SLOW_DCHECKS
bool LiftoffAssembler::ValidateCacheState() const {
  uint32_t register_use_count[kAfterMaxLiftoffRegCode] = {0};
  LiftoffRegList used_regs;
  int offset = StaticStackFrameSize();
  for (const VarState& var : cache_state_.stack_state) {
    // Check for continuous stack offsets.
    offset = NextSpillOffset(var.kind(), offset);
    DCHECK_EQ(offset, var.offset());
    if (!var.is_reg()) continue;
    LiftoffRegister reg = var.reg();
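    // On platforms where an i64 or s128 value occupies a register pair,
    // count both halves of the pair individually.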
    if ((kNeedI64RegPair || kNeedS128RegPair) && reg.is_pair()) {
      ++register_use_count[reg.low().liftoff_code()];
      ++register_use_count[reg.high().liftoff_code()];
    } else {
      ++register_use_count[reg.liftoff_code()];
    }
    used_regs.set(reg);
  }
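  // The cached instance data and memory start (if present) are not on the
  // value stack, but each still counts as one register use.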
  for (Register cache_reg :
       {cache_state_.cached_instance_data, cache_state_.cached_mem_start}) {
    if (cache_reg != no_reg) {
      DCHECK(!used_regs.has(cache_reg));
      int liftoff_code = LiftoffRegister{cache_reg}.liftoff_code();
      used_regs.set(cache_reg);
      DCHECK_EQ(0, register_use_count[liftoff_code]);
      register_use_count[liftoff_code] = 1;
    }
  }
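  // Compare the recomputed use counts and register set against the
  // incrementally maintained cache state; any mismatch is a bookkeeping bug.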
  bool valid = memcmp(register_use_count, cache_state_.register_use_count,
                      sizeof(register_use_count)) == 0 &&
               used_regs == cache_state_.used_registers;
  if (valid) return true;
  std::ostringstream os;
  os << "Error in LiftoffAssembler::ValidateCacheState().\n";
  os << "expected: used_regs " << used_regs << ", counts "
     << PrintCollection(register_use_count) << "\n";
  os << "found:    used_regs " << cache_state_.used_registers << ", counts "
     << PrintCollection(cache_state_.register_use_count) << "\n";
  os << "Use --trace-wasm-decoder and --trace-liftoff to debug.";
  FATAL("%s", os.str().c_str());
}
#endif

LiftoffRegister LiftoffAssembler::SpillOneRegister(LiftoffRegList candidates) {}

LiftoffRegister LiftoffAssembler::SpillAdjacentFpRegisters(
    LiftoffRegList pinned) {}

void LiftoffAssembler::SpillRegister(LiftoffRegister reg) {}

void LiftoffAssembler::set_num_locals(uint32_t num_locals) {}

std::ostream& operator<<(std::ostream& os, LiftoffVarState slot) {}

#if DEBUG
bool CompatibleStackSlotTypes(ValueKind a, ValueKind b) {}
#endif

}  // namespace v8::internal::wasm