#ifndef V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_INL_H_
#define V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_INL_H_

#include "src/wasm/baseline/liftoff-assembler.h"
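
// Include the platform-specific implementation.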
#if V8_TARGET_ARCH_IA32
#include "src/wasm/baseline/ia32/liftoff-assembler-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/wasm/baseline/x64/liftoff-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/wasm/baseline/arm64/liftoff-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "src/wasm/baseline/arm/liftoff-assembler-arm-inl.h"
#elif V8_TARGET_ARCH_PPC64
#include "src/wasm/baseline/ppc/liftoff-assembler-ppc-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/wasm/baseline/loong64/liftoff-assembler-loong64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/wasm/baseline/riscv/liftoff-assembler-riscv64-inl.h"
#elif V8_TARGET_ARCH_RISCV32
#include "src/wasm/baseline/riscv/liftoff-assembler-riscv32-inl.h"
#else
#error Unsupported architecture.
#endif

namespace v8::internal::wasm {
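
// The headers above provide the per-architecture parts of LiftoffAssembler;
// the inline implementations below are shared by all platforms.

// Helpers for computing the offsets of stack spill slots.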
int LiftoffAssembler::NextSpillOffset(ValueKind kind, int top_spill_offset) { … }

int LiftoffAssembler::NextSpillOffset(ValueKind kind) { … }

int LiftoffAssembler::TopSpillOffset() const { … }
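
// Helpers for pushing values onto, and popping them off, Liftoff's abstract
// value stack.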
void LiftoffAssembler::PushRegister(ValueKind kind, LiftoffRegister reg) { … }

void LiftoffAssembler::PushException() { … }

void LiftoffAssembler::PushConstant(ValueKind kind, int32_t i32_const) { … }

void LiftoffAssembler::PushStack(ValueKind kind) { … }

void LiftoffAssembler::LoadToFixedRegister(VarState slot, LiftoffRegister reg) { … }

void LiftoffAssembler::PopToFixedRegister(LiftoffRegister reg) { … }
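
// Helpers for loading tagged values (a FixedArray length and a Smi) as
// untagged i32 values.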
void LiftoffAssembler::LoadFixedArrayLengthAsInt32(LiftoffRegister dst,
                                                   Register array,
                                                   LiftoffRegList pinned) { … }

void LiftoffAssembler::LoadSmiAsInt32(LiftoffRegister dst, Register src_addr,
                                      int32_t offset) { … }
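
// Pointer-sized operations. These forward to the i64 variants on 64-bit
// targets and to the i32 variants on 32-bit targets (see the 32-bit
// delegation of emit_ptrsize_cond_jumpi below).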
void LiftoffAssembler::emit_ptrsize_add(Register dst, Register lhs,
                                        Register rhs) { … }

void LiftoffAssembler::emit_ptrsize_sub(Register dst, Register lhs,
                                        Register rhs) { … }

void LiftoffAssembler::emit_ptrsize_and(Register dst, Register lhs,
                                        Register rhs) { … }

void LiftoffAssembler::emit_ptrsize_shri(Register dst, Register src,
                                         int amount) { … }

void LiftoffAssembler::emit_ptrsize_addi(Register dst, Register lhs,
                                         intptr_t imm) { … }

void LiftoffAssembler::emit_ptrsize_muli(Register dst, Register lhs,
                                         int32_t imm) { … }

void LiftoffAssembler::emit_ptrsize_set_cond(Condition condition, Register dst,
                                             LiftoffRegister lhs,
                                             LiftoffRegister rhs) { … }
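
// Records that Liftoff cannot compile this function for {reason}; the
// function then falls back to an optimizing compilation tier.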
void LiftoffAssembler::bailout(LiftoffBailoutReason reason,
                               const char* detail) { … }

#ifdef V8_TARGET_ARCH_32_BIT
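
// On 32-bit targets, an i64 value is held in a pair of GP registers. The
// helpers below are shared by all 32-bit platforms and implement the
// pointer-sized and i64 operations in terms of the corresponding i32
// operations on the half-registers.
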
void LiftoffAssembler::emit_ptrsize_cond_jumpi(Condition cond, Label* label,
                                               Register lhs, int32_t imm,
                                               const FreezeCacheState& frozen) {
  emit_i32_cond_jumpi(cond, label, lhs, imm, frozen);
}

namespace liftoff {
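
// A 64-bit operation whose halves are independent can be emitted as two i32
// operations, but the destination half-registers may alias the source
// half-registers. Hypothetical example: with dst = {low: r1, high: r0} and
// lhs = {low: r2, high: r1}, writing dst.low() first would clobber
// lhs.high() (r1) before it is read, so the helpers below pick an emission
// order (or a temporary register) that avoids any such clobbering.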
template <void (LiftoffAssembler::*op)(Register, Register, Register)>
void EmitI64IndependentHalfOperation(LiftoffAssembler* assm,
                                     LiftoffRegister dst, LiftoffRegister lhs,
                                     LiftoffRegister rhs) {
  // If {dst.low()} does not alias {lhs.high()} or {rhs.high()}, compute the
  // lower half first, then the upper half.
  if (dst.low() != lhs.high() && dst.low() != rhs.high()) {
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    return;
  }
  // If {dst.high()} does not alias {lhs.low()} or {rhs.low()}, compute the
  // upper half first, then the lower half.
  if (dst.high() != lhs.low() && dst.high() != rhs.low()) {
    (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
    (assm->*op)(dst.low_gp(), lhs.low_gp(), rhs.low_gp());
    return;
  }
  // Otherwise, compute the lower half into a temporary register first.
  Register tmp = assm->GetUnusedRegister(kGpReg, LiftoffRegList{lhs, rhs}).gp();
  (assm->*op)(tmp, lhs.low_gp(), rhs.low_gp());
  (assm->*op)(dst.high_gp(), lhs.high_gp(), rhs.high_gp());
  assm->Move(dst.low_gp(), tmp, kI32);
}
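
// Same as above, but the right-hand side is a 64-bit immediate, split into
// its low and high 32-bit words.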
template <void (LiftoffAssembler::*op)(Register, Register, int32_t)>
void EmitI64IndependentHalfOperationImm(LiftoffAssembler* assm,
                                        LiftoffRegister dst,
                                        LiftoffRegister lhs, int64_t imm) {
  int32_t low_word = static_cast<int32_t>(imm);
  int32_t high_word = static_cast<int32_t>(imm >> 32);
  // If {dst.low()} does not alias {lhs.high()}, compute the lower half first.
  if (dst.low() != lhs.high()) {
    (assm->*op)(dst.low_gp(), lhs.low_gp(), low_word);
    (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
    return;
  }
  // If {dst.high()} does not alias {lhs.low()}, compute the upper half first.
  if (dst.high() != lhs.low()) {
    (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
    (assm->*op)(dst.low_gp(), lhs.low_gp(), low_word);
    return;
  }
  // Otherwise, compute the lower half into a temporary register first.
  Register tmp = assm->GetUnusedRegister(kGpReg, LiftoffRegList{lhs}).gp();
  (assm->*op)(tmp, lhs.low_gp(), low_word);
  (assm->*op)(dst.high_gp(), lhs.high_gp(), high_word);
  assm->Move(dst.low_gp(), tmp, kI32);
}

}  // namespace liftoff
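
// The bitwise i64 operations act on the two 32-bit halves independently, so
// they can all be expressed via the helpers above. For example, emit_i64_and
// emits one emit_i32_and for the low halves and one for the high halves.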
void LiftoffAssembler::emit_i64_and(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_and>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_andi(LiftoffRegister dst, LiftoffRegister lhs,
                                     int32_t imm) {
  liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_andi>(
      this, dst, lhs, imm);
}

void LiftoffAssembler::emit_i64_or(LiftoffRegister dst, LiftoffRegister lhs,
                                   LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_or>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_ori(LiftoffRegister dst, LiftoffRegister lhs,
                                    int32_t imm) {
  liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_ori>(
      this, dst, lhs, imm);
}

void LiftoffAssembler::emit_i64_xor(LiftoffRegister dst, LiftoffRegister lhs,
                                    LiftoffRegister rhs) {
  liftoff::EmitI64IndependentHalfOperation<&LiftoffAssembler::emit_i32_xor>(
      this, dst, lhs, rhs);
}

void LiftoffAssembler::emit_i64_xori(LiftoffRegister dst, LiftoffRegister lhs,
                                     int32_t imm) {
  liftoff::EmitI64IndependentHalfOperationImm<&LiftoffAssembler::emit_i32_xori>(
      this, dst, lhs, imm);
}
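
// On a 32-bit target, uintptr_t is 32 bits wide, so extending a u32 to
// pointer size is at most a register-to-register move.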
void LiftoffAssembler::emit_u32_to_uintptr(Register dst, Register src) {
  if (dst != src) Move(dst, src, kI32);
}
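
// There is no upper half to clear on a 32-bit target.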
void LiftoffAssembler::clear_i32_upper_half(Register dst) { UNREACHABLE(); }
#endif  // V8_TARGET_ARCH_32_BIT

}  // namespace v8::internal::wasm

#endif  // V8_WASM_BASELINE_LIFTOFF_ASSEMBLER_INL_H_