// chromium/v8/src/maglev/maglev-assembler-inl.h

// Copyright 2023 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_
#define V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_

#include <algorithm>
#include <cstddef>
#include <tuple>
#include <type_traits>
#include <utility>

#include "src/base/iterator.h"
#include "src/base/template-utils.h"
#include "src/codegen/machine-type.h"
#include "src/maglev/maglev-assembler.h"

#ifdef V8_TARGET_ARCH_ARM
#include "src/maglev/arm/maglev-assembler-arm-inl.h"
#elif V8_TARGET_ARCH_ARM64
#include "src/maglev/arm64/maglev-assembler-arm64-inl.h"
#elif V8_TARGET_ARCH_X64
#include "src/maglev/x64/maglev-assembler-x64-inl.h"
#elif V8_TARGET_ARCH_S390X
#include "src/maglev/s390/maglev-assembler-s390-inl.h"
#else
#error "Maglev does not support this architecture."
#endif

namespace v8 {
namespace internal {
namespace maglev {

namespace detail {

// Base case provides an error.
template <typename T, typename Enable = void>
struct CopyForDeferredHelper {};

// Helper for copies by value.
template <typename T, typename Enable = void>
struct CopyForDeferredByValue {};

// Node pointers are copied by value.
template <typename T>
struct CopyForDeferredHelper<T*,
                             std::enable_if_t<std::is_base_of_v<NodeBase, T>>>
    : public CopyForDeferredByValue<T*> {};
// Arithmetic values and enums are copied by value.
template <typename T>
struct CopyForDeferredHelper<T, std::enable_if_t<std::is_arithmetic_v<T>>>
    : public CopyForDeferredByValue<T> {};
template <typename T>
struct CopyForDeferredHelper<T, std::enable_if_t<std::is_enum_v<T>>>
    : public CopyForDeferredByValue<T> {};
// The remaining specializations opt concrete compiler/codegen value types
// into by-value copying for deferred code; each inherits the trivial Copy
// from CopyForDeferredByValue.
// MaglevCompilationInfos are copied by value.
template <>
struct CopyForDeferredHelper<MaglevCompilationInfo*>
    : public CopyForDeferredByValue<MaglevCompilationInfo*> {};
// Machine registers are copied by value.
template <>
struct CopyForDeferredHelper<Register>
    : public CopyForDeferredByValue<Register> {};
template <>
struct CopyForDeferredHelper<DoubleRegister>
    : public CopyForDeferredByValue<DoubleRegister> {};
// Bytecode offsets are copied by value.
template <>
struct CopyForDeferredHelper<BytecodeOffset>
    : public CopyForDeferredByValue<BytecodeOffset> {};
// EagerDeoptInfo pointers are copied by value.
template <>
struct CopyForDeferredHelper<EagerDeoptInfo*>
    : public CopyForDeferredByValue<EagerDeoptInfo*> {};
// LazyDeoptInfo pointers are copied by value.
template <>
struct CopyForDeferredHelper<LazyDeoptInfo*>
    : public CopyForDeferredByValue<LazyDeoptInfo*> {};
// ZoneLabelRef is copied by value.
template <>
struct CopyForDeferredHelper<ZoneLabelRef>
    : public CopyForDeferredByValue<ZoneLabelRef> {};
// MapCompare is copied by value.
template <>
struct CopyForDeferredHelper<MapCompare>
    : public CopyForDeferredByValue<MapCompare> {};
// RegList are copied by value.
template <>
struct CopyForDeferredHelper<RegList> : public CopyForDeferredByValue<RegList> {
};
// Register snapshots are copied by value.
template <>
struct CopyForDeferredHelper<RegisterSnapshot>
    : public CopyForDeferredByValue<RegisterSnapshot> {};
// Feedback slots are copied by value.
template <>
struct CopyForDeferredHelper<FeedbackSlot>
    : public CopyForDeferredByValue<FeedbackSlot> {};
// Heap Refs are copied by value.
template <typename T>
struct CopyForDeferredHelper<T, typename std::enable_if<std::is_base_of<
                                    compiler::ObjectRef, T>::value>::type>
    : public CopyForDeferredByValue<T> {};

// Copies `value` for capture by deferred code, dispatching through the
// CopyForDeferredHelper machinery; unsupported types fail to compile with an
// explanatory error. The previous bodies were empty while declared to return
// T -- flowing off the end of a value-returning function is UB.
template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T&& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_info,
                                        std::forward<T>(value));
}

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, T& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_info, value);
}

template <typename T>
T CopyForDeferred(MaglevCompilationInfo* compilation_info, const T& value) {
  return CopyForDeferredHelper<T>::Copy(compilation_info, value);
}

// Deduces the argument list of a callable as a std::tuple plus a matching
// plain-function-pointer type. The unspecialized case unwraps a functor or
// non-generic lambda via its operator(). The two partial specializations had
// been reduced to bare, ill-formed specialization ids (no template header or
// struct keyword); restored here.
template <typename Function>
struct FunctionArgumentsTupleHelper
    : public FunctionArgumentsTupleHelper<decltype(&Function::operator())> {};

// Pointer-to-const-member-function, i.e. a non-mutable lambda's operator().
template <typename C, typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (C::*)(A...) const> {
  using FunctionPointer = R (*)(A...);
  using Tuple = std::tuple<A...>;
  static constexpr size_t kSize = sizeof...(A);
};

// Plain function reference.
template <typename R, typename... A>
struct FunctionArgumentsTupleHelper<R (&)(A...)> {
  using FunctionPointer = R (*)(A...);
  using Tuple = std::tuple<A...>;
  static constexpr size_t kSize = sizeof...(A);
};

// Removes the first element type from a std::tuple type (used to drop the
// leading MaglevAssembler* parameter from a deferred-code callable's argument
// tuple). The specialization had been reduced to a bare, ill-formed
// specialization id; restored here.
template <typename T>
struct StripFirstTupleArg;

template <typename T1, typename... T>
struct StripFirstTupleArg<std::tuple<T1, T...>> {
  using Stripped = std::tuple<T...>;
};

// Concrete DeferredCodeInfo for a specific deferred-code generator callable.
// NOTE(review): the class body is empty here -- it appears stripped. Upstream
// this presumably stores the generator's function pointer plus
// CopyForDeferred-copies of its arguments and overrides Generate(); confirm
// against upstream V8 before relying on it.
template <typename Function>
class DeferredCodeInfoImpl final : public DeferredCodeInfo {};

}  // namespace detail

// Registers a deferred (out-of-line) code section presumably generated by
// `deferred_code_gen(masm, args...)`, returning its entry label.
// NOTE(review): the body is empty yet declared to return Label* -- calling it
// is undefined behavior. Appears stripped; restore from upstream V8.
template <typename Function, typename... Args>
inline Label* MaglevAssembler::MakeDeferredCode(Function&& deferred_code_gen,
                                                Args&&... args) {}

// Note this doesn't take capturing lambdas by design, since state may
// change until `deferred_code_gen` is actually executed. Use either a
// non-capturing lambda, or a plain function pointer.
// Emits a conditional jump (on `cond`) to deferred code built from
// `deferred_code_gen` and the deferred-copied `args`.
// NOTE(review): body is empty -- appears stripped; no code is emitted.
template <typename Function, typename... Args>
inline void MaglevAssembler::JumpToDeferredIf(Condition cond,
                                              Function&& deferred_code_gen,
                                              Args&&... args) {}

// Slow-path allocation: presumably calls `builtin` with `size_in_bytes`
// under `register_snapshot`, leaves the allocation in `object`, and jumps to
// `done`. NOTE(review): body is empty -- appears stripped; confirm against
// upstream V8.
template <typename T>
inline void AllocateSlow(MaglevAssembler* masm,
                         RegisterSnapshot register_snapshot, Register object,
                         Builtin builtin, T size_in_bytes, ZoneLabelRef done) {}

// NOTE(review): every body below is empty -- the implementations appear to
// have been stripped from this copy. Comments give the presumed contracts
// from the signatures; confirm against upstream V8.

// Converts the Smi in `smi` to a double in `result`.
inline void MaglevAssembler::SmiToDouble(DoubleRegister result, Register smi) {}

// Compares `r1` with an immediate and branches; the jump to whichever target
// equals `next_block` is presumably elided (fallthrough).
inline void MaglevAssembler::CompareInt32AndBranch(Register r1, int32_t value,
                                                   Condition cond,
                                                   BasicBlock* if_true,
                                                   BasicBlock* if_false,
                                                   BasicBlock* next_block) {}

// Register-register variant of the compare-and-branch above.
inline void MaglevAssembler::CompareInt32AndBranch(Register r1, Register r2,
                                                   Condition cond,
                                                   BasicBlock* if_true,
                                                   BasicBlock* if_false,
                                                   BasicBlock* next_block) {}

// Branches on `condition` between two basic blocks, with fallthrough to
// `next_block`.
inline void MaglevAssembler::Branch(Condition condition, BasicBlock* if_true,
                                    BasicBlock* if_false,
                                    BasicBlock* next_block) {}

// Label-based branch with explicit jump distances and fallthrough flags for
// each side.
inline void MaglevAssembler::Branch(Condition condition, Label* if_true,
                                    Label::Distance true_distance,
                                    bool fallthrough_when_true, Label* if_false,
                                    Label::Distance false_distance,
                                    bool fallthrough_when_false) {}

// Tagged-field load helpers. NOTE(review): all bodies are empty -- the
// implementations appear stripped from this copy; comments describe the
// presumed contracts from the signatures.

// Loads (and, on pointer-compression builds, presumably decompresses) the
// tagged field at `operand` into `result`.
inline void MaglevAssembler::LoadTaggedField(Register result,
                                             MemOperand operand) {}

// Same, addressing the field as `object` + `offset`.
inline void MaglevAssembler::LoadTaggedField(Register result, Register object,
                                             int offset) {}

// Loads the tagged field without decompressing the compressed pointer.
inline void MaglevAssembler::LoadTaggedFieldWithoutDecompressing(
    Register result, Register object, int offset) {}

// Loads a field known to hold a Smi (tagged signed value).
inline void MaglevAssembler::LoadTaggedSignedField(Register result,
                                                   MemOperand operand) {}

inline void MaglevAssembler::LoadTaggedSignedField(Register result,
                                                   Register object,
                                                   int offset) {}

// Loads a Smi field and untags it to a plain integer.
inline void MaglevAssembler::LoadAndUntagTaggedSignedField(Register result,
                                                           Register object,
                                                           int offset) {}

// Loads the double payload of a HeapNumber or Oddball into `result`.
inline void MaglevAssembler::LoadHeapNumberOrOddballValue(DoubleRegister result,
                                                          Register object) {}

namespace detail {

#ifdef DEBUG
inline bool ClobberedBy(RegList written_registers, Register reg) {}
inline bool ClobberedBy(RegList written_registers, DoubleRegister reg) {}
inline bool ClobberedBy(RegList written_registers,
                        DirectHandle<Object> handle) {}
inline bool ClobberedBy(RegList written_registers, Tagged<Smi> smi) {}
inline bool ClobberedBy(RegList written_registers, Tagged<TaggedIndex> index) {}
inline bool ClobberedBy(RegList written_registers, int32_t imm) {}
inline bool ClobberedBy(RegList written_registers, RootIndex index) {}
inline bool ClobberedBy(RegList written_registers, const Input& input) {}

inline bool ClobberedBy(DoubleRegList written_registers, Register reg) {}
inline bool ClobberedBy(DoubleRegList written_registers, DoubleRegister reg) {}
inline bool ClobberedBy(DoubleRegList written_registers,
                        DirectHandle<Object> handle) {}
inline bool ClobberedBy(DoubleRegList written_registers, Tagged<Smi> smi) {}
inline bool ClobberedBy(DoubleRegList written_registers,
                        Tagged<TaggedIndex> index) {}
inline bool ClobberedBy(DoubleRegList written_registers, int32_t imm) {}
inline bool ClobberedBy(DoubleRegList written_registers, RootIndex index) {}
inline bool ClobberedBy(DoubleRegList written_registers, const Input& input) {}

// We don't know what's inside machine registers or operands, so assume they
// match.
inline bool MachineTypeMatches(MachineType type, Register reg) {}
inline bool MachineTypeMatches(MachineType type, DoubleRegister reg) {}
inline bool MachineTypeMatches(MachineType type, MemOperand reg) {}
inline bool MachineTypeMatches(MachineType type,
                               DirectHandle<HeapObject> handle) {}
inline bool MachineTypeMatches(MachineType type, Tagged<Smi> smi) {}
inline bool MachineTypeMatches(MachineType type, Tagged<TaggedIndex> index) {}
inline bool MachineTypeMatches(MachineType type, int32_t imm) {}
inline bool MachineTypeMatches(MachineType type, RootIndex index) {}
inline bool MachineTypeMatches(MachineType type, const Input& input) {}

template <typename Descriptor, typename Arg>
void CheckArg(MaglevAssembler* masm, Arg& arg, int& i) {}

template <typename Descriptor, typename Iterator>
void CheckArg(MaglevAssembler* masm,
              const base::iterator_range<Iterator>& range, int& i) {}

template <typename Descriptor, typename... Args>
void CheckArgs(MaglevAssembler* masm, const std::tuple<Args...>& args) {}

#else  // DEBUG

// Release builds: argument checking intentionally compiles away to nothing.
template <typename Descriptor, typename... Args>
void CheckArgs(Args&&... args) {}

#endif  // DEBUG

// Pushes stack-passed arguments for a builtin call; the empty-tuple overload
// is the recursion base case. NOTE(review): bodies are empty -- the
// implementations appear stripped from this copy.
template <typename Descriptor, typename... Args>
void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<Args...> args) {}

template <typename Descriptor>
void PushArgumentsForBuiltin(MaglevAssembler* masm, std::tuple<> empty_args) {}

// Presumably moves register arguments and pushes stack arguments into the
// locations required by kBuiltin's call interface descriptor (validated by
// CheckArgs in debug builds). NOTE(review): body empty -- appears stripped.
template <Builtin kBuiltin, typename... Args>
void MoveArgumentsForBuiltin(MaglevAssembler* masm, Args&&... args) {}

}  // namespace detail

// Builtin / runtime call helpers. NOTE(review): all bodies are empty -- the
// implementations appear stripped from this copy.

// Calls `builtin` directly.
inline void MaglevAssembler::CallBuiltin(Builtin builtin) {}

// Typed variant: presumably routes `args` through MoveArgumentsForBuiltin
// before the call so they match kBuiltin's descriptor.
template <Builtin kBuiltin, typename... Args>
inline void MaglevAssembler::CallBuiltin(Args&&... args) {}

// Calls runtime function `fid` (zero-argument form).
inline void MaglevAssembler::CallRuntime(Runtime::FunctionId fid) {}

// Calls runtime function `fid` with `num_args` arguments already set up.
inline void MaglevAssembler::CallRuntime(Runtime::FunctionId fid,
                                         int num_args) {}

// Smi tagging helpers. NOTE(review): all bodies are empty -- the
// implementations appear stripped from this copy; comments describe the
// presumed contracts from the signatures.

// Stores the root-table map `map` into `object`'s map slot.
inline void MaglevAssembler::SetMapAsRoot(Register object, RootIndex map) {}

// Smi-tags the int32 in `src` into `dst`, jumping to `fail` on overflow
// (i.e. when the value does not fit in a Smi).
inline void MaglevAssembler::SmiTagInt32AndJumpIfFail(
    Register dst, Register src, Label* fail, Label::Distance distance) {}

// In-place variant of the above.
inline void MaglevAssembler::SmiTagInt32AndJumpIfFail(
    Register reg, Label* fail, Label::Distance distance) {}

// As above but jumps to `success` when tagging fits instead of on failure.
inline void MaglevAssembler::SmiTagInt32AndJumpIfSuccess(
    Register dst, Register src, Label* success, Label::Distance distance) {}

inline void MaglevAssembler::SmiTagInt32AndJumpIfSuccess(
    Register reg, Label* success, Label::Distance distance) {}

// Smi-tags without any range check; caller guarantees the value fits.
inline void MaglevAssembler::UncheckedSmiTagInt32(Register dst, Register src) {}

inline void MaglevAssembler::UncheckedSmiTagInt32(Register reg) {}

// Uint32 counterparts of the tagging helpers above.
inline void MaglevAssembler::SmiTagUint32AndJumpIfFail(
    Register dst, Register src, Label* fail, Label::Distance distance) {}

inline void MaglevAssembler::SmiTagUint32AndJumpIfFail(
    Register reg, Label* fail, Label::Distance distance) {}

inline void MaglevAssembler::SmiTagUint32AndJumpIfSuccess(
    Register dst, Register src, Label* success, Label::Distance distance) {}

inline void MaglevAssembler::SmiTagUint32AndJumpIfSuccess(
    Register reg, Label* success, Label::Distance distance) {}

inline void MaglevAssembler::UncheckedSmiTagUint32(Register dst, Register src) {}

inline void MaglevAssembler::UncheckedSmiTagUint32(Register reg) {}

// Adds/subtracts `value` to/from the Smi in `reg`, jumping to `fail` on
// overflow.
inline void MaglevAssembler::SmiAddConstant(Register reg, int value,
                                            Label* fail,
                                            Label::Distance distance) {}

inline void MaglevAssembler::SmiSubConstant(Register reg, int value,
                                            Label* fail,
                                            Label::Distance distance) {}

// String-type helpers. NOTE(review): all bodies are empty -- the
// implementations appear stripped from this copy.

// Branches on whether `map` is a string map; `jump_if_true` selects which
// outcome takes the jump to `target`.
inline void MaglevAssembler::JumpIfStringMap(Register map, Label* target,
                                             Label::Distance distance,
                                             bool jump_if_true) {}

// Jumps to `target` when `heap_object` is a string.
inline void MaglevAssembler::JumpIfString(Register heap_object, Label* target,
                                          Label::Distance distance) {}

// Jumps to `target` when `heap_object` is not a string.
inline void MaglevAssembler::JumpIfNotString(Register heap_object,
                                             Label* target,
                                             Label::Distance distance) {}

// Two-way branch on string-ness with explicit fallthrough control per side.
inline void MaglevAssembler::CheckJSAnyIsStringAndBranch(
    Register heap_object, Label* if_true, Label::Distance true_distance,
    bool fallthrough_when_true, Label* if_false, Label::Distance false_distance,
    bool fallthrough_when_false) {}

// Loads the length of `string` into `result`.
inline void MaglevAssembler::StringLength(Register result, Register string) {}

// Loads the map of `obj` into `dst` in the form used for map comparisons.
// Fix: this member function is defined in a header but was not marked
// `inline`, which is an ODR violation once the header is included from more
// than one translation unit. NOTE(review): the body is empty -- it appears
// stripped; confirm against upstream V8.
inline void MaglevAssembler::LoadMapForCompare(Register dst, Register obj) {}

// Deopt / exception-handler bookkeeping helpers. NOTE(review): all bodies are
// empty -- the implementations appear stripped from this copy.

// Records a lazy-deopt point for the current code offset.
inline void MaglevAssembler::DefineLazyDeoptPoint(LazyDeoptInfo* info) {}

// Records an exception-handler point for `node`.
inline void MaglevAssembler::DefineExceptionHandlerPoint(NodeBase* node) {}

// Records both an exception-handler point and a lazy-deopt point for `node`.
inline void MaglevAssembler::DefineExceptionHandlerAndLazyDeoptPoint(
    NodeBase* node) {}

// Defines a safepoint that carries the given lazy-deopt information.
inline void SaveRegisterStateForCall::DefineSafepointWithLazyDeopt(
    LazyDeoptInfo* lazy_deopt_info) {}

}  // namespace maglev
}  // namespace internal
}  // namespace v8

#endif  // V8_MAGLEV_MAGLEV_ASSEMBLER_INL_H_