chromium/v8/src/compiler/backend/x64/code-generator-x64.cc

// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits>
#include <optional>

#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
#include "src/builtins/builtins.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/codegen/x64/register-x64.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
#include "src/heap/mutable-page-metadata.h"
#include "src/objects/code-kind.h"
#include "src/objects/smi.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8::internal::compiler {

#define __ masm()->

enum class FirstMacroFusionInstKind {};

enum class SecondMacroFusionInstKind {};

bool IsMacroFused(FirstMacroFusionInstKind first_kind,
                  SecondMacroFusionInstKind second_kind) {}

SecondMacroFusionInstKind GetSecondMacroFusionInstKind(
    FlagsCondition condition) {}

bool ShouldAlignForJCCErratum(Instruction* instr,
                              FirstMacroFusionInstKind first_kind) {}

// Adds X64 specific methods for decoding operands.
class X64OperandConverter : public InstructionOperandConverter {};
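
// A minimal illustrative sketch of what such a converter adds (the class body
// is elided above; `X64OperandConverterSketch` and `InputImmediate32Sketch`
// are hypothetical names, not the actual V8 API):
//
//   class X64OperandConverterSketch : public InstructionOperandConverter {
//    public:
//     X64OperandConverterSketch(CodeGenerator* gen, Instruction* instr)
//         : InstructionOperandConverter(gen, instr) {}
//     // Decode a 32-bit constant input into an x64 Immediate operand.
//     Immediate InputImmediate32Sketch(size_t index) {
//       return Immediate(InputInt32(index));
//     }
//   };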

namespace {

bool HasAddressingMode(Instruction* instr) {}

bool HasImmediateInput(Instruction* instr, size_t index) {}

bool HasRegisterInput(Instruction* instr, size_t index) {}

class OutOfLineLoadFloat32NaN final : public OutOfLineCode {};

class OutOfLineLoadFloat64NaN final : public OutOfLineCode {};

class OutOfLineTruncateDoubleToI final : public OutOfLineCode {};

class OutOfLineRecordWrite final : public OutOfLineCode {};

template <std::memory_order order>
int EmitStore(MacroAssembler* masm, Operand operand, Register value,
              MachineRepresentation rep) {}

template <std::memory_order order>
int EmitStore(MacroAssembler* masm, Operand operand, Immediate value,
              MachineRepresentation rep);

template <>
int EmitStore<std::memory_order_relaxed>(MacroAssembler* masm, Operand operand,
                                         Immediate value,
                                         MachineRepresentation rep) {}

#if V8_ENABLE_WEBASSEMBLY
class WasmOutOfLineTrap : public OutOfLineCode {};

void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
                            InstructionCode opcode, Instruction* instr,
                            int pc) {}

#else

void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
                            InstructionCode opcode, Instruction* instr,
                            int pc) {
  DCHECK_EQ(kMemoryAccessDirect, instr->memory_access_mode());
}

#endif  // V8_ENABLE_WEBASSEMBLY

#ifdef V8_IS_TSAN
void EmitMemoryProbeForTrapHandlerIfNeeded(MacroAssembler* masm,
                                           Register scratch, Operand operand,
                                           StubCallMode mode, int size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
  // The wasm OOB trap handler needs to be able to look up the faulting
  // instruction pointer to handle the SIGSEGV raised by an OOB access. It
  // will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
  // redundant load here to give the trap handler a chance to handle any
  // OOB SIGSEGVs.
  if (trap_handler::IsTrapHandlerEnabled() &&
      mode == StubCallMode::kCallWasmRuntimeStub) {
    switch (size) {
      case kInt8Size:
        masm->movb(scratch, operand);
        break;
      case kInt16Size:
        masm->movw(scratch, operand);
        break;
      case kInt32Size:
        masm->movl(scratch, operand);
        break;
      case kInt64Size:
        masm->movq(scratch, operand);
        break;
      default:
        UNREACHABLE();
    }
  }
#endif
}

class OutOfLineTSANStore : public OutOfLineCode {
 public:
  OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
                     Register scratch0, StubCallMode stub_mode, int size,
                     std::memory_order order)
      : OutOfLineCode(gen),
        operand_(operand),
        value_(value),
        scratch0_(scratch0),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
        size_(size),
        memory_order_(order),
        zone_(gen->zone()) {
    DCHECK(!AreAliased(value, scratch0));
  }

  void Generate() final {
    const SaveFPRegsMode save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    __ leaq(scratch0_, operand_);

#if V8_ENABLE_WEBASSEMBLY
    if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
                                StubCallMode::kCallWasmRuntimeStub,
                                memory_order_);
      return;
    }
#endif  // V8_ENABLE_WEBASSEMBLY

    masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
                              StubCallMode::kCallBuiltinPointer, memory_order_);
  }

 private:
  Operand const operand_;
  Register const value_;
  Register const scratch0_;
#if V8_ENABLE_WEBASSEMBLY
  StubCallMode const stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
  int size_;
  const std::memory_order memory_order_;
  Zone* zone_;
};

void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, MacroAssembler* masm,
                      Operand operand, Register value_reg,
                      X64OperandConverter& i, StubCallMode mode, int size,
                      std::memory_order order) {
  // The FOR_TESTING code doesn't initialize the root register. We can't call
  // the TSAN builtin since we need to load the external reference through the
  // root register.
  // TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
  // path. It is not crucial, but it would be nice to remove this restriction.
  DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);

  Register scratch0 = i.TempRegister(0);
  auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
                                                scratch0, mode, size, order);
  masm->jmp(tsan_ool->entry());
  masm->bind(tsan_ool->exit());
}

template <std::memory_order order>
Register GetTSANValueRegister(MacroAssembler* masm, Register value,
                              X64OperandConverter& i,
                              MachineRepresentation rep) {
  if (rep == MachineRepresentation::kSandboxedPointer) {
    // SandboxedPointers need to be encoded.
    Register value_reg = i.TempRegister(1);
    masm->movq(value_reg, value);
    masm->EncodeSandboxedPointer(value_reg);
    return value_reg;
  } else if (rep == MachineRepresentation::kIndirectPointer) {
    // Indirect pointer fields contain an index to a pointer table entry, which
    // is obtained from the referenced object.
    Register value_reg = i.TempRegister(1);
    masm->movl(
        value_reg,
        FieldOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset));
    return value_reg;
  }
  return value;
}

template <std::memory_order order>
Register GetTSANValueRegister(MacroAssembler* masm, Immediate value,
                              X64OperandConverter& i,
                              MachineRepresentation rep);

template <>
Register GetTSANValueRegister<std::memory_order_relaxed>(
    MacroAssembler* masm, Immediate value, X64OperandConverter& i,
    MachineRepresentation rep) {
  Register value_reg = i.TempRegister(1);
  masm->movq(value_reg, value);
  if (rep == MachineRepresentation::kSandboxedPointer) {
    // SandboxedPointers need to be encoded.
    masm->EncodeSandboxedPointer(value_reg);
  } else if (rep == MachineRepresentation::kIndirectPointer) {
    // Indirect pointer fields contain an index to a pointer table entry, which
    // is obtained from the referenced object.
    masm->movl(value_reg,
               FieldOperand(value_reg,
                            ExposedTrustedObject::kSelfIndirectPointerOffset));
  }
  return value_reg;
}

template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
                        MacroAssembler* masm, Operand operand, ValueT value,
                        X64OperandConverter& i, StubCallMode stub_call_mode,
                        MachineRepresentation rep, Instruction* instr) {
  // The FOR_TESTING code doesn't initialize the root register. We can't call
  // the TSAN builtin since we need to load the external reference through the
  // root register.
  // TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
  // path. It is not crucial, but it would be nice to remove this restriction.
  if (codegen->code_kind() != CodeKind::FOR_TESTING) {
    if (instr->HasMemoryAccessMode()) {
      RecordTrapInfoIfNeeded(zone, codegen, instr->opcode(), instr,
                             masm->pc_offset());
    }
    int size = ElementSizeInBytes(rep);
    EmitMemoryProbeForTrapHandlerIfNeeded(masm, i.TempRegister(0), operand,
                                          stub_call_mode, size);
    Register value_reg = GetTSANValueRegister<order>(masm, value, i, rep);
    EmitTSANStoreOOL(zone, codegen, masm, operand, value_reg, i, stub_call_mode,
                     size, order);
  } else {
    int store_instr_offset = EmitStore<order>(masm, operand, value, rep);
    if (instr->HasMemoryAccessMode()) {
      RecordTrapInfoIfNeeded(zone, codegen, instr->opcode(), instr,
                             store_instr_offset);
    }
  }
}

class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
 public:
  OutOfLineTSANRelaxedLoad(CodeGenerator* gen, Operand operand,
                           Register scratch0, StubCallMode stub_mode, int size)
      : OutOfLineCode(gen),
        operand_(operand),
        scratch0_(scratch0),
#if V8_ENABLE_WEBASSEMBLY
        stub_mode_(stub_mode),
#endif  // V8_ENABLE_WEBASSEMBLY
        size_(size),
        zone_(gen->zone()) {
  }

  void Generate() final {
    const SaveFPRegsMode save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                            ? SaveFPRegsMode::kSave
                                            : SaveFPRegsMode::kIgnore;
    __ leaq(scratch0_, operand_);

#if V8_ENABLE_WEBASSEMBLY
    if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
      // A direct call to a wasm runtime stub defined in this module.
      // Just encode the stub index. This will be patched when the code
      // is added to the native module and copied into wasm code space.
      __ CallTSANRelaxedLoadStub(scratch0_, save_fp_mode, size_,
                                 StubCallMode::kCallWasmRuntimeStub);
      return;
    }
#endif  // V8_ENABLE_WEBASSEMBLY

    __ CallTSANRelaxedLoadStub(scratch0_, save_fp_mode, size_,
                               StubCallMode::kCallBuiltinPointer);
  }

 private:
  Operand const operand_;
  Register const scratch0_;
#if V8_ENABLE_WEBASSEMBLY
  StubCallMode const stub_mode_;
#endif  // V8_ENABLE_WEBASSEMBLY
  int size_;
  Zone* zone_;
};

void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
                                    MacroAssembler* masm, Operand operand,
                                    X64OperandConverter& i, StubCallMode mode,
                                    int size) {
  // The FOR_TESTING code doesn't initialize the root register. We can't call
  // the TSAN builtin since we need to load the external reference through the
  // root register.
  // TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
  // path. It is not crucial, but it would be nice to remove this if.
  if (codegen->code_kind() == CodeKind::FOR_TESTING) return;

  Register scratch0 = i.TempRegister(0);
  auto tsan_ool = zone->New<OutOfLineTSANRelaxedLoad>(codegen, operand,
                                                      scratch0, mode, size);
  masm->jmp(tsan_ool->entry());
  masm->bind(tsan_ool->exit());
}

#else
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
                        MacroAssembler* masm, Operand operand, ValueT value,
                        X64OperandConverter& i, StubCallMode stub_call_mode,
                        MachineRepresentation rep, Instruction* instr) {}

void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
                                    MacroAssembler* masm, Operand operand,
                                    X64OperandConverter& i, StubCallMode mode,
                                    int size) {}
#endif  // V8_IS_TSAN

}  // namespace

#define ASSEMBLE_UNOP

#define ASSEMBLE_BINOP

#define ASSEMBLE_COMPARE

#define ASSEMBLE_TEST(asm_instr)

#define ASSEMBLE_MULT

#define ASSEMBLE_SHIFT

#define ASSEMBLE_MOVX

#define ASSEMBLE_SSE_BINOP

#define ASSEMBLE_SSE_UNOP

#define ASSEMBLE_AVX_BINOP

#define ASSEMBLE_IEEE754_BINOP

#define ASSEMBLE_IEEE754_UNOP

#define ASSEMBLE_ATOMIC_BINOP

#define ASSEMBLE_ATOMIC64_BINOP

// Handles both SSE and AVX codegen. For SSE we use DefineSameAsFirst, so the
// dst and first src will be the same. For AVX we don't restrict it that way, so
// we will omit unnecessary moves.
#define ASSEMBLE_SIMD_BINOP(opcode)
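
// Illustrative sketch of the dispatch described above (the real macro body is
// elided; the v-prefix instruction naming and the `i.*Simd128Register()`
// accessors are assumed to match the rest of this backend):
#define ASSEMBLE_SIMD_BINOP_SKETCH(opcode)                                    \
  do {                                                                        \
    if (CpuFeatures::IsSupported(AVX)) {                                      \
      CpuFeatureScope avx_scope(masm(), AVX);                                 \
      /* AVX: three-operand form, dst need not alias the first source. */     \
      __ v##opcode(i.OutputSimd128Register(), i.InputSimd128Register(0),      \
                   i.InputSimd128Register(1));                                \
    } else {                                                                  \
      /* SSE: DefineSameAsFirst guarantees dst == first source. */            \
      DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));        \
      __ opcode(i.OutputSimd128Register(), i.InputSimd128Register(1));        \
    }                                                                         \
  } while (false)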

#define ASSEMBLE_SIMD_F16x8_BINOP(instr)

#define ASSEMBLE_SIMD_F16x8_RELOP(instr)

#define ASSEMBLE_SIMD256_BINOP(opcode, cpu_feature)

#define ASSEMBLE_SIMD_INSTR

#define ASSEMBLE_SIMD_IMM_INSTR

#define ASSEMBLE_SIMD_PUNPCK_SHUFFLE

#define ASSEMBLE_SIMD_IMM_SHUFFLE

#define ASSEMBLE_SIMD_ALL_TRUE

// This macro will directly emit the opcode if the shift is an immediate - the
// shift value will be taken modulo 2^width. Otherwise, it will emit code to
// perform the modulus operation.
#define ASSEMBLE_SIMD_SHIFT
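
// Illustrative sketch of the behavior described above, showing only an
// AVX-style three-operand form for brevity (the real macro body is elided;
// `InputInt##width` masks the immediate to `width` bits, i.e. takes it modulo
// 2^width, and the kScratchRegister/kScratchDoubleReg masking path for
// register shift counts is an assumption):
#define ASSEMBLE_SIMD_SHIFT_SKETCH(opcode, width)                             \
  do {                                                                        \
    CpuFeatureScope avx_scope(masm(), AVX); /* assumes AVX is available */    \
    XMMRegister dst = i.OutputSimd128Register();                              \
    if (HasImmediateInput(instr, 1)) {                                        \
      /* Immediate case: emit directly with the already-masked immediate. */  \
      __ v##opcode(dst, i.InputSimd128Register(0),                            \
                   uint8_t{i.InputInt##width(1)});                            \
    } else {                                                                  \
      /* Register case: reduce the shift count modulo 2^width first. */       \
      constexpr int mask = (1 << width) - 1;                                  \
      __ movq(kScratchRegister, i.InputRegister(1));                          \
      __ andq(kScratchRegister, Immediate(mask));                             \
      __ Movq(kScratchDoubleReg, kScratchRegister);                           \
      __ v##opcode(dst, i.InputSimd128Register(0), kScratchDoubleReg);        \
    }                                                                         \
  } while (false)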

#define ASSEMBLE_SIMD256_SHIFT(opcode, width)

#define ASSEMBLE_PINSR

#define ASSEMBLE_SEQ_CST_STORE

void CodeGenerator::AssembleDeconstructFrame() {}

void CodeGenerator::AssemblePrepareTailCall() {}

namespace {

void AdjustStackPointerForTailCall(Instruction* instr,
                                   MacroAssembler* assembler, Linkage* linkage,
                                   OptimizedCompilationInfo* info,
                                   FrameAccessState* state,
                                   int new_slot_above_sp,
                                   bool allow_shrinkage = true) {}

void SetupSimdImmediateInRegister(MacroAssembler* assembler, uint32_t* imms,
                                  XMMRegister reg) {}

void SetupSimd256ImmediateInRegister(MacroAssembler* assembler, uint32_t* imms,
                                     YMMRegister reg, XMMRegister scratch) {}

}  // namespace

void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_slot_offset) {}

void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_slot_offset) {}

// Check that {kJavaScriptCallCodeStartRegister} is correct.
void CodeGenerator::AssembleCodeStartRegisterCheck() {}

void CodeGenerator::BailoutIfDeoptimized() {}

bool ShouldClearOutputRegisterBeforeInstruction(CodeGenerator* g,
                                                Instruction* instr) {}

void CodeGenerator::AssemblePlaceHolderForLazyDeopt(Instruction* instr) {}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {}  // NOLINT(readability/fn_size)
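
// Illustrative sketch of this function's overall shape (the real body, a large
// switch over arch opcodes using the ASSEMBLE_* macros above, is elided; the
// opcode cases shown are examples only):
//
//   X64OperandConverter i(this, instr);
//   switch (ArchOpcodeField::decode(instr->opcode())) {
//     case kArchNop:
//       break;
//     case kX64Add32:
//       ASSEMBLE_BINOP(addl);
//       break;
//     // ... many more cases ...
//   }
//   return kSuccess;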

#undef ASSEMBLE_PINSR
#undef ASSEMBLE_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_COMPARE
#undef ASSEMBLE_MULT
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SSE_BINOP
#undef ASSEMBLE_SSE_UNOP
#undef ASSEMBLE_AVX_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC64_BINOP
#undef ASSEMBLE_SIMD_INSTR
#undef ASSEMBLE_SIMD_IMM_INSTR
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
#undef ASSEMBLE_SEQ_CST_STORE

namespace {

constexpr Condition FlagsConditionToCondition(FlagsCondition condition) {}

}  // namespace

// Assembles branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {}

void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
                                            BranchInfo* branch) {}

void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
    RpoNumber target) {}

#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {}
#endif  // V8_ENABLE_WEBASSEMBLY

// Assembles boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {}

void CodeGenerator::AssembleArchConditionalBoolean(Instruction* instr) {}

void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
                                                  BranchInfo* branch) {}

void CodeGenerator::AssembleArchBinarySearchSwitchRange(
    Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
    std::pair<int32_t, Label*>* end, std::optional<int32_t>& last_cmp_value) {}

void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {}

void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {}

void CodeGenerator::AssembleArchSelect(Instruction* instr,
                                       FlagsCondition condition) {}

namespace {

static const int kQuadWordSize =;

}  // namespace

void CodeGenerator::FinishFrame(Frame* frame) {}

void CodeGenerator::AssembleConstructFrame() {}

void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {}

void CodeGenerator::FinishCode() {}

void CodeGenerator::PrepareForDeoptimizationExits(
    ZoneDeque<DeoptimizationExit*>* exits) {}

void CodeGenerator::IncrementStackAccessCounter(
    InstructionOperand* source, InstructionOperand* destination) {}

AllocatedOperand CodeGenerator::Push(InstructionOperand* source) {}

void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) {}

void CodeGenerator::PopTempStackSlots() {}

void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
                                       MachineRepresentation rep) {}

void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
                                       MachineRepresentation rep) {}

void CodeGenerator::SetPendingMove(MoveOperands* move) {}

namespace {

bool Is32BitOperand(InstructionOperand* operand) {}

// When we need only 32 bits, move only 32 bits. Benefits:
// - Save a byte here and there (depending on the destination
//   register; "movl eax, ..." is smaller than "movq rax, ...").
// - Safeguard against accidental decompression of compressed slots.
// We must check both {source} and {destination} to be 32-bit values,
// because treating 32-bit sources as 64-bit values can be perfectly
// fine as a result of virtual register renaming (to avoid redundant
// explicit zero-extensions that also happen implicitly).
bool Use32BitMove(InstructionOperand* source, InstructionOperand* destination) {}
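
// Minimal sketch of the check described above (the real bodies are elided):
// both operands must be known 32-bit values before the narrower move is safe.
//
//   bool Use32BitMove(InstructionOperand* source,
//                     InstructionOperand* destination) {
//     return Is32BitOperand(source) && Is32BitOperand(destination);
//   }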

}  // namespace

void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {}

void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {}

void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {}

#undef __

}  // namespace v8::internal::compiler