#include <limits>
#include <optional>
#include "src/base/logging.h"
#include "src/base/overflowing-math.h"
#include "src/builtins/builtins.h"
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/macro-assembler.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/x64/assembler-x64.h"
#include "src/codegen/x64/register-x64.h"
#include "src/common/globals.h"
#include "src/common/ptr-compr-inl.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/backend/code-generator.h"
#include "src/compiler/backend/gap-resolver.h"
#include "src/compiler/backend/instruction-codes.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/execution/frame-constants.h"
#include "src/heap/mutable-page-metadata.h"
#include "src/objects/code-kind.h"
#include "src/objects/smi.h"
#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-objects.h"
#endif  // V8_ENABLE_WEBASSEMBLY
namespace v8::internal::compiler {
#define __ …
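// On modern x64 cores a flag-setting instruction (TEST, CMP, and some ALU
// ops) can macro-fuse with an immediately following conditional jump into a
// single micro-op. The enums below classify the two halves of such a pair,
// IsMacroFused says whether a given pair actually fuses, and
// ShouldAlignForJCCErratum checks whether the (possibly fused) jump must be
// aligned so that it neither crosses nor ends on a 32-byte boundary, the
// software mitigation for the Intel JCC erratum.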
enum class FirstMacroFusionInstKind { … };
enum class SecondMacroFusionInstKind { … };
bool IsMacroFused(FirstMacroFusionInstKind first_kind,
SecondMacroFusionInstKind second_kind) { … }
SecondMacroFusionInstKind GetSecondMacroFusionInstKind(
FlagsCondition condition) { … }
bool ShouldAlignForJCCErratum(Instruction* instr,
FirstMacroFusionInstKind first_kind) { … }
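// Adds X64-specific methods for decoding operands.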
class X64OperandConverter : public InstructionOperandConverter { … };
namespace {
bool HasAddressingMode(Instruction* instr) { … }
bool HasImmediateInput(Instruction* instr, size_t index) { … }
bool HasRegisterInput(Instruction* instr, size_t index) { … }
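// Out-of-line slow paths: the two NaN loaders materialize a canonical quiet
// NaN (e.g. for float min/max when an input is NaN),
// OutOfLineTruncateDoubleToI handles the case where the inline cvttsd2si
// conversion overflows, and OutOfLineRecordWrite performs the write-barrier
// call after a store of a tagged pointer.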
class OutOfLineLoadFloat32NaN final : public OutOfLineCode { … };
class OutOfLineLoadFloat64NaN final : public OutOfLineCode { … };
class OutOfLineTruncateDoubleToI final : public OutOfLineCode { … };
class OutOfLineRecordWrite final : public OutOfLineCode { … };
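// Emits a store of {value} to {operand} and returns the pc offset of the
// instruction performing the memory access, so callers can register it with
// the trap handler. A relaxed store is a plain mov of the width implied by
// {rep}; a seq_cst store is implemented as xchg, whose implicit lock prefix
// doubles as the required fence.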
template <std::memory_order order>
int EmitStore(MacroAssembler* masm, Operand operand, Register value,
MachineRepresentation rep) { … }
template <std::memory_order order>
int EmitStore(MacroAssembler* masm, Operand operand, Immediate value,
MachineRepresentation rep);
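// Only the relaxed specialization is defined below: a seq_cst store is an
// xchg and therefore needs its value in a register, so immediate-value
// stores are never generated for it. Leaving the primary template undefined
// turns any other instantiation into a link-time error.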
template <>
int EmitStore<std::memory_order_relaxed>(MacroAssembler* masm, Operand operand,
Immediate value,
MachineRepresentation rep) { … }
#if V8_ENABLE_WEBASSEMBLY
class WasmOutOfLineTrap : public OutOfLineCode { … };
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) { … }
#else
void RecordTrapInfoIfNeeded(Zone* zone, CodeGenerator* codegen,
InstructionCode opcode, Instruction* instr,
int pc) {
DCHECK_EQ(kMemoryAccessDirect, instr->memory_access_mode());
}
#endif  // V8_ENABLE_WEBASSEMBLY
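// On TSAN builds the race detector cannot observe raw machine stores and
// loads, so the helpers below additionally route every store (and relaxed
// load) through a stub call that reports the access to TSAN.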
#ifdef V8_IS_TSAN
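// If the wasm out-of-bounds trap handler is enabled, probe {operand} with a
// plain load of the access width first: an OOB access then faults on this
// recognizable instruction instead of inside the TSAN stub, where the trap
// handler could not attribute the signal to wasm code.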
void EmitMemoryProbeForTrapHandlerIfNeeded(MacroAssembler* masm,
Register scratch, Operand operand,
StubCallMode mode, int size) {
#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
if (trap_handler::IsTrapHandlerEnabled() &&
mode == StubCallMode::kCallWasmRuntimeStub) {
switch (size) {
case kInt8Size:
masm->movb(scratch, operand);
break;
case kInt16Size:
masm->movw(scratch, operand);
break;
case kInt32Size:
masm->movl(scratch, operand);
break;
case kInt64Size:
masm->movq(scratch, operand);
break;
default:
UNREACHABLE();
}
}
#endif  // V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
}
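// Out-of-line code that calls the TSAN store stub. The stub both performs
// the store and reports it to TSAN, which is why the TSAN-aware store path
// below emits no inline store of its own.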
class OutOfLineTSANStore : public OutOfLineCode {
public:
OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
Register scratch0, StubCallMode stub_mode, int size,
std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
scratch0_(scratch0),
#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
#endif
size_(size),
memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
void Generate() final {
const SaveFPRegsMode save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
__ leaq(scratch0_, operand_);
#if V8_ENABLE_WEBASSEMBLY
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallWasmRuntimeStub,
memory_order_);
return;
}
#endif
masm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
Operand const operand_;
Register const value_;
Register const scratch0_;
#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
#endif
int size_;
const std::memory_order memory_order_;
Zone* zone_;
};
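// Note that the jump to the out-of-line code is unconditional: on TSAN
// builds every store goes through the stub, and the OOL machinery is used
// only to keep the call sequence out of the main instruction stream.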
void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, MacroAssembler* masm,
Operand operand, Register value_reg,
X64OperandConverter& i, StubCallMode mode, int size,
std::memory_order order) {
DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
scratch0, mode, size, order);
masm->jmp(tsan_ool->entry());
masm->bind(tsan_ool->exit());
}
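// Returns the register holding the value exactly as it will be stored:
// sandboxed pointers are encoded first, and for indirect pointers the value
// actually written is the pointer-table index loaded from the referenced
// object's self indirect-pointer slot.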
template <std::memory_order order>
Register GetTSANValueRegister(MacroAssembler* masm, Register value,
X64OperandConverter& i,
MachineRepresentation rep) {
if (rep == MachineRepresentation::kSandboxedPointer) {
Register value_reg = i.TempRegister(1);
masm->movq(value_reg, value);
masm->EncodeSandboxedPointer(value_reg);
return value_reg;
} else if (rep == MachineRepresentation::kIndirectPointer) {
Register value_reg = i.TempRegister(1);
masm->movl(
value_reg,
FieldOperand(value, ExposedTrustedObject::kSelfIndirectPointerOffset));
return value_reg;
}
return value;
}
template <std::memory_order order>
Register GetTSANValueRegister(MacroAssembler* masm, Immediate value,
X64OperandConverter& i,
MachineRepresentation rep);
template <>
Register GetTSANValueRegister<std::memory_order_relaxed>(
MacroAssembler* masm, Immediate value, X64OperandConverter& i,
MachineRepresentation rep) {
Register value_reg = i.TempRegister(1);
masm->movq(value_reg, value);
if (rep == MachineRepresentation::kSandboxedPointer) {
masm->EncodeSandboxedPointer(value_reg);
} else if (rep == MachineRepresentation::kIndirectPointer) {
masm->movl(value_reg,
FieldOperand(value_reg,
ExposedTrustedObject::kSelfIndirectPointerOffset));
}
return value_reg;
}
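// TSAN-aware store: record trap info, probe the address for the trap
// handler, then delegate the store itself to the TSAN store stub. Code of
// kind FOR_TESTING instead emits a plain store, presumably because such
// code cannot call builtin stubs; the DCHECK in EmitTSANStoreOOL guards
// this split.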
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
MacroAssembler* masm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep, Instruction* instr) {
if (codegen->code_kind() != CodeKind::FOR_TESTING) {
if (instr->HasMemoryAccessMode()) {
RecordTrapInfoIfNeeded(zone, codegen, instr->opcode(), instr,
masm->pc_offset());
}
int size = ElementSizeInBytes(rep);
EmitMemoryProbeForTrapHandlerIfNeeded(masm, i.TempRegister(0), operand,
stub_call_mode, size);
Register value_reg = GetTSANValueRegister<order>(masm, value, i, rep);
EmitTSANStoreOOL(zone, codegen, masm, operand, value_reg, i, stub_call_mode,
size, order);
} else {
int store_instr_offset = EmitStore<order>(masm, operand, value, rep);
if (instr->HasMemoryAccessMode()) {
RecordTrapInfoIfNeeded(zone, codegen, instr->opcode(), instr,
store_instr_offset);
}
}
}
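// Reports a relaxed load to TSAN. Unlike stores, the load itself stays
// inline in the caller: the stub receives only the address and size, so all
// it can do is record the access.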
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
public:
OutOfLineTSANRelaxedLoad(CodeGenerator* gen, Operand operand,
Register scratch0, StubCallMode stub_mode, int size)
: OutOfLineCode(gen),
operand_(operand),
scratch0_(scratch0),
#if V8_ENABLE_WEBASSEMBLY
stub_mode_(stub_mode),
#endif
size_(size),
        zone_(gen->zone()) {}
void Generate() final {
const SaveFPRegsMode save_fp_mode = frame()->DidAllocateDoubleRegisters()
? SaveFPRegsMode::kSave
: SaveFPRegsMode::kIgnore;
__ leaq(scratch0_, operand_);
#if V8_ENABLE_WEBASSEMBLY
if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
__ CallTSANRelaxedLoadStub(scratch0_, save_fp_mode, size_,
StubCallMode::kCallWasmRuntimeStub);
return;
}
#endif
__ CallTSANRelaxedLoadStub(scratch0_, save_fp_mode, size_,
StubCallMode::kCallBuiltinPointer);
}
private:
Operand const operand_;
Register const scratch0_;
#if V8_ENABLE_WEBASSEMBLY
StubCallMode const stub_mode_;
#endif
int size_;
Zone* zone_;
};
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
MacroAssembler* masm, Operand operand,
X64OperandConverter& i, StubCallMode mode,
int size) {
if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
Register scratch0 = i.TempRegister(0);
auto tsan_ool = zone->New<OutOfLineTSANRelaxedLoad>(codegen, operand,
scratch0, mode, size);
masm->jmp(tsan_ool->entry());
masm->bind(tsan_ool->exit());
}
#else  // V8_IS_TSAN
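// Without TSAN there is nothing to report: the store helper reduces to a
// plain EmitStore (plus trap-info recording) and the relaxed-load helper
// emits nothing.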
template <std::memory_order order, typename ValueT>
void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
MacroAssembler* masm, Operand operand, ValueT value,
X64OperandConverter& i, StubCallMode stub_call_mode,
MachineRepresentation rep, Instruction* instr) { … }
void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
MacroAssembler* masm, Operand operand,
X64OperandConverter& i, StubCallMode mode,
int size) { … }
#endif  // V8_IS_TSAN
}  // namespace
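// Emission helpers for AssembleArchInstruction: each ASSEMBLE_* macro
// expands to the register/operand/immediate dispatch boilerplate for one
// family of instructions, keeping the main opcode switch readable.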
#define ASSEMBLE_UNOP …
#define ASSEMBLE_BINOP …
#define ASSEMBLE_COMPARE …
#define ASSEMBLE_TEST(asm_instr) …
#define ASSEMBLE_MULT …
#define ASSEMBLE_SHIFT …
#define ASSEMBLE_MOVX …
#define ASSEMBLE_SSE_BINOP …
#define ASSEMBLE_SSE_UNOP …
#define ASSEMBLE_AVX_BINOP …
#define ASSEMBLE_IEEE754_BINOP …
#define ASSEMBLE_IEEE754_UNOP …
#define ASSEMBLE_ATOMIC_BINOP …
#define ASSEMBLE_ATOMIC64_BINOP …
#define ASSEMBLE_SIMD_BINOP(opcode) …
#define ASSEMBLE_SIMD_F16x8_BINOP(instr) …
#define ASSEMBLE_SIMD_F16x8_RELOP(instr) …
#define ASSEMBLE_SIMD256_BINOP(opcode, cpu_feature) …
#define ASSEMBLE_SIMD_INSTR …
#define ASSEMBLE_SIMD_IMM_INSTR …
#define ASSEMBLE_SIMD_PUNPCK_SHUFFLE …
#define ASSEMBLE_SIMD_IMM_SHUFFLE …
#define ASSEMBLE_SIMD_ALL_TRUE …
#define ASSEMBLE_SIMD_SHIFT …
#define ASSEMBLE_SIMD256_SHIFT(opcode, width) …
#define ASSEMBLE_PINSR …
#define ASSEMBLE_SEQ_CST_STORE …
void CodeGenerator::AssembleDeconstructFrame() { … }
void CodeGenerator::AssemblePrepareTailCall() { … }
namespace {
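// Adjusts rsp before a tail call so that {new_slot_above_sp} becomes the
// first unused slot above the stack pointer, growing the frame as needed
// and shrinking it only when {allow_shrinkage} permits.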
void AdjustStackPointerForTailCall(Instruction* instr,
MacroAssembler* assembler, Linkage* linkage,
OptimizedCompilationInfo* info,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) { … }
void SetupSimdImmediateInRegister(MacroAssembler* assembler, uint32_t* imms,
XMMRegister reg) { … }
void SetupSimd256ImmediateInRegister(MacroAssembler* assembler, uint32_t* imms,
YMMRegister reg, XMMRegister scratch) { … }
}  // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_slot_offset) { … }
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_slot_offset) { … }
void CodeGenerator::AssembleCodeStartRegisterCheck() { … }
void CodeGenerator::BailoutIfDeoptimized() { … }
bool ShouldClearOutputRegisterBeforeInstruction(CodeGenerator* g,
Instruction* instr) { … }
void CodeGenerator::AssemblePlaceHolderForLazyDeopt(Instruction* instr) { … }
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) { … }
#undef ASSEMBLE_PINSR
#undef ASSEMBLE_UNOP
#undef ASSEMBLE_BINOP
#undef ASSEMBLE_COMPARE
#undef ASSEMBLE_MULT
#undef ASSEMBLE_SHIFT
#undef ASSEMBLE_MOVX
#undef ASSEMBLE_SSE_BINOP
#undef ASSEMBLE_SSE_UNOP
#undef ASSEMBLE_AVX_BINOP
#undef ASSEMBLE_IEEE754_BINOP
#undef ASSEMBLE_IEEE754_UNOP
#undef ASSEMBLE_ATOMIC_BINOP
#undef ASSEMBLE_ATOMIC64_BINOP
#undef ASSEMBLE_SIMD_INSTR
#undef ASSEMBLE_SIMD_IMM_INSTR
#undef ASSEMBLE_SIMD_PUNPCK_SHUFFLE
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
#undef ASSEMBLE_SEQ_CST_STORE
namespace {
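// Maps the architecture-independent FlagsCondition to the corresponding x64
// condition code.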
constexpr Condition FlagsConditionToCondition(FlagsCondition condition) { … }
}  // namespace
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) { … }
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) { … }
void CodeGenerator::AssembleArchJumpRegardlessOfAssemblyOrder(
RpoNumber target) { … }
#if V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchTrap(Instruction* instr,
FlagsCondition condition) { … }
#endif  // V8_ENABLE_WEBASSEMBLY
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
FlagsCondition condition) { … }
void CodeGenerator::AssembleArchConditionalBoolean(Instruction* instr) { … }
void CodeGenerator::AssembleArchConditionalBranch(Instruction* instr,
BranchInfo* branch) { … }
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end, std::optional<int32_t>& last_cmp_value) { … }
void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) { … }
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) { … }
void CodeGenerator::AssembleArchSelect(Instruction* instr,
FlagsCondition condition) { … }
namespace {
static const int kQuadWordSize = …;
}  // namespace
void CodeGenerator::FinishFrame(Frame* frame) { … }
void CodeGenerator::AssembleConstructFrame() { … }
void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) { … }
void CodeGenerator::FinishCode() { … }
void CodeGenerator::PrepareForDeoptimizationExits(
ZoneDeque<DeoptimizationExit*>* exits) { … }
void CodeGenerator::IncrementStackAccessCounter(
InstructionOperand* source, InstructionOperand* destination) { … }
AllocatedOperand CodeGenerator::Push(InstructionOperand* source) { … }
void CodeGenerator::Pop(InstructionOperand* dest, MachineRepresentation rep) { … }
void CodeGenerator::PopTempStackSlots() { … }
void CodeGenerator::MoveToTempLocation(InstructionOperand* source,
MachineRepresentation rep) { … }
void CodeGenerator::MoveTempLocationTo(InstructionOperand* dest,
MachineRepresentation rep) { … }
void CodeGenerator::SetPendingMove(MoveOperands* move) { … }
namespace {
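// When both sides of a move are 32-bit values, a 32-bit mov suffices: it is
// smaller (no REX.W prefix) and avoids accidentally materializing the upper
// half of a compressed tagged slot as if it were a full 64-bit value.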
bool Is32BitOperand(InstructionOperand* operand) { … }
bool Use32BitMove(InstructionOperand* source, InstructionOperand* destination) { … }
}  // namespace
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) { … }
void CodeGenerator::AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) { … }
void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) { … }
#undef __
}  // namespace v8::internal::compiler