// llvm/lld/ELF/Arch/X86_64.cpp

//===- X86_64.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "OutputSections.h"
#include "Relocations.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

namespace {
// Target descriptor for the x86-64 psABI; overrides the TargetInfo hooks
// declared in Target.h. NOTE(review): member declarations are elided in
// this outline.
class X86_64 : public TargetInfo {};
} // namespace

// This is vector of NOP instructions of sizes from 1 to 8 bytes.  The
// appropriately sized instructions are used to fill the gaps between sections
// which are executed during fall through.
//
// Entry i holds the recommended (i+1)-byte NOP encoding from the Intel SDM
// (Vol. 2B, "NOP--No Operation", recommended multi-byte sequences).
static const std::vector<std::vector<uint8_t>> nopInstructions = {
    {0x90},
    {0x66, 0x90},
    {0x0f, 0x1f, 0x00},
    {0x0f, 0x1f, 0x40, 0x00},
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}};

// Construct the x86-64 target, forwarding the link context to the TargetInfo
// base (X86_64 derives from TargetInfo above). NOTE(review): the original
// member-initializer list was truncated to a bare ":" in this outline;
// target-specific member setup (relocation types, PLT entry sizes, etc.)
// is elided — restore from upstream before use.
X86_64::X86_64(Ctx &ctx) : TargetInfo(ctx) {}

// Number of relocations to skip after relaxing a TLS general-dynamic code
// sequence of relocation type `type`. NOTE(review): body elided in this
// outline.
int X86_64::getTlsGdRelaxSkip(RelType type) const {}

// Opcodes for the different X86_64 jmp instructions.
// NOTE(review): the enumerator list is elided in this outline.
enum JmpInsnOpcode : uint32_t {};

// Given the first (optional) and second byte of the insn's opcode, this
// returns the corresponding enum value. Per the comment above, `first` is
// optional — presumably null for single-byte opcodes (verify against
// callers). NOTE(review): body elided in this outline.
static JmpInsnOpcode getJmpInsnType(const uint8_t *first,
                                    const uint8_t *second) {}

// Return the relocation index for input section IS with a specific Offset.
// Returns the maximum size of the vector if no such relocation is found.
// NOTE(review): body elided in this outline.
static unsigned getRelocationWithOffset(const InputSection &is,
                                        uint64_t offset) {}

// Returns true if R corresponds to a relocation used for a jump instruction.
// TODO: Once special relocations for relaxable jump instructions are available,
// this should be modified to use those relocations.
// NOTE(review): body elided in this outline.
static bool isRelocationForJmpInsn(Relocation &R) {}

// Return true if Relocation R points to the first instruction in the
// next section.
// TODO: Delete this once psABI reserves a new relocation type for fall thru
// jumps.
// NOTE(review): body elided in this outline.
static bool isFallThruRelocation(InputSection &is, InputFile *file,
                                 InputSection *nextIS, Relocation &r) {}

// Return the jmp instruction opcode that is the inverse of the given
// opcode.  For example, JE inverted is JNE.
// NOTE(review): body elided in this outline.
static JmpInsnOpcode invertJmpOpcode(const JmpInsnOpcode opcode) {}

// Deletes direct jump instruction in input sections that jumps to the
// following section as it is not required.  If there are two consecutive jump
// instructions, it checks if they can be flipped and one can be deleted.
// For example:
// .section .text
// a.BB.foo:
//    ...
//    10: jne aa.BB.foo
//    16: jmp bar
// aa.BB.foo:
//    ...
//
// can be converted to:
// a.BB.foo:
//   ...
//   10: je bar  #jne flipped to je and the jmp is deleted.
// aa.BB.foo:
//   ...
//
// Presumably returns true when an instruction was deleted — verify against
// callers. NOTE(review): body elided in this outline.
bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file,
                                   InputSection *nextIS) const {}

// Run one pass (`pass`) of linker relaxation; presumably returns whether a
// further pass is required — verify against the generic relaxation driver.
// NOTE(review): body elided in this outline.
bool X86_64::relaxOnce(int pass) const {}

// Map relocation `type` at location `loc` against symbol `s` to lld's
// internal RelExpr kind. NOTE(review): body elided in this outline.
RelExpr X86_64::getRelExpr(RelType type, const Symbol &s,
                           const uint8_t *loc) const {}

// Write the reserved header entries of .got.plt into `buf`.
// NOTE(review): body elided in this outline.
void X86_64::writeGotPltHeader(uint8_t *buf) const {}

// Write the .got.plt entry for symbol `s`. NOTE(review): body elided.
void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const {}

// Write the .got entry used by an IRELATIVE PLT for `s`.
// NOTE(review): body elided in this outline.
void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {}

// Write the PLT header sequence into `buf`. NOTE(review): body elided.
void X86_64::writePltHeader(uint8_t *buf) const {}

// Write the PLT entry for `sym` at virtual address `pltEntryAddr`.
// NOTE(review): body elided in this outline.
void X86_64::writePlt(uint8_t *buf, const Symbol &sym,
                      uint64_t pltEntryAddr) const {}

// Map a static relocation type to the dynamic relocation type emitted for
// it at run time. NOTE(review): body elided in this outline.
RelType X86_64::getDynRel(RelType type) const {}

// Rewrite a TLS general-dynamic access at `loc` into local-exec form.
// NOTE(review): bodies of these four helpers are elided in this outline.
static void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) {}

// Rewrite a TLS general-dynamic access at `loc` into initial-exec form.
static void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) {}

// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
static void relaxTlsIeToLe(uint8_t *loc, const Relocation &, uint64_t val) {}

// Rewrite a TLS local-dynamic access at `loc` into local-exec form.
static void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) {}

// A JumpInstrMod at a specific offset indicates that the jump instruction
// opcode at that offset must be modified.  This is specifically used to relax
// jump instructions with basic block sections.  This function looks at the
// JumpMod and effects the change.
// NOTE(review): body elided in this outline.
void X86_64::applyJumpInstrMod(uint8_t *loc, JumpModType type,
                               unsigned size) const {}

// Read the addend stored in-place at `buf` for relocation `type`.
// NOTE(review): body elided in this outline.
int64_t X86_64::getImplicitAddend(const uint8_t *buf, RelType type) const {}

// Forward declaration; defined after adjustGotPcExpr below.
static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val);

// Apply relocation `rel` with resolved value `val` at `loc`.
// NOTE(review): body elided in this outline.
void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const {}

// Decide whether a GOT-PC relocation at `loc` (with `addend`) can be relaxed,
// returning the adjusted RelExpr. NOTE(review): body elided in this outline.
RelExpr X86_64::adjustGotPcExpr(RelType type, int64_t addend,
                                const uint8_t *loc) const {}

// A subset of relaxations can only be applied for no-PIC. This method
// handles such relaxations. Instructions encoding information was taken from:
// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
//    64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
// NOTE(review): body elided in this outline.
static void relaxGotNoPic(uint8_t *loc, uint64_t val, uint8_t op, uint8_t modRm,
                          bool isRex2) {}

// Perform GOT-indirection relaxation at `loc` (declared above).
// NOTE(review): body elided in this outline.
static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) {}

// A split-stack prologue starts by checking the amount of stack remaining
// in one of two ways:
// A) Comparing of the stack pointer to a field in the tcb.
// B) Or a load of a stack pointer offset with an lea to r10 or r11.
// NOTE(review): body elided in this outline.
bool X86_64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
                                              uint8_t stOther) const {}

// Apply all relocations of allocatable section `sec` into `buf`.
// NOTE(review): body elided in this outline.
void X86_64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {}

// If Intel Indirect Branch Tracking is enabled, we have to emit special PLT
// entries containing endbr64 instructions. A PLT entry will be split into two
// parts, one in .plt.sec (writePlt), and the other in .plt (writeIBTPlt).
namespace {
// x86-64 variant emitting IBT-compatible (endbr64) PLT entries.
// NOTE(review): member declarations are elided in this outline.
class IntelIBT : public X86_64 {};
} // namespace

// Write the .got.plt entry for `s` under the IBT scheme.
// NOTE(review): bodies of these methods are elided in this outline.
void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const {}

// Write the .plt.sec part of the split IBT PLT entry for `sym`.
void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym,
                        uint64_t pltEntryAddr) const {}

// Write `numEntries` entries of the .plt part of the split IBT PLT.
void IntelIBT::writeIBTPlt(uint8_t *buf, size_t numEntries) const {}

// These nonstandard PLT entries are to mitigate Spectre v2 security
// vulnerability. In order to mitigate Spectre v2, we want to avoid indirect
// branch instructions such as `jmp *GOTPLT(%rip)`. So, in the following PLT
// entries, we use a CALL followed by MOV and RET to do the same thing as an
// indirect jump. That instruction sequence is so-called "retpoline".
//
// We have two types of retpoline PLTs as a size optimization. If `-z now`
// is specified, all dynamic symbols are resolved at load-time. Thus, when
// that option is given, we can omit code for symbol lazy resolution.
namespace {
// Retpoline PLT variant supporting lazy symbol resolution.
// NOTE(review): member declarations are elided in this outline.
class Retpoline : public X86_64 {};

// Retpoline PLT variant for `-z now` (no lazy-resolution stubs).
class RetpolineZNow : public X86_64 {};
} // namespace

// Construct the retpoline PLT target, forwarding the context to the X86_64
// base (Retpoline derives from X86_64 above). NOTE(review): the original
// member-initializer list was truncated to ":" in this outline; PLT-size
// member setup is elided — restore from upstream before use.
Retpoline::Retpoline(Ctx &ctx) : X86_64(ctx) {}

// Write the .got.plt entry for `s` used by the retpoline PLT.
// NOTE(review): bodies of these methods are elided in this outline.
void Retpoline::writeGotPlt(uint8_t *buf, const Symbol &s) const {}

// Write the retpoline PLT header ("call; mov; ret" thunk sequence).
void Retpoline::writePltHeader(uint8_t *buf) const {}

// Write the retpoline PLT entry for `sym` at `pltEntryAddr`.
void Retpoline::writePlt(uint8_t *buf, const Symbol &sym,
                         uint64_t pltEntryAddr) const {}

// Construct the `-z now` retpoline PLT target, forwarding the context to the
// X86_64 base (RetpolineZNow derives from X86_64 above). NOTE(review): the
// original member-initializer list was truncated to ":" in this outline;
// PLT-size member setup is elided — restore from upstream before use.
RetpolineZNow::RetpolineZNow(Ctx &ctx) : X86_64(ctx) {}

// Write the `-z now` retpoline PLT header (no lazy-resolution code).
// NOTE(review): bodies of these methods are elided in this outline.
void RetpolineZNow::writePltHeader(uint8_t *buf) const {}

// Write the `-z now` retpoline PLT entry for `sym` at `pltEntryAddr`.
void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {}

// Factory returning the TargetInfo instance for x86-64 — presumably selects
// among X86_64 / IntelIBT / Retpoline / RetpolineZNow based on ctx options;
// verify against upstream. NOTE(review): body elided in this outline.
TargetInfo *elf::getX86_64TargetInfo(Ctx &ctx) {}