llvm/lld/ELF/Arch/AArch64.cpp

//===- AArch64.cpp --------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/Support/Endian.h"

using namespace llvm;
using namespace llvm::support::endian;
using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;

// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
uint64_t elf::getAArch64Page(uint64_t expr) {
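  // Mask off the low 12 bits, per the definition above: this is the linker's
  // 4 KiB notion of a page regardless of the runtime page size.
  return expr & ~static_cast<uint64_t>(0xFFF);
}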

// A BTI landing pad is a valid target for an indirect branch when Branch
// Target Identification has been enabled. As linker-generated branches are
// via x16, the BTI landing pads are defined as: BTI C, BTI J, BTI JC,
// PACIASP, PACIBSP.
bool elf::isAArch64BTILandingPad(Symbol &s, int64_t a) {}

namespace {
class AArch64 : public TargetInfo {};

struct AArch64Relaxer {};
} // namespace

// Return the bits [start, end] of val, shifted right by start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
static uint64_t getBits(uint64_t val, int start, int end) {
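  // Build a mask (end - start + 1) bits wide, then extract the field.
  uint64_t mask = (uint64_t(1) << (end + 1 - start)) - 1;
  return (val >> start) & mask;
}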

AArch64::AArch64(Ctx &ctx) : TargetInfo(ctx) {}

RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
                            const uint8_t *loc) const {}

RelExpr AArch64::adjustTlsExpr(RelType type, RelExpr expr) const {}

bool AArch64::usesOnlyLowPageBits(RelType type) const {}

RelType AArch64::getDynRel(RelType type) const {}

int64_t AArch64::getImplicitAddend(const uint8_t *buf, RelType type) const {}

void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {}

void AArch64::writeIgotPlt(uint8_t *buf, const Symbol &s) const {}

void AArch64::writePltHeader(uint8_t *buf) const {}

void AArch64::writePlt(uint8_t *buf, const Symbol &sym,
                       uint64_t pltEntryAddr) const {}

bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
                         uint64_t branchAddr, const Symbol &s,
                         int64_t a) const {}

uint32_t AArch64::getThunkSectionSpacing() const {
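  // A sketch: B and BL reach +/- 128 MiB, so thunk sections must be placed no
  // further apart than that. The margin subtracted here, which leaves room
  // for the thunks themselves, is an illustrative value, not a tuned one.
  return (128 * 1024 * 1024) - 0x30000;
}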

bool AArch64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
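  // A sketch covering the direct branch relocations: B and BL encode a signed
  // 28-bit byte offset, i.e. +/- 128 MiB. Other relocation types handled here
  // are not range-limited branches, so report them as in range.
  if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26)
    return true;
  uint64_t range = 128 * 1024 * 1024;
  if (dst > src) {
    // The positive half of the signed immediate reaches one instruction less.
    range -= 4;
    return dst - src <= range;
  }
  return src - dst <= range;
}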

static void write32AArch64Addr(uint8_t *l, uint64_t imm) {
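  // Patch the immediate of an ADR/ADRP instruction: immlo occupies bits 30:29
  // and immhi bits 23:5 of the encoding.
  uint32_t immLo = (imm & 0x3) << 29;
  uint32_t immHi = (imm & 0x1FFFFC) << 3;
  uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
  write32le(l, (read32le(l) & ~mask) | immLo | immHi);
}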

static void writeMaskedBits32le(uint8_t *p, int32_t v, uint32_t mask) {
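  // Read-modify-write: clear the bits covered by mask, then insert v, which
  // the caller has already shifted into position within the field.
  write32le(p, (read32le(p) & ~mask) | v);
}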

// Update the immediate field in an AArch64 ldr, str, or add instruction.
static void write32Imm12(uint8_t *l, uint64_t imm) {
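  // The unsigned imm12 field occupies bits 21:10 in these encodings.
  writeMaskedBits32le(l, (imm & 0xFFF) << 10, 0xFFF << 10);
}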

// Update the immediate field in an AArch64 movk, movn or movz instruction
// for a signed relocation, and update the opcode of a movn or movz instruction
// to match the sign of the operand.
static void writeSMovWImm(uint8_t *loc, uint32_t imm) {}

void AArch64::relocate(uint8_t *loc, const Relocation &rel,
                       uint64_t val) const {}

void AArch64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {}

void AArch64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {}

void AArch64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel,
                             uint64_t val) const {}

AArch64Relaxer::AArch64Relaxer(ArrayRef<Relocation> relocs) {}

bool AArch64Relaxer::tryRelaxAdrpAdd(const Relocation &adrpRel,
                                     const Relocation &addRel, uint64_t secAddr,
                                     uint8_t *buf) const {}

bool AArch64Relaxer::tryRelaxAdrpLdr(const Relocation &adrpRel,
                                     const Relocation &ldrRel, uint64_t secAddr,
                                     uint8_t *buf) const {}

// Tagged symbols have upper address bits that are added by the dynamic loader,
// and thus need the full 64-bit GOT entry. Do not relax such symbols.
static bool needsGotForMemtag(const Relocation &rel) {
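  // A sketch, assuming a needsGot(RelExpr) predicate is visible from this
  // file: a tagged symbol keeps its GOT entry whenever the relocation would
  // otherwise consume one.
  return rel.sym->isTagged() && needsGot(rel.expr);
}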

void AArch64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const {}

// AArch64 may use security features in variant PLT sequences. These are:
// Pointer Authentication (PAC), introduced in armv8.3-a, and Branch Target
// Identification (BTI), introduced in armv8.5-a. The additional instructions
// used in the variant PLT sequences are encoded in the hint space so they can
// be deployed on older architectures, which treat them as NOPs. PAC and BTI
// can be combined, leading to the following combinations:
// writePltHeader
// writePltHeaderBti (no PAC header needed)
// writePlt
// writePltBti (BTI only)
// writePltPac (PAC only)
// writePltBtiPac (BTI and PAC)
//
// When PAC is enabled the dynamic loader signs the address that it places in
// the .got.plt using the pacia1716 instruction, which signs the value in x17
// using the modifier in x16. The static linker places autia1716 before the
// indirect branch to x17 to authenticate the address in x17 with the modifier
// in x16. This makes it more difficult for an attacker to modify the value in
// the .got.plt.
//
// When BTI is enabled all indirect branches must land on a bti instruction.
// The static linker must place a bti instruction at the start of any PLT
// entry that may be the target of an indirect branch. As the PLT entries
// branch to the lazy resolver indirectly, the PLT header must start with a
// bti instruction. In general a bti instruction is not needed for a PLT
// entry, as indirect calls are resolved to the function address and not the
// PLT entry for the function. There are a small number of cases where the PLT
// address can escape, such as when the address of a function or ifunc is
// taken via a non-GOT-generating relocation and a shared library refers to
// that symbol.
//
// We use the bti c variant of the instruction, which permits indirect
// branches (br) via x16/x17 and indirect function calls (blr) via any
// register. The ABI guarantees that all indirect branches from code requiring
// BTI protection will go via x16/x17.
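//
// For orientation, a combined BTI+PAC PLT entry is expected to look roughly
// like the sketch below; the exact byte encodings are emitted by
// writePltBtiPac:
//   bti  c                             // landing pad for indirect entry
//   adrp x16, Page(&(.got.plt[n]))
//   ldr  x17, [x16, :lo12: &(.got.plt[n])]
//   add  x16, x16, :lo12: &(.got.plt[n])
//   autia1716                          // authenticate x17 with modifier x16
//   br   x17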

namespace {
class AArch64BtiPac final : public AArch64 {};
} // namespace

AArch64BtiPac::AArch64BtiPac(Ctx &ctx) : AArch64(ctx) {}

void AArch64BtiPac::writePltHeader(uint8_t *buf) const {}

void AArch64BtiPac::writePlt(uint8_t *buf, const Symbol &sym,
                             uint64_t pltEntryAddr) const {}

template <class ELFT>
static void
addTaggedSymbolReferences(InputSectionBase &sec,
                          DenseMap<Symbol *, unsigned> &referenceCount) {}

// A tagged symbol must be denoted as tagged by all of its references and by
// the chosen definition. For simplicity, we also require it to be denoted as
// tagged by all definitions. Otherwise:
//
//  1. A tagged definition can be used by an untagged declaration, in which case
//     the untagged access may be PC-relative, causing a tag mismatch at
//     runtime.
//  2. An untagged definition can be used by a tagged declaration, where the
//     compiler has taken advantage of the increased alignment of the tagged
//     declaration, but the alignment at runtime is wrong, causing a fault.
//
// Ideally this never arises, as any TU that imports or exports tagged symbols
// should also be built with tagging. But to handle these cases, we demote the
// symbol to untagged.
void lld::elf::createTaggedSymbols(const SmallVector<ELFFileBase *, 0> &files) {}

TargetInfo *elf::getAArch64TargetInfo(Ctx &ctx) {}