// llvm/lld/ELF/SyntheticSections.cpp

//===- SyntheticSections.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains linker-synthesized sections. Currently,
// synthetic sections are created either output sections or input sections,
// but we are rewriting code so that all synthetic sections are created as
// input sections.
//
//===----------------------------------------------------------------------===//

#include "SyntheticSections.h"
#include "Config.h"
#include "DWARF.h"
#include "EhFrame.h"
#include "InputFiles.h"
#include "LinkerScript.h"
#include "OutputSections.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Target.h"
#include "Thunks.h"
#include "Writer.h"
#include "lld/Common/CommonLinkerContext.h"
#include "lld/Common/DWARF.h"
#include "lld/Common/Strings.h"
#include "lld/Common/Version.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
#include "llvm/DebugInfo/DWARF/DWARFDebugPubTable.h"
#include "llvm/Support/DJB.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/TimeProfiler.h"
#include <cinttypes>
#include <cstdlib>

usingnamespacellvm;
usingnamespacellvm::dwarf;
usingnamespacellvm::ELF;
usingnamespacellvm::object;
usingnamespacellvm::support;
usingnamespacelld;
usingnamespacelld::elf;

read32le;
write32le;
write64le;

// Out-of-line definition of the static constexpr member, required for
// odr-use before C++17 (redundant but harmless once static constexpr
// members are implicitly inline).
constexpr size_t MergeNoTailSection::numShards;

static uint64_t readUint(Ctx &ctx, uint8_t *buf) {}

static void writeUint(uint8_t *buf, uint64_t val) {}

// Returns an LLD version string.
static ArrayRef<uint8_t> getVersion() {}

// Creates a .comment section containing LLD version info.
// With this feature, you can identify LLD-generated binaries easily
// by "readelf --string-dump .comment <file>".
// The returned object is a mergeable string section.
MergeInputSection *elf::createCommentSection() {}

// .MIPS.abiflags section.
// Constructor for the synthetic .MIPS.abiflags section.
template <class ELFT>
MipsAbiFlagsSection<ELFT>::MipsAbiFlagsSection(Ctx &ctx,
                                               Elf_Mips_ABIFlags flags)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT> void MipsAbiFlagsSection<ELFT>::writeTo(uint8_t *buf) {}

template <class ELFT>
std::unique_ptr<MipsAbiFlagsSection<ELFT>>
MipsAbiFlagsSection<ELFT>::create(Ctx &ctx) {}

// .MIPS.options section.
// Constructor for the synthetic .MIPS.options section.
template <class ELFT>
MipsOptionsSection<ELFT>::MipsOptionsSection(Ctx &ctx, Elf_Mips_RegInfo reginfo)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT> void MipsOptionsSection<ELFT>::writeTo(uint8_t *buf) {}

template <class ELFT>
std::unique_ptr<MipsOptionsSection<ELFT>>
MipsOptionsSection<ELFT>::create(Ctx &ctx) {}

// MIPS .reginfo section.
// Constructor for the MIPS .reginfo section.
template <class ELFT>
MipsReginfoSection<ELFT>::MipsReginfoSection(Ctx &ctx, Elf_Mips_RegInfo reginfo)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT> void MipsReginfoSection<ELFT>::writeTo(uint8_t *buf) {}

template <class ELFT>
std::unique_ptr<MipsReginfoSection<ELFT>>
MipsReginfoSection<ELFT>::create(Ctx &ctx) {}

InputSection *elf::createInterpSection(Ctx &) {}

Defined *elf::addSyntheticLocal(StringRef name, uint8_t type, uint64_t value,
                                uint64_t size, InputSectionBase &section) {}

static size_t getHashSize() {}

// This class represents a linker-synthesized .note.gnu.property section.
//
// In x86 and AArch64, object files may contain feature flags indicating the
// features that they have used. The flags are stored in a .note.gnu.property
// section.
//
// lld reads the sections from input files and merges them by computing AND of
// the flags. The result is written as a new .note.gnu.property section.
//
// If the flag is zero (which indicates that the intersection of the feature
// sets is empty, or some input files didn't have .note.gnu.property sections),
// we don't create this section.
// Constructor for the linker-synthesized .note.gnu.property section
// (see the comment block above for the merge semantics).
GnuPropertySection::GnuPropertySection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void GnuPropertySection::writeTo(uint8_t *buf) {}

size_t GnuPropertySection::getSize() const {}

// Constructor for the .note.gnu.build-id section.
BuildIdSection::BuildIdSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void BuildIdSection::writeTo(uint8_t *buf) {}

void BuildIdSection::writeBuildId(ArrayRef<uint8_t> buf) {}

// Constructor for a synthetic SHT_NOBITS (.bss-style) section.
BssSection::BssSection(Ctx &ctx, StringRef name, uint64_t size,
                       uint32_t alignment)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Constructor for the merged output .eh_frame section.
EhFrameSection::EhFrameSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Search for an existing CIE record or create a new one.
// CIE records from input object files are uniquified by their contents
// and where their relocations point to.
template <class ELFT, class RelTy>
CieRecord *EhFrameSection::addCie(EhSectionPiece &cie, ArrayRef<RelTy> rels) {}

// There is one FDE per function. Returns a non-null pointer to the function
// symbol if the given FDE points to a live function.
template <class ELFT, class RelTy>
Defined *EhFrameSection::isFdeLive(EhSectionPiece &fde, ArrayRef<RelTy> rels) {}

// .eh_frame is a sequence of CIE or FDE records. In general, there
// is one CIE record per input object file which is followed by
// a list of FDEs. This function searches an existing CIE or create a new
// one and associates FDEs to the CIE.
template <class ELFT, class RelTy>
void EhFrameSection::addRecords(EhInputSection *sec, ArrayRef<RelTy> rels) {}

template <class ELFT>
void EhFrameSection::addSectionAux(EhInputSection *sec) {}

// Used by ICF<ELFT>::handleLSDA(). This function is very similar to
// EhFrameSection::addRecords().
template <class ELFT, class RelTy>
void EhFrameSection::iterateFDEWithLSDAAux(
    EhInputSection &sec, ArrayRef<RelTy> rels, DenseSet<size_t> &ciesWithLSDA,
    llvm::function_ref<void(InputSection &)> fn) {}

template <class ELFT>
void EhFrameSection::iterateFDEWithLSDA(
    llvm::function_ref<void(InputSection &)> fn) {}

static void writeCieFde(uint8_t *buf, ArrayRef<uint8_t> d) {}

void EhFrameSection::finalizeContents() {}

// Returns data for .eh_frame_hdr. .eh_frame_hdr is a binary search table
// to get an FDE from an address to which FDE is applied. This function
// returns a list of such pairs.
SmallVector<EhFrameSection::FdeData, 0> EhFrameSection::getFdeData() const {}

static uint64_t readFdeAddr(Ctx &ctx, uint8_t *buf, int size) {}

// Returns the VA to which a given FDE (on a mmap'ed buffer) is applied to.
// We need it to create .eh_frame_hdr section.
uint64_t EhFrameSection::getFdePc(uint8_t *buf, size_t fdeOff,
                                  uint8_t enc) const {}

void EhFrameSection::writeTo(uint8_t *buf) {}

// Constructor for the generic .got section.
GotSection::GotSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void GotSection::addConstant(const Relocation &r) {}
void GotSection::addEntry(const Symbol &sym) {}

bool GotSection::addTlsDescEntry(const Symbol &sym) {}

bool GotSection::addDynTlsEntry(const Symbol &sym) {}

// Reserves TLS entries for a TLS module ID and a TLS block offset.
// In total it takes two GOT slots.
bool GotSection::addTlsIndex() {}

uint32_t GotSection::getTlsDescOffset(const Symbol &sym) const {}

uint64_t GotSection::getTlsDescAddr(const Symbol &sym) const {}

uint64_t GotSection::getGlobalDynAddr(const Symbol &b) const {}

uint64_t GotSection::getGlobalDynOffset(const Symbol &b) const {}

void GotSection::finalizeContents() {}

bool GotSection::isNeeded() const {}

void GotSection::writeTo(uint8_t *buf) {}

static uint64_t getMipsPageAddr(uint64_t addr) {}

static uint64_t getMipsPageCount(uint64_t size) {}

// Constructor for the MIPS multi-GOT section.
MipsGotSection::MipsGotSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void MipsGotSection::addEntry(InputFile &file, Symbol &sym, int64_t addend,
                              RelExpr expr) {}

void MipsGotSection::addDynTlsEntry(InputFile &file, Symbol &sym) {}

void MipsGotSection::addTlsIndex(InputFile &file) {}

size_t MipsGotSection::FileGot::getEntriesNum() const {}

size_t MipsGotSection::FileGot::getPageEntriesNum() const {}

size_t MipsGotSection::FileGot::getIndexedEntriesNum() const {}

MipsGotSection::FileGot &MipsGotSection::getGot(InputFile &f) {}

uint64_t MipsGotSection::getPageEntryOffset(const InputFile *f,
                                            const Symbol &sym,
                                            int64_t addend) const {}

uint64_t MipsGotSection::getSymEntryOffset(const InputFile *f, const Symbol &s,
                                           int64_t addend) const {}

uint64_t MipsGotSection::getTlsIndexOffset(const InputFile *f) const {}

uint64_t MipsGotSection::getGlobalDynOffset(const InputFile *f,
                                            const Symbol &s) const {}

const Symbol *MipsGotSection::getFirstGlobalEntry() const {}

unsigned MipsGotSection::getLocalEntriesNum() const {}

bool MipsGotSection::tryMergeGots(FileGot &dst, FileGot &src, bool isPrimary) {}

void MipsGotSection::finalizeContents() {}

bool MipsGotSection::updateAllocSize(Ctx &ctx) {}

void MipsGotSection::build() {}

bool MipsGotSection::isNeeded() const {}

uint64_t MipsGotSection::getGp(const InputFile *f) const {}

void MipsGotSection::writeTo(uint8_t *buf) {}

// On PowerPC the .plt section is used to hold the table of function addresses
// instead of the .got.plt, and the type is SHT_NOBITS similar to a .bss
// section. I don't know why we have a BSS style type for the section but it is
// consistent across both 64-bit PowerPC ABIs as well as the 32-bit PowerPC ABI.
// Constructor for .got.plt (see the PowerPC note above regarding its
// BSS-style type there).
GotPltSection::GotPltSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void GotPltSection::addEntry(Symbol &sym) {}

size_t GotPltSection::getSize() const {}

void GotPltSection::writeTo(uint8_t *buf) {}

bool GotPltSection::isNeeded() const {}

static StringRef getIgotPltName() {}

// On PowerPC64 the GotPltSection type is SHT_NOBITS so we have to follow suit
// with the IgotPltSection.
// Constructor for the ifunc variant of .got.plt.
IgotPltSection::IgotPltSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void IgotPltSection::addEntry(Symbol &sym) {}

size_t IgotPltSection::getSize() const {}

void IgotPltSection::writeTo(uint8_t *buf) {}

// Constructor for a string table (.strtab/.dynstr depending on `dynamic`).
StringTableSection::StringTableSection(Ctx &ctx, StringRef name, bool dynamic)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Adds a string to the string table. If `hashIt` is true we hash and check for
// duplicates. It is optional because the name of global symbols are already
// uniqued and hashing them again has a big cost for a small value: uniquing
// them with some other string that happens to be the same.
unsigned StringTableSection::addString(StringRef s, bool hashIt) {}

void StringTableSection::writeTo(uint8_t *buf) {}

// Returns the number of entries in .gnu.version_d: the number of
// non-VER_NDX_LOCAL-non-VER_NDX_GLOBAL definitions, plus 1.
// Note that we don't support vd_cnt > 1 yet.
static unsigned getVerDefNum() {}

// Constructor for the .dynamic section.
template <class ELFT>
DynamicSection<ELFT>::DynamicSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// The output section .rela.dyn may include these synthetic sections:
//
// - part.relaDyn
// - ctx.in.relaPlt: this is included if a linker script places .rela.plt inside
//   .rela.dyn
//
// DT_RELASZ is the total size of the included sections.
static uint64_t addRelaSz(const RelocationBaseSection &relaDyn) {}

// A Linker script may assign the RELA relocation sections to the same
// output section. When this occurs we cannot just use the OutputSection
// Size. Moreover the [DT_JMPREL, DT_JMPREL + DT_PLTRELSZ) is permitted to
// overlap with the [DT_RELA, DT_RELA + DT_RELASZ).
static uint64_t addPltRelSz() {}

// Add remaining entries to complete .dynamic contents.
template <class ELFT>
std::vector<std::pair<int32_t, uint64_t>>
DynamicSection<ELFT>::computeContents() {}

template <class ELFT> void DynamicSection<ELFT>::finalizeContents() {}

template <class ELFT> void DynamicSection<ELFT>::writeTo(uint8_t *buf) {}

uint64_t DynamicReloc::getOffset() const {}

int64_t DynamicReloc::computeAddend() const {}

uint32_t DynamicReloc::getSymIndex(SymbolTableBaseSection *symTab) const {}

// Base constructor shared by the dynamic-relocation section flavors.
RelocationBaseSection::RelocationBaseSection(Ctx &ctx, StringRef name,
                                             uint32_t type, int32_t dynamicTag,
                                             int32_t sizeDynamicTag,
                                             bool combreloc,
                                             unsigned concurrency)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void RelocationBaseSection::addSymbolReloc(
    RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym,
    int64_t addend, std::optional<RelType> addendRelType) {}

void RelocationBaseSection::addAddendOnlyRelocIfNonPreemptible(
    RelType dynType, InputSectionBase &isec, uint64_t offsetInSec, Symbol &sym,
    RelType addendRelType) {}

void RelocationBaseSection::mergeRels() {}

void RelocationBaseSection::partitionRels() {}

void RelocationBaseSection::finalizeContents() {}

void DynamicReloc::computeRaw(SymbolTableBaseSection *symt) {}

void RelocationBaseSection::computeRels() {}

// Constructor for a REL/RELA dynamic relocation section.
template <class ELFT>
RelocationSection<ELFT>::RelocationSection(Ctx &ctx, StringRef name,
                                           bool combreloc, unsigned concurrency)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT> void RelocationSection<ELFT>::writeTo(uint8_t *buf) {}

// Base constructor for RELR-style relative relocation sections.
RelrBaseSection::RelrBaseSection(Ctx &ctx, unsigned concurrency,
                                 bool isAArch64Auth)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void RelrBaseSection::mergeRels() {}

// Constructor for the Android packed relocation section.
template <class ELFT>
AndroidPackedRelocationSection<ELFT>::AndroidPackedRelocationSection(
    Ctx &ctx, StringRef name, unsigned concurrency)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT>
bool AndroidPackedRelocationSection<ELFT>::updateAllocSize(Ctx &ctx) {}

// Constructor for the SHT_RELR section.
template <class ELFT>
RelrSection<ELFT>::RelrSection(Ctx &ctx, unsigned concurrency,
                               bool isAArch64Auth)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT> bool RelrSection<ELFT>::updateAllocSize(Ctx &ctx) {}

// Base constructor shared by .symtab/.dynsym; ties the symbol table to
// its string table.
SymbolTableBaseSection::SymbolTableBaseSection(Ctx &ctx,
                                               StringTableSection &strTabSec)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Orders symbols according to their positions in the GOT,
// in compliance with MIPS ABI rules.
// See "Global Offset Table" in Chapter 5 in the following document
// for detailed description:
// ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
static bool sortMipsSymbols(const SymbolTableEntry &l,
                            const SymbolTableEntry &r) {}

void SymbolTableBaseSection::finalizeContents() {}

// The ELF spec requires that all local symbols precede global symbols, so we
// sort symbol entries in this function. (For .dynsym, we don't do that because
// symbols for dynamic linking are inherently all globals.)
//
// Aside from above, we put local symbols in groups starting with the STT_FILE
// symbol. That is convenient for purpose of identifying where are local symbols
// coming from.
void SymbolTableBaseSection::sortSymTabSymbols() {}

void SymbolTableBaseSection::addSymbol(Symbol *b) {}

size_t SymbolTableBaseSection::getSymbolIndex(const Symbol &sym) {}

// Constructor for the ELFT-typed symbol table section.
template <class ELFT>
SymbolTableSection<ELFT>::SymbolTableSection(Ctx &ctx,
                                             StringTableSection &strTabSec)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

static BssSection *getCommonSec(Symbol *sym) {}

static uint32_t getSymSectionIndex(Symbol *sym) {}

// Write the internal symbol table contents to the output symbol table.
template <class ELFT> void SymbolTableSection<ELFT>::writeTo(uint8_t *buf) {}

// Constructor for .symtab_shndx (extended section indices).
SymtabShndxSection::SymtabShndxSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void SymtabShndxSection::writeTo(uint8_t *buf) {}

bool SymtabShndxSection::isNeeded() const {}

void SymtabShndxSection::finalizeContents() {}

size_t SymtabShndxSection::getSize() const {}

// .hash and .gnu.hash sections contain on-disk hash tables that map
// symbol names to their dynamic symbol table indices. Their purpose
// is to help the dynamic linker resolve symbols quickly. If ELF files
// don't have them, the dynamic linker has to do linear search on all
// dynamic symbols, which makes programs slower. Therefore, a .hash
// section is added to a DSO by default.
//
// The Unix semantics of resolving dynamic symbols is somewhat expensive.
// Each ELF file has a list of DSOs that the ELF file depends on and a
// list of dynamic symbols that need to be resolved from any of the
// DSOs. That means resolving all dynamic symbols takes O(m)*O(n)
// where m is the number of DSOs and n is the number of dynamic
// symbols. For modern large programs, both m and n are large.  So
// making each step faster by using hash tables substantially
// improves time to load programs.
//
// (Note that this is not the only way to design the shared library.
// For instance, the Windows DLL takes a different approach. On
// Windows, each dynamic symbol has a name of DLL from which the symbol
// has to be resolved. That makes the cost of symbol resolution O(n).
// This disables some hacky techniques you can use on Unix such as
// LD_PRELOAD, but this is arguably better semantics than the Unix ones.)
//
// Due to historical reasons, we have two different hash tables, .hash
// and .gnu.hash. They are for the same purpose, and .gnu.hash is a new
// and better version of .hash. .hash is just an on-disk hash table, but
// .gnu.hash has a bloom filter in addition to a hash table to skip
// DSOs very quickly. If you are sure that your dynamic linker knows
// about .gnu.hash, you want to specify --hash-style=gnu. Otherwise, a
// safe bet is to specify --hash-style=both for backward compatibility.
// Constructor for .gnu.hash (see the hash-table discussion above).
GnuHashTableSection::GnuHashTableSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void GnuHashTableSection::finalizeContents() {}

void GnuHashTableSection::writeTo(uint8_t *buf) {}

// Add symbols to this symbol hash table. Note that this function
// destructively sort a given vector -- which is needed because
// GNU-style hash table places some sorting requirements.
void GnuHashTableSection::addSymbols(SmallVectorImpl<SymbolTableEntry> &v) {}

// Constructor for the classic SysV .hash section.
HashTableSection::HashTableSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void HashTableSection::finalizeContents() {}

void HashTableSection::writeTo(uint8_t *buf) {}

// Constructor for the .plt section.
PltSection::PltSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void PltSection::writeTo(uint8_t *buf) {}

void PltSection::addEntry(Symbol &sym) {}

size_t PltSection::getSize() const {}

bool PltSection::isNeeded() const {}

// Used by ARM to add mapping symbols in the PLT section, which aid
// disassembly.
void PltSection::addSymbols() {}

// Constructor for the ifunc PLT (.iplt) section.
IpltSection::IpltSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void IpltSection::writeTo(uint8_t *buf) {}

size_t IpltSection::getSize() const {}

void IpltSection::addEntry(Symbol &sym) {}

// ARM uses mapping symbols to aid disassembly.
void IpltSection::addSymbols() {}

// Constructor for the PPC32 .glink stub section.
// NOTE(review): mem-initializer list lost in extraction (":{}" is
// ill-formed C++); restore initializers from upstream lld.
PPC32GlinkSection::PPC32GlinkSection(Ctx &ctx) {}

void PPC32GlinkSection::writeTo(uint8_t *buf) {}

size_t PPC32GlinkSection::getSize() const {}

// This is an x86-only extra PLT section and used only when a security
// enhancement feature called CET is enabled. In this comment, I'll explain what
// the feature is and why we have two PLT sections if CET is enabled.
//
// So, what does CET do? CET introduces a new restriction to indirect jump
// instructions. CET works this way. Assume that CET is enabled. Then, if you
// execute an indirect jump instruction, the processor verifies that a special
// "landing pad" instruction (which is actually a repurposed NOP instruction and
// now called "endbr32" or "endbr64") is at the jump target. If the jump target
// does not start with that instruction, the processor raises an exception
// instead of continuing executing code.
//
// If CET is enabled, the compiler emits endbr to all locations where indirect
// jumps may jump to.
//
// This mechanism makes it extremely hard to transfer the control to a middle of
// a function that is not supporsed to be a indirect jump target, preventing
// certain types of attacks such as ROP or JOP.
//
// Note that the processors in the market as of 2019 don't actually support the
// feature. Only the spec is available at the moment.
//
// Now, I'll explain why we have this extra PLT section for CET.
//
// Since you can indirectly jump to a PLT entry, we have to make PLT entries
// start with endbr. The problem is there's no extra space for endbr (which is 4
// bytes long), as the PLT entry is only 16 bytes long and all bytes are already
// used.
//
// In order to deal with the issue, we split a PLT entry into two PLT entries.
// Remember that each PLT entry contains code to jump to an address read from
// .got.plt AND code to resolve a dynamic symbol lazily. With the 2-PLT scheme,
// the former code is written to .plt.sec, and the latter code is written to
// .plt.
//
// Lazy symbol resolution in the 2-PLT scheme works in the usual way, except
// that the regular .plt is now called .plt.sec and .plt is repurposed to
// contain only code for lazy symbol resolution.
//
// In other words, this is how the 2-PLT scheme works. Application code is
// supposed to jump to .plt.sec to call an external function. Each .plt.sec
// entry contains code to read an address from a corresponding .got.plt entry
// and jump to that address. Addresses in .got.plt initially point to .plt, so
// when an application calls an external function for the first time, the
// control is transferred to a function that resolves a symbol name from
// external shared object files. That function then rewrites a .got.plt entry
// with a resolved address, so that the subsequent function calls directly jump
// to a desired location from .plt.sec.
//
// There is an open question as to whether the 2-PLT scheme was desirable or
// not. We could have simply extended the PLT entry size to 32-bytes to
// accommodate endbr, and that scheme would have been much simpler than the
// 2-PLT scheme. One reason to split PLT was, by doing that, we could keep hot
// code (.plt.sec) from cold code (.plt). But as far as I know no one proved
// that the optimization actually makes a difference.
//
// That said, the 2-PLT scheme is a part of the ABI, debuggers and other tools
// depend on it, so we implement the ABI.
// Constructor for the CET/IBT second PLT (see the 2-PLT scheme comment
// block above).
IBTPltSection::IBTPltSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void IBTPltSection::writeTo(uint8_t *buf) {}

size_t IBTPltSection::getSize() const {}

bool IBTPltSection::isNeeded() const {}

// Constructor for the synthetic relro padding section.
RelroPaddingSection::RelroPaddingSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// The string hash function for .gdb_index.
static uint32_t computeGdbHash(StringRef s) {}

// 4-byte alignment ensures that values in the hash lookup table and the name
// table are aligned.
// Base constructor for the merged .debug_names section; 4-byte alignment
// keeps the hash lookup table and name table aligned (see comment above).
DebugNamesBaseSection::DebugNamesBaseSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Get the size of the .debug_names section header in bytes for DWARF32:
static uint32_t getDebugNamesHeaderSize(uint32_t augmentationStringSize) {}

static Expected<DebugNamesBaseSection::IndexEntry *>
readEntry(uint64_t &offset, const DWARFDebugNames::NameIndex &ni,
          uint64_t entriesBase, DWARFDataExtractor &namesExtractor,
          const LLDDWARFSection &namesSec) {}

void DebugNamesBaseSection::parseDebugNames(
    InputChunk &inputChunk, OutputChunk &chunk,
    DWARFDataExtractor &namesExtractor, DataExtractor &strExtractor,
    function_ref<SmallVector<uint32_t, 0>(
        uint32_t numCus, const DWARFDebugNames::Header &,
        const DWARFDebugNames::DWARFDebugNamesOffsets &)>
        readOffsets) {}

// Compute the form for output DW_IDX_compile_unit attributes, similar to
// DIEInteger::BestForm. The input form (often DW_FORM_data1) may not hold all
// the merged CU indices.
std::pair<uint8_t, dwarf::Form> static getMergedCuCountForm(
    uint32_t compUnitCount) {}

void DebugNamesBaseSection::computeHdrAndAbbrevTable(
    MutableArrayRef<InputChunk> inputChunks) {}

void DebugNamesBaseSection::Abbrev::Profile(FoldingSetNodeID &id) const {}

std::pair<uint32_t, uint32_t> DebugNamesBaseSection::computeEntryPool(
    MutableArrayRef<InputChunk> inputChunks) {}

void DebugNamesBaseSection::init(
    function_ref<void(InputFile *, InputChunk &, OutputChunk &)> parseFile) {}

// Constructor for the ELFT-typed .debug_names section.
template <class ELFT>
DebugNamesSection<ELFT>::DebugNamesSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT>
template <class RelTy>
void DebugNamesSection<ELFT>::getNameRelocs(
    const InputFile &file, DenseMap<uint32_t, uint32_t> &relocs,
    Relocs<RelTy> rels) {}

template <class ELFT> void DebugNamesSection<ELFT>::finalizeContents() {}

template <class ELFT> void DebugNamesSection<ELFT>::writeTo(uint8_t *buf) {}

// Constructor for the .gdb_index section.
GdbIndexSection::GdbIndexSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Returns the desired size of an on-disk hash table for a .gdb_index section.
// There's a tradeoff between size and collision rate. We aim 75% utilization.
size_t GdbIndexSection::computeSymtabSize() const {}

static SmallVector<GdbIndexSection::CuEntry, 0>
readCuList(DWARFContext &dwarf) {}

static SmallVector<GdbIndexSection::AddressEntry, 0>
readAddressAreas(DWARFContext &dwarf, InputSection *sec) {}

template <class ELFT>
static SmallVector<GdbIndexSection::NameAttrEntry, 0>
readPubNamesAndTypes(const LLDDwarfObj<ELFT> &obj,
                     const SmallVectorImpl<GdbIndexSection::CuEntry> &cus) {}

// Create a list of symbols from a given list of symbol names and types
// by uniquifying them by name.
static std::pair<SmallVector<GdbIndexSection::GdbSymbol, 0>, size_t>
createSymbols(
    ArrayRef<SmallVector<GdbIndexSection::NameAttrEntry, 0>> nameAttrs,
    const SmallVector<GdbIndexSection::GdbChunk, 0> &chunks) {}

// Returns a newly-created .gdb_index section.
template <class ELFT>
std::unique_ptr<GdbIndexSection> GdbIndexSection::create(Ctx &ctx) {}

void GdbIndexSection::writeTo(uint8_t *buf) {}

bool GdbIndexSection::isNeeded() const {}

// Constructor for the .eh_frame_hdr section.
EhFrameHeader::EhFrameHeader(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void EhFrameHeader::writeTo(uint8_t *buf) {}

// .eh_frame_hdr contains a binary search table of pointers to FDEs.
// Each entry of the search table consists of two values,
// the starting PC from where FDEs covers, and the FDE's address.
// It is sorted by PC.
void EhFrameHeader::write() {}

size_t EhFrameHeader::getSize() const {}

bool EhFrameHeader::isNeeded() const {}

// Constructor for .gnu.version_d.
VersionDefinitionSection::VersionDefinitionSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

StringRef VersionDefinitionSection::getFileDefName() {}

void VersionDefinitionSection::finalizeContents() {}

void VersionDefinitionSection::writeOne(uint8_t *buf, uint32_t index,
                                        StringRef name, size_t nameOff) {}

void VersionDefinitionSection::writeTo(uint8_t *buf) {}

size_t VersionDefinitionSection::getSize() const {}

// .gnu.version is a table where each entry is 2 byte long.
// Constructor for .gnu.version (2-byte entries; see comment above).
VersionTableSection::VersionTableSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

void VersionTableSection::finalizeContents() {}

size_t VersionTableSection::getSize() const {}

void VersionTableSection::writeTo(uint8_t *buf) {}

bool VersionTableSection::isNeeded() const {}

void elf::addVerneed(Symbol *ss) {}

// Constructor for .gnu.version_r.
template <class ELFT>
VersionNeedSection<ELFT>::VersionNeedSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

template <class ELFT> void VersionNeedSection<ELFT>::finalizeContents() {}

template <class ELFT> void VersionNeedSection<ELFT>::writeTo(uint8_t *buf) {}

template <class ELFT> size_t VersionNeedSection<ELFT>::getSize() const {}

template <class ELFT> bool VersionNeedSection<ELFT>::isNeeded() const {}

void MergeSyntheticSection::addSection(MergeInputSection *ms) {}

// Constructor for the tail-merging variant of a merge section.
MergeTailSection::MergeTailSection(Ctx &ctx, StringRef name, uint32_t type,
                                   uint64_t flags, uint32_t alignment)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

size_t MergeTailSection::getSize() const {}

void MergeTailSection::writeTo(uint8_t *buf) {}

void MergeTailSection::finalizeContents() {}

void MergeNoTailSection::writeTo(uint8_t *buf) {}

// This function is very hot (i.e. it can take several seconds to finish)
// because sometimes the number of inputs is in an order of magnitude of
// millions. So, we use multi-threading.
//
// For any strings S and T, we know S is not mergeable with T if S's hash
// value is different from T's. If that's the case, we can safely put S and
// T into different string builders without worrying about merge misses.
// We do it in parallel.
void MergeNoTailSection::finalizeContents() {}

template <class ELFT> void elf::splitSections(Ctx &ctx) {}

void elf::combineEhSections(Ctx &ctx) {}

// Constructor for the MIPS .rld_map section.
MipsRldMapSection::MipsRldMapSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

// Constructor for the merged .ARM.exidx unwind-table section.
ARMExidxSyntheticSection::ARMExidxSyntheticSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

static InputSection *findExidxSection(InputSection *isec) {}

static bool isValidExidxSectionDep(InputSection *isec) {}

bool ARMExidxSyntheticSection::addSection(InputSection *isec) {}

// References to .ARM.Extab Sections have bit 31 clear and are not the
// special EXIDX_CANTUNWIND bit-pattern.
static bool isExtabRef(uint32_t unwind) {}

// Return true if the .ARM.exidx section Cur can be merged into the .ARM.exidx
// section Prev, where Cur follows Prev in the table. This can be done if the
// unwinding instructions in Cur are identical to Prev. Linker generated
// EXIDX_CANTUNWIND entries are represented by nullptr as they do not have an
// InputSection.
static bool isDuplicateArmExidxSec(InputSection *prev, InputSection *cur) {}

// The .ARM.exidx table must be sorted in ascending order of the address of the
// functions the table describes. std::optionally duplicate adjacent table
// entries can be removed. At the end of the function the executableSections
// must be sorted in ascending order of address, Sentinel is set to the
// InputSection with the highest address and any InputSections that have
// mergeable .ARM.exidx table entries are removed from it.
void ARMExidxSyntheticSection::finalizeContents() {}

InputSection *ARMExidxSyntheticSection::getLinkOrderDep() const {}

// To write the .ARM.exidx table from the ExecutableSections we have three cases
// 1.) The InputSection has a .ARM.exidx InputSection in its dependent sections.
//     We write the .ARM.exidx section contents and apply its relocations.
// 2.) The InputSection does not have a dependent .ARM.exidx InputSection. We
//     must write the contents of an EXIDX_CANTUNWIND directly. We use the
//     start of the InputSection as the purpose of the linker generated
//     section is to terminate the address range of the previous entry.
// 3.) A trailing EXIDX_CANTUNWIND sentinel section is required at the end of
//     the table to terminate the address range of the final entry.
void ARMExidxSyntheticSection::writeTo(uint8_t *buf) {}

bool ARMExidxSyntheticSection::isNeeded() const {}

// Constructor for a thunk container placed at offset `off` inside `os`.
ThunkSection::ThunkSection(Ctx &ctx, OutputSection *os, uint64_t off)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

size_t ThunkSection::getSize() const {}

void ThunkSection::addThunk(Thunk *t) {}

void ThunkSection::writeTo(uint8_t *buf) {}

InputSection *ThunkSection::getTargetInputSection() const {}

bool ThunkSection::assignOffsets() {}

// Constructor for the PPC32 .got2 section.
PPC32Got2Section::PPC32Got2Section(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

bool PPC32Got2Section::isNeeded() const {}

void PPC32Got2Section::finalizeContents() {}

// If linking position-dependent code then the table will store the addresses
// directly in the binary so the section has type SHT_PROGBITS. If linking
// position-independent code the section has type SHT_NOBITS since it will be
// allocated and filled in by the dynamic linker.
// Constructor for the PPC64 long-branch target table (PROGBITS when
// position-dependent, NOBITS when PIC -- see comment above).
PPC64LongBranchTargetSection::PPC64LongBranchTargetSection(Ctx &ctx)
    // NOTE(review): mem-initializer list lost in extraction (":{}" is
    // ill-formed C++); restore initializers from upstream lld.
    {}

uint64_t PPC64LongBranchTargetSection::getEntryVA(const Symbol *sym,
                                                  int64_t addend) {}

std::optional<uint32_t>
PPC64LongBranchTargetSection::addEntry(const Symbol *sym, int64_t addend) {}

size_t PPC64LongBranchTargetSection::getSize() const {}

void PPC64LongBranchTargetSection::writeTo(uint8_t *buf) {}

bool PPC64LongBranchTargetSection::isNeeded() const {}

static uint8_t getAbiVersion(Ctx &ctx) {}

template <typename ELFT> void elf::writeEhdr(uint8_t *buf, Partition &part) {}

template <typename ELFT> void elf::writePhdrs(uint8_t *buf, Partition &part) {}

// Synthetic section holding the ELF header emitted for a partition.
template <typename ELFT>
PartitionElfHeaderSection<ELFT>::PartitionElfHeaderSection(Ctx &ctx)
    :{}

// Size in bytes of the partition's ELF header.
template <typename ELFT>
size_t PartitionElfHeaderSection<ELFT>::getSize() const {}

// Write the partition's ELF header into `buf`.
template <typename ELFT>
void PartitionElfHeaderSection<ELFT>::writeTo(uint8_t *buf) {}

// Synthetic section holding the program header table of a partition.
template <typename ELFT>
PartitionProgramHeadersSection<ELFT>::PartitionProgramHeadersSection(Ctx &ctx)
    :{}

// Size in bytes of the partition's program header table.
template <typename ELFT>
size_t PartitionProgramHeadersSection<ELFT>::getSize() const {}

// Write the partition's program headers into `buf`.
template <typename ELFT>
void PartitionProgramHeadersSection<ELFT>::writeTo(uint8_t *buf) {}

// Index of all loadable partitions in the output; record layout is declared
// in SyntheticSections.h — TODO confirm exact fields there.
PartitionIndexSection::PartitionIndexSection(Ctx &ctx)
    :{}

// Size in bytes of the partition index.
size_t PartitionIndexSection::getSize() const {}

// Finalize the index contents before writing.
void PartitionIndexSection::finalizeContents() {}

// Write the partition index records into `buf`.
void PartitionIndexSection::writeTo(uint8_t *buf) {}

// Clear the per-link singleton synthetic-section state so a subsequent link
// in the same process starts from a clean slate.
void InStruct::reset() {}

// Whether a PT_INTERP/.interp section is required for this link.
static bool needsInterpSection(Ctx &ctx) {}

// Whether AArch64 memory tagging (MTE) is enabled for this link.
bool elf::hasMemtag(Ctx &ctx) {}

// Fully static executables don't support MTE globals at this point in time, as
// we currently rely on:
//   - A dynamic loader to process relocations, and
//   - Dynamic entries.
// This restriction could be removed in future by re-using some of the ideas
// that ifuncs use in fully static executables.
bool elf::canHaveMemtagGlobals(Ctx &ctx) {}

constexpr char kMemtagAndroidNoteName[] =;
// Write the Android memory-tagging (MTE) ELF note payload into `buf`.
void MemtagAndroidNote::writeTo(uint8_t *buf) {}

// Size in bytes of the Android MTE note.
size_t MemtagAndroidNote::getSize() const {}

// Write the package-metadata ELF note payload into `buf`.
void PackageMetadataNote::writeTo(uint8_t *buf) {}

// Size in bytes of the package-metadata note.
size_t PackageMetadataNote::getSize() const {}

// Helper function, return the size of the ULEB128 for 'v', optionally writing
// it to `*(buf + offset)` if `buf` is non-null.
// Passing buf == nullptr gives a sizing-only pass, letting layout and
// emission share one code path.
static size_t computeOrWriteULEB128(uint64_t v, uint8_t *buf, size_t offset) {}

// https://github.com/ARM-software/abi-aa/blob/main/memtagabielf64/memtagabielf64.rst#83encoding-of-sht_aarch64_memtag_globals_dynamic
// NOTE(review): both initializers below were truncated to "=;" in this copy
// of the file; the values are restored from upstream LLVM lld.
// Bit width of the ULEB128 "distance" step used when encoding the gap
// between consecutive tagged globals.
constexpr uint64_t kMemtagStepSizeBits = 3;
// MTE operates on 16-byte granules; tagged globals are 16-byte aligned and
// their sizes are multiples of 16.
constexpr uint64_t kMemtagGranuleSize = 16;
// Encode (or, when `buf` is null, merely size) the descriptor stream for the
// memtag-protected globals in `symbols`; returns the byte count. The dual
// size/emit mode mirrors computeOrWriteULEB128 above.
static size_t
createMemtagGlobalDescriptors(Ctx &ctx,
                              const SmallVector<const Symbol *, 0> &symbols,
                              uint8_t *buf = nullptr) {}

// Recompute this section's size; the bool result presumably reports whether
// it changed (forcing another layout pass) — TODO confirm against the header.
bool MemtagGlobalDescriptors::updateAllocSize(Ctx &ctx) {}

// Write the encoded global-descriptor stream into `buf`.
void MemtagGlobalDescriptors::writeTo(uint8_t *buf) {}

// Size in bytes of the encoded global-descriptor stream.
size_t MemtagGlobalDescriptors::getSize() const {}

// Look up an output section by name; presumably returns null when absent —
// TODO confirm.
static OutputSection *findSection(StringRef name) {}

// Define a regular symbol `name` at `sec` + `val` with visibility `stOther`
// (hidden by default). "Optional" suggests the symbol is only defined when
// referenced — TODO confirm against callers.
static Defined *addOptionalRegular(Ctx &ctx, StringRef name, SectionBase *sec,
                                   uint64_t val, uint8_t stOther = STV_HIDDEN) {}

template <class ELFT> void elf::createSyntheticSections(Ctx &ctx) {}

// Explicit template instantiations for the four supported ELF flavors
// (32/64-bit, little/big-endian).
template void elf::splitSections<ELF32LE>(Ctx &);
template void elf::splitSections<ELF32BE>(Ctx &);
template void elf::splitSections<ELF64LE>(Ctx &);
template void elf::splitSections<ELF64BE>(Ctx &);

template void EhFrameSection::iterateFDEWithLSDA<ELF32LE>(
    function_ref<void(InputSection &)>);
template void EhFrameSection::iterateFDEWithLSDA<ELF32BE>(
    function_ref<void(InputSection &)>);
template void EhFrameSection::iterateFDEWithLSDA<ELF64LE>(
    function_ref<void(InputSection &)>);
template void EhFrameSection::iterateFDEWithLSDA<ELF64BE>(
    function_ref<void(InputSection &)>);

template class elf::SymbolTableSection<ELF32LE>;
template class elf::SymbolTableSection<ELF32BE>;
template class elf::SymbolTableSection<ELF64LE>;
template class elf::SymbolTableSection<ELF64BE>;

template void elf::writeEhdr<ELF32LE>(uint8_t *Buf, Partition &Part);
template void elf::writeEhdr<ELF32BE>(uint8_t *Buf, Partition &Part);
template void elf::writeEhdr<ELF64LE>(uint8_t *Buf, Partition &Part);
template void elf::writeEhdr<ELF64BE>(uint8_t *Buf, Partition &Part);

template void elf::writePhdrs<ELF32LE>(uint8_t *Buf, Partition &Part);
template void elf::writePhdrs<ELF32BE>(uint8_t *Buf, Partition &Part);
template void elf::writePhdrs<ELF64LE>(uint8_t *Buf, Partition &Part);
template void elf::writePhdrs<ELF64BE>(uint8_t *Buf, Partition &Part);

template void elf::createSyntheticSections<ELF32LE>(Ctx &);
template void elf::createSyntheticSections<ELF32BE>(Ctx &);
template void elf::createSyntheticSections<ELF64LE>(Ctx &);
template void elf::createSyntheticSections<ELF64BE>(Ctx &);