// llvm/lld/COFF/Chunks.cpp

//===- Chunks.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "Chunks.h"
#include "COFFLinkerContext.h"
#include "InputFiles.h"
#include "SymbolTable.h"
#include "Symbols.h"
#include "Writer.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/COFF.h"
#include "llvm/Object/COFF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <iterator>

usingnamespacellvm;
usingnamespacellvm::object;
usingnamespacellvm::support::endian;
usingnamespacellvm::COFF;
ulittle32_t;

namespace lld::coff {

// Construct a SectionChunk for section header `h` of object file `f`.
// The member-initializer list was garbled to ":{}" in this copy; restored
// below (base chunk kind, owning file, raw COFF section header, and `repl`
// initially pointing at this chunk itself).
//
// NOTE(review): the upstream constructor body (relocation setup, section
// name caching, alignment, and GC-liveness initialization) has been stripped
// from this copy and must be restored from upstream before building.
SectionChunk::SectionChunk(ObjFile *f, const coff_section *h, Kind k)
    : Chunk(k), file(f), header(h), repl(this) {}

// SectionChunk is one of the most frequently allocated classes, so it is
// important to keep it as compact as possible. As of this writing, the number
// below is the size of this class on x64 platforms.
static_assert;

// In-place patch helpers for relocation fields. COFF image fields are always
// little-endian, so each helper reads the existing value at `p` in LE byte
// order, applies the operation (with wraparound), and writes it back in LE
// byte order. The explicit byte loops keep this correct on big-endian hosts;
// semantically these are equivalent to
// llvm::support::endian::write16le(p, read16le(p) + v), etc.
namespace {
// Read sizeof(UIntT) bytes at `p` as an unsigned little-endian integer.
template <class UIntT> UIntT readLE(const uint8_t *p) {
  UIntT v = 0;
  for (unsigned i = 0; i != sizeof(UIntT); ++i)
    v = static_cast<UIntT>(v | (static_cast<UIntT>(p[i]) << (8 * i)));
  return v;
}
// Write `v` at `p` as an unsigned little-endian integer.
template <class UIntT> void writeLE(uint8_t *p, UIntT v) {
  for (unsigned i = 0; i != sizeof(UIntT); ++i)
    p[i] = static_cast<uint8_t>(v >> (8 * i));
}
} // namespace

// Add `v` to the LE 16-bit field at `p` (modulo 2^16).
static void add16(uint8_t *p, int16_t v) {
  writeLE<uint16_t>(p, static_cast<uint16_t>(readLE<uint16_t>(p) +
                                             static_cast<uint16_t>(v)));
}
// Add `v` to the LE 32-bit field at `p` (modulo 2^32).
static void add32(uint8_t *p, int32_t v) {
  writeLE<uint32_t>(p, readLE<uint32_t>(p) + static_cast<uint32_t>(v));
}
// Add `v` to the LE 64-bit field at `p` (modulo 2^64).
static void add64(uint8_t *p, int64_t v) {
  writeLE<uint64_t>(p, readLE<uint64_t>(p) + static_cast<uint64_t>(v));
}
// OR `v` into the LE 16-bit field at `p`.
static void or16(uint8_t *p, uint16_t v) {
  writeLE<uint16_t>(p, static_cast<uint16_t>(readLE<uint16_t>(p) | v));
}
// OR `v` into the LE 32-bit field at `p`.
static void or32(uint8_t *p, uint32_t v) {
  writeLE<uint32_t>(p, readLE<uint32_t>(p) | v);
}

// Verify that given sections are appropriate targets for SECREL
// relocations. This check is relaxed because unfortunately debug
// sections have section-relative relocations against absolute symbols.
//
// NOTE(review): the three helper bodies below are empty in this copy of the
// file — the implementations appear to have been stripped. checkSecRel
// returns bool with no return statement, so calling it as-is is UB. Restore
// the real bodies from upstream before building.
static bool checkSecRel(const SectionChunk *sec, OutputSection *os) {}

// Presumably writes the section-relative offset of `s` at `off` — body
// missing; confirm against upstream.
static void applySecRel(const SectionChunk *sec, uint8_t *off,
                        OutputSection *os, uint64_t s) {}

// Presumably writes the output-section index at `off` for SECTION-type
// relocations — body missing; confirm against upstream.
static void applySecIdx(uint8_t *off, OutputSection *os,
                        unsigned numOutputSections) {}

// NOTE(review): every body in this block is empty in this copy of the file —
// the x64/x86/ARM relocation-application logic appears to have been stripped.
// readMOV returns uint16_t with no return statement (UB if called). Restore
// from upstream before building.

// Presumably dispatches on AMD64 relocation `type` and patches the field at
// `off`, given symbol address `s`, relocation address `p`, and the image
// base — body missing; confirm against upstream.
void SectionChunk::applyRelX64(uint8_t *off, uint16_t type, OutputSection *os,
                               uint64_t s, uint64_t p,
                               uint64_t imageBase) const {}

// Presumably the i386 counterpart of applyRelX64 — body missing.
void SectionChunk::applyRelX86(uint8_t *off, uint16_t type, OutputSection *os,
                               uint64_t s, uint64_t p,
                               uint64_t imageBase) const {}

// Presumably encodes the 16-bit immediate `v` into a Thumb2 MOVW/MOVT
// instruction at `off` — body missing.
static void applyMOV(uint8_t *off, uint16_t v) {}

// Presumably decodes the immediate from a Thumb2 MOVW (movt == false) or
// MOVT (movt == true) instruction at `off` — body missing.
static uint16_t readMOV(uint8_t *off, bool movt) {}

// Presumably writes the 32-bit value `v` via a MOVW/MOVT pair at `off` —
// body missing.
void applyMOV32T(uint8_t *off, uint32_t v) {}

// Presumably patches a Thumb2 20-bit conditional branch displacement — body
// missing.
static void applyBranch20T(uint8_t *off, int32_t v) {}

// Presumably patches a Thumb2 24-bit branch displacement — body missing.
void applyBranch24T(uint8_t *off, int32_t v) {}

// NOTE(review): every body in this block is empty in this copy of the file —
// the ARM/ARM64 relocation-application logic appears to have been stripped.
// Restore the real bodies from upstream before building; the existing
// comments below describe the intended behavior.

// Presumably dispatches on ARM (Thumb2) relocation `type` and patches the
// field at `off` — body missing; confirm against upstream.
void SectionChunk::applyRelARM(uint8_t *off, uint16_t type, OutputSection *os,
                               uint64_t s, uint64_t p,
                               uint64_t imageBase) const {}

// Interpret the existing immediate value as a byte offset to the
// target symbol, then update the instruction with the immediate as
// the page offset from the current instruction to the target.
void applyArm64Addr(uint8_t *off, uint64_t s, uint64_t p, int shift) {}

// Update the immediate field in a AARCH64 ldr, str, and add instruction.
// Optionally limit the range of the written immediate by one or more bits
// (rangeLimit).
void applyArm64Imm(uint8_t *off, uint64_t imm, uint32_t rangeLimit) {}

// Add the 12 bit page offset to the existing immediate.
// Ldr/str instructions store the opcode immediate scaled
// by the load/store size (giving a larger range for larger
// loads/stores). The immediate is always (both before and after
// fixing up the relocation) stored scaled similarly.
// Even if larger loads/stores have a larger range, limit the
// effective offset to 12 bit, since it is intended to be a
// page offset.
static void applyArm64Ldr(uint8_t *off, uint64_t imm) {}

static void applySecRelLow12A(const SectionChunk *sec, uint8_t *off,
                              OutputSection *os, uint64_t s) {}

static void applySecRelHigh12A(const SectionChunk *sec, uint8_t *off,
                               OutputSection *os, uint64_t s) {}

static void applySecRelLdr(const SectionChunk *sec, uint8_t *off,
                           OutputSection *os, uint64_t s) {}

void applyArm64Branch26(uint8_t *off, int64_t v) {}

static void applyArm64Branch19(uint8_t *off, int64_t v) {}

static void applyArm64Branch14(uint8_t *off, int64_t v) {}

// Presumably dispatches on ARM64 relocation `type` — body missing.
void SectionChunk::applyRelARM64(uint8_t *off, uint16_t type, OutputSection *os,
                                 uint64_t s, uint64_t p,
                                 uint64_t imageBase) const {}

// NOTE(review): every body in this block is empty in this copy of the file —
// the section-writing and relocation plumbing appears to have been stripped.
// getBaserelType returns uint8_t with no return statement (UB if called).
// Restore the real bodies from upstream before building.

// Presumably diagnoses a relocation that targets a discarded section (with
// MinGW-specific relaxations) — body missing; confirm against upstream.
static void maybeReportRelocationToDiscarded(const SectionChunk *fromChunk,
                                             Defined *sym,
                                             const coff_relocation &rel,
                                             bool isMinGW) {}

// Presumably copies section contents to `buf` and applies relocations —
// body missing.
void SectionChunk::writeTo(uint8_t *buf) const {}

// Presumably applies a single relocation `rel` to the field at `off` —
// body missing.
void SectionChunk::applyRelocation(uint8_t *off,
                                   const coff_relocation &rel) const {}

// Defend against unsorted relocations. This may be overly conservative.
void SectionChunk::sortRelocations() {}

// Similar to writeTo, but suitable for relocating a subsection of the overall
// section.
void SectionChunk::writeAndRelocateSubsection(ArrayRef<uint8_t> sec,
                                              ArrayRef<uint8_t> subsec,
                                              uint32_t &nextRelocIndex,
                                              uint8_t *buf) const {}

void SectionChunk::addAssociative(SectionChunk *child) {}

// Presumably maps a COFF relocation to its .reloc base-relocation type for
// `arch` — body missing.
static uint8_t getBaserelType(const coff_relocation &rel,
                              Triple::ArchType arch) {}

// NOTE(review): every body in this block is empty in this copy of the file —
// the implementations appear to have been stripped. Several functions return
// non-void (int, bool, StringRef, ArrayRef, pointers, uint32_t) with no
// return statement, which is UB if they are ever called. Restore the real
// bodies from upstream before building; the existing comments describe the
// intended behavior.

// Windows-specific.
// Collect all locations that contain absolute addresses, which need to be
// fixed by the loader if load-time relocation is needed.
// Only called when base relocation is enabled.
void SectionChunk::getBaserels(std::vector<Baserel> *res) {}

// MinGW specific.
// Check whether a static relocation of type Type can be deferred and
// handled at runtime as a pseudo relocation (for references to a module
// local variable, which turned out to actually need to be imported from
// another DLL) This returns the size the relocation is supposed to update,
// in bits, or 0 if the relocation cannot be handled as a runtime pseudo
// relocation.
static int getRuntimePseudoRelocSize(uint16_t type,
                                     llvm::COFF::MachineTypes machine) {}

// MinGW specific.
// Append information to the provided vector about all relocations that
// need to be handled at runtime as runtime pseudo relocations (references
// to a module local variable, which turned out to actually need to be
// imported from another DLL).
void SectionChunk::getRuntimePseudoRelocs(
    std::vector<RuntimePseudoReloc> &res) {}

bool SectionChunk::isCOMDAT() const {}

void SectionChunk::printDiscardedMessage() const {}

StringRef SectionChunk::getDebugName() const {}

ArrayRef<uint8_t> SectionChunk::getContents() const {}

ArrayRef<uint8_t> SectionChunk::consumeDebugMagic() {}

ArrayRef<uint8_t> SectionChunk::consumeDebugMagic(ArrayRef<uint8_t> data,
                                                  StringRef sectionName) {}

SectionChunk *SectionChunk::findByName(ArrayRef<SectionChunk *> sections,
                                       StringRef name) {}

void SectionChunk::replace(SectionChunk *other) {}

uint32_t SectionChunk::getSectionNumber() const {}

// Construct a chunk backing a common (BSS-style) symbol `s`.
// The member-initializer list was garbled to ":{}" in this copy; restored
// the "sym(s)" initializer below.
// NOTE(review): the upstream body (which derives the chunk's alignment from
// the common symbol's size) has been stripped here; restore from upstream
// before building.
CommonChunk::CommonChunk(const COFFSymbolRef s) : sym(s) {}

// NOTE(review): both bodies below are empty in this copy of the file —
// getOutputCharacteristics returns uint32_t with no return statement (UB if
// called). Restore the real bodies from upstream before building.
uint32_t CommonChunk::getOutputCharacteristics() const {}

// Presumably copies the string payload (including terminator) to `buf` —
// body missing; confirm against upstream.
void StringChunk::writeTo(uint8_t *buf) const {}

// Construct the x64 import thunk for symbol `s`.
// The base-class initializer was garbled to ":{}" in this copy; restored
// below. Upstream forwards the linker context and the thunked symbol to
// ImportThunkChunk and has an empty body, so no further restoration is
// needed here.
ImportThunkChunkX64::ImportThunkChunkX64(COFFLinkerContext &ctx, Defined *s)
    : ImportThunkChunk(ctx, s) {}

// NOTE(review): every import-thunk body below is empty in this copy of the
// file — the code that emits the per-architecture indirect-jump stubs (and
// their base relocations) appears to have been stripped. Restore from
// upstream before building.
void ImportThunkChunkX64::writeTo(uint8_t *buf) const {}

void ImportThunkChunkX86::getBaserels(std::vector<Baserel> *res) {}

void ImportThunkChunkX86::writeTo(uint8_t *buf) const {}

void ImportThunkChunkARM::getBaserels(std::vector<Baserel> *res) {}

void ImportThunkChunkARM::writeTo(uint8_t *buf) const {}

void ImportThunkChunkARM64::writeTo(uint8_t *buf) const {}

// A Thumb2, PIC, non-interworking range extension thunk.
//
// The initializer was garbled to "=;" in this copy; restored below. The
// bytes are little-endian Thumb2 halfword pairs: MOVW/MOVT (encoding T3)
// loading ip (r12) with a zero placeholder immediate that the writer patches
// with the target address, followed by "add pc, ip" (ADD register, T2) to
// jump there. NOTE(review): verify byte-for-byte against upstream.
const uint8_t armThunk[] = {
    0x40, 0xf2, 0x00, 0x0c, // P:  movw ip,:lower16:S
    0xc0, 0xf2, 0x00, 0x0c, //     movt ip,:upper16:S
    0xe7, 0x44,             //     L1: add  pc, ip
};

// NOTE(review): both bodies below are empty in this copy of the file —
// getSize returns size_t with no return statement (UB if called; presumably
// sizeof(armThunk) upstream). Restore from upstream before building.
size_t RangeExtensionThunkARM::getSize() const {}

// Presumably copies armThunk to `buf` and patches in the target address —
// body missing; confirm against upstream.
void RangeExtensionThunkARM::writeTo(uint8_t *buf) const {}

// A position independent ARM64 adrp+add thunk, with a maximum range of
// +/- 4 GB, which is enough for any PE-COFF.
//
// The initializer was garbled to "=;" in this copy; restored below. The
// bytes are the little-endian A64 encodings of adrp x16 (0x90000010),
// add x16, x16, #0 (0x91000210) and br x16 (0xd61f0200); the zero
// immediates are placeholders the writer patches with the destination.
const uint8_t arm64Thunk[] = {
    0x10, 0x00, 0x00, 0x90, // adrp x16, Dest
    0x10, 0x02, 0x00, 0x91, // add  x16, x16, :lo12:Dest
    0x00, 0x02, 0x1f, 0xd6, // br   x16
};

// NOTE(review): both bodies below are empty in this copy of the file —
// getSize returns size_t with no return statement (UB if called; presumably
// sizeof(arm64Thunk) upstream). Restore from upstream before building.
size_t RangeExtensionThunkARM64::getSize() const {}

// Presumably copies arm64Thunk to `buf` and patches the adrp/add immediates
// with the target — body missing; confirm against upstream.
void RangeExtensionThunkARM64::writeTo(uint8_t *buf) const {}

// Construct a local-import pointer chunk for symbol `s`.
// The member-initializer list was garbled to ":{}" in this copy; restored
// "ctx(c), sym(s)" below (order assumed — confirm it matches the member
// declaration order to avoid -Wreorder).
// NOTE(review): the upstream body (which sets the chunk's alignment to the
// target word size) has been stripped here; restore from upstream before
// building.
LocalImportChunk::LocalImportChunk(COFFLinkerContext &c, Defined *s)
    : ctx(c), sym(s) {}

// NOTE(review): every body in this block is empty in this copy of the file —
// the three getSize overloads return size_t with no return statement (UB if
// called). Restore the real bodies from upstream before building.
void LocalImportChunk::getBaserels(std::vector<Baserel> *res) {}

size_t LocalImportChunk::getSize() const {}

void LocalImportChunk::writeTo(uint8_t *buf) const {}

void RVATableChunk::writeTo(uint8_t *buf) const {}

void RVAFlagTableChunk::writeTo(uint8_t *buf) const {}

size_t ECCodeMapChunk::getSize() const {}

void ECCodeMapChunk::writeTo(uint8_t *buf) const {}

// MinGW specific, for the "automatic import of variables from DLLs" feature.
size_t PseudoRelocTableChunk::getSize() const {}

// MinGW specific.
void PseudoRelocTableChunk::writeTo(uint8_t *buf) const {}

// Windows-specific. This class represents a block in .reloc section.
// The format is described here.
//
// On Windows, each DLL is linked against a fixed base address and
// usually loaded to that address. However, if there's already another
// DLL that overlaps, the loader has to relocate it. To do that, DLLs
// contain .reloc sections which contain offsets that need to be fixed
// up at runtime. If the loader finds that a DLL cannot be loaded to its
// desired base address, it loads it to somewhere else, and add <actual
// base address> - <desired base address> to each offset that is
// specified by the .reloc section. In ELF terms, .reloc sections
// contain relative relocations in REL format (as opposed to RELA.)
//
// This already significantly reduces the size of relocations compared
// to ELF .rel.dyn, but Windows does more to reduce it (probably because
// it was invented for PCs in the late '80s or early '90s.)  Offsets in
// .reloc are grouped by page where the page size is 12 bits, and
// offsets sharing the same page address are stored consecutively to
// represent them with less space. This is very similar to the page
// table which is grouped by (multiple stages of) pages.
//
// For example, let's say we have 0x00030, 0x00500, 0x00700, 0x00A00,
// 0x20004, and 0x20008 in a .reloc section for x64. The uppermost 4
// bits have a type IMAGE_REL_BASED_DIR64 or 0xA. In the section, they
// are represented like this:
//
//   0x00000  -- page address (4 bytes)
//   16       -- size of this block (4 bytes)
//     0xA030 -- entries (2 bytes each)
//     0xA500
//     0xA700
//     0xAA00
//   0x20000  -- page address (4 bytes)
//   12       -- size of this block (4 bytes)
//     0xA004 -- entries (2 bytes each)
//     0xA008
//
// Usually we have a lot of relocations for each page, so the number of
// bytes for one .reloc entry is close to 2 bytes on average.
//
// NOTE(review): the three bodies below are empty in this copy of the file —
// getDefaultType returns uint8_t with no return statement (UB if called).
// Restore the real bodies (which build and emit the block described above)
// from upstream before building.
BaserelChunk::BaserelChunk(uint32_t page, Baserel *begin, Baserel *end) {}

void BaserelChunk::writeTo(uint8_t *buf) const {}

uint8_t Baserel::getDefaultType(llvm::COFF::MachineTypes machine) {}

// Construct a chunk that merges mergeable section contents at the given
// alignment. The member-initializer list was garbled to ":{}" in this copy;
// restored below: the string-table `builder` member is initialized in RAW
// mode with the requested alignment (it has no default constructor), and the
// chunk's own alignment is set to match.
// NOTE(review): initializer and body restored from upstream from memory —
// verify byte-for-byte against upstream before relying on this.
MergeChunk::MergeChunk(uint32_t alignment)
    : builder(StringTableBuilder::RAW, llvm::Align(alignment)) {
  setAlignment(alignment);
}

// NOTE(review): every MergeChunk method body below is empty in this copy of
// the file — getOutputCharacteristics and getSize return non-void with no
// return statement (UB if called). Restore the real bodies from upstream
// before building.
void MergeChunk::addSection(COFFLinkerContext &ctx, SectionChunk *c) {}

void MergeChunk::finalizeContents() {}

void MergeChunk::assignSubsectionRVAs() {}

uint32_t MergeChunk::getOutputCharacteristics() const {}

size_t MergeChunk::getSize() const {}

void MergeChunk::writeTo(uint8_t *buf) const {}

// NOTE(review): every body in this block is empty in this copy of the file —
// the getSize overloads return size_t with no return statement (UB if
// called). Restore the real bodies from upstream before building.

// MinGW specific.
size_t AbsolutePointerChunk::getSize() const {}

void AbsolutePointerChunk::writeTo(uint8_t *buf) const {}

void ECExportThunkChunk::writeTo(uint8_t *buf) const {}

size_t CHPECodeRangesChunk::getSize() const {}

void CHPECodeRangesChunk::writeTo(uint8_t *buf) const {}

size_t CHPERedirectionChunk::getSize() const {}

void CHPERedirectionChunk::writeTo(uint8_t *buf) const {}

} // namespace lld::coff