// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc { … };

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{ … }

/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{ … }

/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * the whole memory there is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{ … }

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but
 * must be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr,
				  struct ioremap_desc *desc)
{ … }

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{ … }

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{ … }

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates a kernel huge I/O mapping when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, down to 4KB,
 * when a mapping range is covered by a non-WB type of MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{ … }
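/*
 * Illustrative sketch only, not part of the original file: the alignment
 * fixup described in the NOTE above. A non-page-aligned request is handled
 * by rounding the physical range out to page boundaries, mapping that, and
 * returning the in-page offset added to the mapped base. The helper name
 * is hypothetical and the block is not built.
 */
#if 0	/* example only */
static void __iomem *example_fixup_alignment(resource_size_t phys_addr,
					     unsigned long size)
{
	unsigned long offset = phys_addr & ~PAGE_MASK;	/* offset within page */
	resource_size_t base = phys_addr & PAGE_MASK;	/* page-aligned start */
	unsigned long map_size = PAGE_ALIGN(size + offset);
	void __iomem *vaddr;

	vaddr = ioremap(base, map_size);	/* page-aligned mapping */
	if (!vaddr)
		return NULL;

	/* The caller sees a pointer to the exact byte it asked for. */
	return vaddr + offset;
}
#endif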
/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{ … }
EXPORT_SYMBOL(…);

/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting the page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{ … }
EXPORT_SYMBOL_GPL(…);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{ … }
EXPORT_SYMBOL(…);

/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{ … }
EXPORT_SYMBOL(…);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{ … }
EXPORT_SYMBOL(…);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{ … }
EXPORT_SYMBOL(…);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{ … }
EXPORT_SYMBOL(…);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{ … }
EXPORT_SYMBOL(…);
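/*
 * Illustrative usage sketch only, not part of the original file: the
 * ioremap()/iounmap() pairing and the mmio accessors described in the
 * kernel-doc above. The physical address, window size, and register
 * offset below are invented for the example, and the block is not built.
 */
#if 0	/* example only */
#define EXAMPLE_BAR_PHYS	0xfed00000UL	/* hypothetical device window */
#define EXAMPLE_BAR_SIZE	0x1000UL	/* hypothetical 4 KiB of registers */
#define EXAMPLE_REG_CTRL	0x10		/* hypothetical control register */

static int example_probe(void)
{
	void __iomem *regs;
	u32 val;

	regs = ioremap(EXAMPLE_BAR_PHYS, EXAMPLE_BAR_SIZE);
	if (!regs)
		return -ENOMEM;

	/* Access the window only through the mmio helpers. */
	val = readl(regs + EXAMPLE_REG_CTRL);
	writel(val | BIT(0), regs + EXAMPLE_REG_CTRL);

	/* Every successful ioremap() must be undone with iounmap(). */
	iounmap(regs);
	return 0;
}
#endif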
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{ … }

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{ … }

#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted, so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{ … }

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{ … }

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{ … }

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{ … }

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{ … }

/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{ … }

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{ … }

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{ … }

/*
 * Remap memory with encryption and write-protected; must not be called
 * before pat_init() has run
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{ … }

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{ … }

/*
 * Remap memory without encryption and write-protected; must not be called
 * before pat_init() has run
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{ … }
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{ … }

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{ … }

bool __init is_early_ioremap_ptep(pte_t *ptep)
{ … }

void __init early_ioremap_init(void)
{ … }

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{ … }
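/*
 * Illustrative usage sketch only, not part of the original file: how early
 * boot code typically consumes the early fixmap machinery above via
 * early_memremap()/early_memunmap() to read a firmware blob before the
 * regular ioremap() path is available. The physical address passed in is
 * assumed to come from boot_params, and the block is not built.
 */
#if 0	/* example only */
static void __init example_read_boot_data(phys_addr_t paddr)
{
	void *data;

	/* Temporary mapping backed by the bm_pte[] fixmap slots above. */
	data = early_memremap(paddr, sizeof(struct setup_data));
	if (!data)
		return;

	/* ... inspect the setup_data header ... */

	/* Early mappings are scarce; drop them as soon as possible. */
	early_memunmap(data, sizeof(struct setup_data));
}
#endif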