/* linux/drivers/iommu/amd/io_pgtable_v2.c */

// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table v2 allocator.
 *
 * Copyright (C) 2022, 2023 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <[email protected]>
 * Author: Vasant Hegde <[email protected]>
 */

/* Prefix all pr_*()/dev_*() output from this file with the driver tag. */
#define pr_fmt(fmt)	"AMD-Vi: " fmt
#define dev_fmt(fmt)	pr_fmt(fmt)

#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"
#include "../iommu-pages.h"

/* v2 (AMD64-compatible) page-table entry bits */
#define IOMMU_PAGE_PRESENT	BIT_ULL(0)	/* Is present */
#define IOMMU_PAGE_RW		BIT_ULL(1)	/* Writeable */
#define IOMMU_PAGE_USER		BIT_ULL(2)	/* Userspace addressable */
#define IOMMU_PAGE_PWT		BIT_ULL(3)	/* Page write through */
#define IOMMU_PAGE_PCD		BIT_ULL(4)	/* Page cache disabled */
#define IOMMU_PAGE_ACCESS	BIT_ULL(5)	/* Was accessed (updated by IOMMU) */
#define IOMMU_PAGE_DIRTY	BIT_ULL(6)	/* Page was written to (updated by IOMMU) */
#define IOMMU_PAGE_PSE		BIT_ULL(7)	/* Page Size Extensions (large-page leaf) */
#define IOMMU_PAGE_NX		BIT_ULL(63)	/* No execute */

/* 4K page / 8-byte entries -> 512 entries per table page */
#define MAX_PTRS_PER_PAGE	512

#define IOMMU_PAGE_SIZE_2M	BIT_ULL(21)
#define IOMMU_PAGE_SIZE_1G	BIT_ULL(30)


static inline int get_pgtable_level(void)
{}

/* True when the PSE bit marks @pte as a large (2M/1G) leaf entry. */
static inline bool is_large_pte(u64 pte)
{
	return (pte & IOMMU_PAGE_PSE);
}

/*
 * Build a non-leaf PTE pointing at the lower-level table @page:
 * present, writable, user-accessible, with accessed/dirty preset so the
 * IOMMU never needs to update them for intermediate levels.
 */
static inline u64 set_pgtable_attr(u64 *page)
{
	u64 prot;

	prot = IOMMU_PAGE_PRESENT | IOMMU_PAGE_RW | IOMMU_PAGE_USER;
	prot |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	return (iommu_virt_to_phys(page) | prot);
}

/* Virtual address of the next-level table a non-leaf @pte points to. */
static inline void *get_pgtable_pte(u64 pte)
{
	return iommu_phys_to_virt(pte & PM_ADDR_MASK);
}

/*
 * Build a leaf PTE mapping physical address @paddr with IOMMU_PROT_*
 * permissions @prot; sets PSE when @pg_size selects a 2M/1G large page.
 */
static u64 set_pte_attr(u64 paddr, u64 pg_size, int prot)
{
	u64 pte;

	pte = __sme_set(paddr & PM_ADDR_MASK);
	pte |= IOMMU_PAGE_PRESENT | IOMMU_PAGE_USER;
	pte |= IOMMU_PAGE_ACCESS | IOMMU_PAGE_DIRTY;

	if (prot & IOMMU_PROT_IW)
		pte |= IOMMU_PAGE_RW;

	/* Large page */
	if (pg_size == IOMMU_PAGE_SIZE_1G || pg_size == IOMMU_PAGE_SIZE_2M)
		pte |= IOMMU_PAGE_PSE;

	return pte;
}

/* Largest supported page size (1G/2M/4K) not exceeding @size. */
static inline u64 get_alloc_page_size(u64 size)
{
	if (size >= IOMMU_PAGE_SIZE_1G)
		return IOMMU_PAGE_SIZE_1G;

	if (size >= IOMMU_PAGE_SIZE_2M)
		return IOMMU_PAGE_SIZE_2M;

	return PAGE_SIZE;
}

/* Page-table level at which a leaf of size @pg_size lives. */
static inline int page_size_to_level(u64 pg_size)
{
	if (pg_size == IOMMU_PAGE_SIZE_1G)
		return PAGE_MODE_3_LEVEL;
	if (pg_size == IOMMU_PAGE_SIZE_2M)
		return PAGE_MODE_2_LEVEL;

	return PAGE_MODE_1_LEVEL;
}

/*
 * Recursively free the page-table tree rooted at @pt, which sits at
 * @level.  Large-page leaves have no lower table, so they are skipped.
 */
static void free_pgtable(u64 *pt, int level)
{
	u64 *p;
	int i;

	for (i = 0; i < MAX_PTRS_PER_PAGE; i++) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large page: no lower-level table to free */
		if (is_large_pte(pt[i]))
			continue;

		/*
		 * Free the next level.  Level-2 tables can only contain
		 * leaf PTEs, so their children are freed directly.
		 */
		p = get_pgtable_pte(pt[i]);
		if (level > 2)
			free_pgtable(p, level - 1);
		else
			iommu_free_page(p);
	}

	iommu_free_page(pt);
}

/*
 * Allocate page table.
 *
 * Walk (and populate, allocating missing levels from node @nid) the tree
 * under @pgd down to the level implied by @pg_size and return a pointer
 * to the PTE slot for @iova.  *@updated is set to true whenever a live
 * entry was replaced or torn down, signalling the caller to flush.
 * Returns NULL on allocation failure.
 */
static u64 *v2_alloc_pte(int nid, u64 *pgd, unsigned long iova,
			 unsigned long pg_size, gfp_t gfp, bool *updated)
{
	u64 *pte, *page;
	int level, end_level;

	level = get_pgtable_level() - 1;
	end_level = page_size_to_level(pg_size);
	pte = &pgd[PM_LEVEL_INDEX(level, iova)];
	iova = PAGE_SIZE_ALIGN(iova, PAGE_SIZE);

	while (level >= end_level) {
		u64 __pte, __npte;

		__pte = *pte;

		if (IOMMU_PTE_PRESENT(__pte) && is_large_pte(__pte)) {
			/* Unmap large pte */
			cmpxchg64(pte, *pte, 0ULL);
			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte)) {
			page = iommu_alloc_page_node(nid, gfp);
			if (!page)
				return NULL;

			__npte = set_pgtable_attr(page);
			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				iommu_free_page(page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		level -= 1;
		pte = get_pgtable_pte(__pte);
		pte = &pte[PM_LEVEL_INDEX(level, iova)];
	}

	/* Tear down existing pte entries */
	if (IOMMU_PTE_PRESENT(*pte)) {
		u64 *__pte;

		*updated = true;
		__pte = get_pgtable_pte(*pte);
		if (!is_large_pte(*pte))
			free_pgtable(__pte, end_level - 1);
		*pte = 0ULL;
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address.
 * If there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long iova, unsigned long *page_size)
{}

/*
 * Map @pgcount pages of size @pgsize at @iova -> @paddr with @prot.
 * On return *@mapped is incremented by the number of bytes mapped, even
 * on partial failure, so the core can unwind.  Returns 0 or -errno.
 */
static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	u64 *pte;
	unsigned long map_size;
	unsigned long mapped_size = 0;
	unsigned long o_iova = iova;
	size_t size = pgcount << __ffs(pgsize);
	int ret = 0;
	bool updated = false;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize) || !pgcount)
		return -EINVAL;

	if (!(prot & IOMMU_PROT_MASK))
		return -EINVAL;

	while (mapped_size < size) {
		map_size = get_alloc_page_size(pgsize);
		pte = v2_alloc_pte(cfg->amd.nid, pgtable->pgd,
				   iova, map_size, gfp, &updated);
		if (!pte) {
			ret = -ENOMEM;
			goto out;
		}

		*pte = set_pte_attr(paddr, map_size, prot);

		iova += map_size;
		paddr += map_size;
		mapped_size += map_size;
	}

out:
	if (updated) {
		/* Existing entries were replaced: flush the mapped range. */
		struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
		unsigned long flags;

		spin_lock_irqsave(&pdom->lock, flags);
		amd_iommu_domain_flush_pages(pdom, o_iova, size);
		spin_unlock_irqrestore(&pdom->lock, flags);
	}

	if (mapped)
		*mapped += mapped_size;

	return ret;
}

/*
 * Unmap up to @pgcount pages of size @pgsize starting at @iova by
 * clearing the leaf PTEs.  Returns the number of bytes actually
 * unmapped; stops early at the first hole.
 */
static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
	unsigned long unmap_size;
	unsigned long unmapped = 0;
	size_t size = pgcount << __ffs(pgsize);
	u64 *pte;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (!pte)
			return unmapped;

		*pte = 0ULL;

		/* Advance past the (possibly large) page just cleared. */
		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

static phys_addr_t iommu_v2_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{}

/*
 * ----------------------------------------------------
 */
/*
 * NOTE(review): intentionally a no-op. In the AMD v2 page-table path,
 * IOTLB invalidation appears to be driven by the AMD IOMMU core (see
 * the flush in the map path) rather than by io-pgtable's flush
 * callbacks — confirm against the users of v2_flush_ops.
 */
static void v2_tlb_flush_all(void *cookie)
{}

/*
 * NOTE(review): intentionally a no-op — walk-cache invalidation is not
 * handled through this callback in the AMD v2 path; confirm upstream.
 */
static void v2_tlb_flush_walk(unsigned long iova, size_t size,
			      size_t granule, void *cookie)
{}

/*
 * NOTE(review): intentionally a no-op — per-page gather bookkeeping is
 * not used by the AMD v2 path; confirm against the iotlb_gather users.
 */
static void v2_tlb_add_page(struct iommu_iotlb_gather *gather,
			    unsigned long iova, size_t granule,
			    void *cookie)
{}

static const struct iommu_flush_ops v2_flush_ops =;

static void v2_free_pgtable(struct io_pgtable *iop)
{}

static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{}

struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns =;