// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright © 2015 Intel Corporation.
 *
 * Authors: David Woodhouse <[email protected]>
 */

#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/dmar.h>
#include <linux/interrupt.h>
#include <linux/mm_types.h>
#include <linux/xarray.h>
#include <asm/page.h>
#include <asm/fpu/api.h>

#include "iommu.h"
#include "pasid.h"
#include "perf.h"
#include "../iommu-pages.h"
#include "trace.h"

static irqreturn_t prq_event_thread(int irq, void *d);

int intel_svm_enable_prq(struct intel_iommu *iommu)
{}
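
/*
 * Sketch of the PRQ setup sequence, not the upstream body. PRQ_ORDER,
 * the DMAR_PQ*_REG offsets, dmar_writeq() and iommu_alloc_pages_node()
 * are real driver symbols; IRQ setup (wiring prq_event_thread as the
 * threaded handler) and error unwinding are omitted.
 */
static int __maybe_unused svm_enable_prq_sketch(struct intel_iommu *iommu)
{
	void *prq;

	/* Allocate the page request queue ring. */
	prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
	if (!prq)
		return -ENOMEM;
	iommu->prq = prq;

	/* Reset head/tail and point the hardware at the ring. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
	dmar_writeq(iommu->reg + DMAR_PQA_REG,
		    virt_to_phys(iommu->prq) | PRQ_ORDER);

	return 0;
}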

int intel_svm_finish_prq(struct intel_iommu *iommu)
{}

void intel_svm_check(struct intel_iommu *iommu)
{}

/* Pages have been freed at this point */
static void intel_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{}
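
/*
 * Sketch of the invalidation work a notifier like the one above must
 * do: the pages are already freed, so any stale IOTLB/device-TLB entry
 * for the range is a use-after-free waiting to happen.
 * cache_tag_flush_range()/cache_tag_flush_all() are real helpers from
 * the modern driver (cache.c); embedding the notifier in struct
 * dmar_domain is an assumption about the surrounding layout.
 */
static void __maybe_unused svm_invalidate_range_sketch(struct mmu_notifier *mn,
						       unsigned long start,
						       unsigned long end)
{
	struct dmar_domain *domain = container_of(mn, struct dmar_domain,
						  notifier);

	/* A full-address-space notification maps to a flush-all. */
	if (start == 0 && end == ULONG_MAX)
		cache_tag_flush_all(domain);
	else
		cache_tag_flush_range(domain, start, end - 1, 0);
}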

static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{}
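
/*
 * Sketch of the release path: once the mm is exiting, every PASID
 * bound to it must stop walking its page tables. The dev_pasids walk
 * mirrors the driver's bookkeeping; intel_pasid_tear_down_entry() is a
 * real helper from pasid.c, and fault_ignore is set because the mm is
 * already dead.
 */
static void __maybe_unused svm_mm_release_sketch(struct dmar_domain *domain)
{
	struct dev_pasid_info *dev_pasid;

	list_for_each_entry(dev_pasid, &domain->dev_pasids, link_domain) {
		struct device_domain_info *info =
			dev_iommu_priv_get(dev_pasid->dev);

		/* Clear the PASID entry; further faults are ignored. */
		intel_pasid_tear_down_entry(info->iommu, dev_pasid->dev,
					    dev_pasid->pasid, true);
	}
}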

static void intel_mm_free_notifier(struct mmu_notifier *mn)
{}

/* Notifier callbacks wired to the handlers defined above. */
static const struct mmu_notifier_ops intel_mmuops = {
	.release = intel_mm_release,
	.arch_invalidate_secondary_tlbs = intel_arch_invalidate_secondary_tlbs,
	.free_notifier = intel_mm_free_notifier,
};

static int intel_svm_set_dev_pasid(struct iommu_domain *domain,
				   struct device *dev, ioasid_t pasid)
{}
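
/*
 * Sketch of the bind step: the PASID table entry for @pasid is pointed
 * at the CPU page table root (mm->pgd) so the IOMMU walks the same
 * page tables as the MMU. intel_pasid_setup_first_level() and
 * PASID_FLAG_FL5LP are real symbols; the exact argument list here
 * (notably the domain ID) is an assumption.
 */
static int __maybe_unused svm_set_dev_pasid_sketch(struct device *dev,
						   struct mm_struct *mm,
						   ioasid_t pasid, u16 did)
{
	struct device_domain_info *info = dev_iommu_priv_get(dev);

	/* Enable 5-level paging in the PASID entry iff the CPU uses it. */
	return intel_pasid_setup_first_level(info->iommu, dev, mm->pgd,
					     pasid, did,
					     cpu_feature_enabled(X86_FEATURE_LA57) ?
					     PASID_FLAG_FL5LP : 0);
}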

/*
 * Page request queue descriptor: two quadwords of request fields plus
 * private data, reconstructed here following the VT-d page request
 * descriptor layout (spec ch. 7.5).
 */
struct page_req_dsc {
	union {
		struct {
			u64 type:8;
			u64 pasid_present:1;
			u64 priv_data_present:1;
			u64 rsvd:6;
			u64 rid:16;
			u64 pasid:20;
			u64 exe_req:1;
			u64 pm_req:1;
			u64 rsvd2:10;
		};
		u64 qw_0;
	};
	union {
		struct {
			u64 rd_req:1;
			u64 wr_req:1;
			u64 lpig:1;
			u64 prg_index:9;
			u64 addr:52;
		};
		u64 qw_1;
	};
	u64 priv_data[2];
};

static bool is_canonical_address(u64 addr)
{
	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
	long saddr = (long)addr;

	/* Canonical iff the upper bits sign-extend the top virtual bit. */
	return (((saddr << shift) >> shift) == saddr);
}

/**
 * intel_drain_pasid_prq - Drain page requests and responses for a pasid
 * @dev: target device
 * @pasid: pasid for draining
 *
 * Drain all pending page requests and responses related to @pasid in both
 * software and hardware. This is supposed to be called after the device
 * driver has stopped DMA, the pasid entry has been cleared, and both IOTLB
 * and DevTLB have been invalidated.
 *
 * It waits until all pending page requests for @pasid in the page fault
 * queue are completed by the prq handling thread. It then follows the
 * steps described in VT-d spec ch. 7.10 to drain all page requests and
 * page responses pending in the hardware.
 */
void intel_drain_pasid_prq(struct device *dev, u32 pasid)
{}
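
/*
 * Sketch of the two drain steps the comment above describes, not the
 * upstream body. iommu->prq_complete, reinit_completion()/
 * wait_for_completion() and qi_submit_sync() with QI_OPT_WAIT_DRAIN
 * are real symbols; building the three invalidation descriptors is
 * elided, and the real code re-checks the queue in a loop.
 */
static void __maybe_unused svm_drain_prq_sketch(struct intel_iommu *iommu,
						struct qi_desc *desc)
{
	/* Step 1: wait for the PRQ thread to finish in-flight requests. */
	reinit_completion(&iommu->prq_complete);
	wait_for_completion(&iommu->prq_complete);

	/*
	 * Step 2: per spec ch. 7.10, submit IOTLB + device-TLB
	 * invalidations plus a fenced wait that drains pending page
	 * requests and responses.
	 */
	qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
}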

static int prq_to_iommu_prot(struct page_req_dsc *req)
{
	int prot = 0;

	/* Map descriptor permission bits 1:1 onto IOMMU fault flags. */
	if (req->rd_req)
		prot |= IOMMU_FAULT_PERM_READ;
	if (req->wr_req)
		prot |= IOMMU_FAULT_PERM_WRITE;
	if (req->exe_req)
		prot |= IOMMU_FAULT_PERM_EXEC;
	if (req->pm_req)
		prot |= IOMMU_FAULT_PERM_PRIV;

	return prot;
}

static void intel_svm_prq_report(struct intel_iommu *iommu, struct device *dev,
				 struct page_req_dsc *desc)
{}
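
/*
 * Sketch of how a raw descriptor becomes an iopf_fault handed to the
 * core fault layer. iommu_report_device_fault() and the iommu_fault
 * structures are real; the field fill below is abbreviated.
 */
static void __maybe_unused svm_prq_report_sketch(struct device *dev,
						 struct page_req_dsc *desc)
{
	struct iopf_fault event = {};

	event.fault.type = IOMMU_FAULT_PAGE_REQ;
	event.fault.prm.addr = (u64)desc->addr << VTD_PAGE_SHIFT;
	event.fault.prm.pasid = desc->pasid;
	event.fault.prm.grpid = desc->prg_index;
	event.fault.prm.perm = prq_to_iommu_prot(desc);
	if (desc->lpig)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
	if (desc->pasid_present)
		event.fault.prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;

	/* Hand off to the IOPF layer, which queues the fault work. */
	iommu_report_device_fault(dev, &event);
}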

static void handle_bad_prq_event(struct intel_iommu *iommu,
				 struct page_req_dsc *req, int result)
{}
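
/*
 * Sketch of the bad-event path: a malformed request cannot be faulted
 * in, so the hardware is answered directly with a page group response
 * carrying @result. The QI_PGRP_* macros and qi_submit_sync() are real
 * queued-invalidation symbols; the encoding is abbreviated here.
 */
static void __maybe_unused svm_bad_prq_sketch(struct intel_iommu *iommu,
					      struct page_req_dsc *req,
					      int result)
{
	struct qi_desc desc = {};

	desc.qw0 = QI_PGRP_PASID(req->pasid) | QI_PGRP_DID(req->rid) |
		   QI_PGRP_PASID_P(req->pasid_present) |
		   QI_PGRP_RESP_CODE(result) | QI_PGRP_RESP_TYPE;
	desc.qw1 = QI_PGRP_IDX(req->prg_index) | QI_PGRP_LPIG(req->lpig);

	qi_submit_sync(iommu, &desc, 1, 0);
}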

static irqreturn_t prq_event_thread(int irq, void *d)
{}
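
/*
 * Sketch of the PRQ drain loop at the heart of the thread: head and
 * tail are byte offsets into a power-of-two ring of fixed-size
 * descriptors. The register names and PRQ_RING_MASK are real; the
 * per-request validation and reporting are the simplified versions
 * sketched earlier in this file.
 */
static void __maybe_unused prq_loop_sketch(struct intel_iommu *iommu)
{
	struct page_req_dsc *prq = (struct page_req_dsc *)iommu->prq;
	u64 head, tail;

	tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
	head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;

	while (head != tail) {
		struct page_req_dsc *req = &prq[head / sizeof(*req)];

		/* Validate req, look up the device by req->rid, report. */
		head = (head + sizeof(*req)) & PRQ_RING_MASK;
	}

	/* Tell the hardware how far we got. */
	dmar_writeq(iommu->reg + DMAR_PQH_REG, tail);
}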

void intel_svm_page_response(struct device *dev, struct iopf_fault *evt,
			     struct iommu_page_response *msg)
{}

static void intel_svm_domain_free(struct iommu_domain *domain)
{}

/* SVA domain ops wired to the handlers defined above. */
static const struct iommu_domain_ops intel_svm_domain_ops = {
	.set_dev_pasid = intel_svm_set_dev_pasid,
	.free = intel_svm_domain_free,
};

struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{}
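
/*
 * Sketch of SVA domain construction: allocate a dmar_domain, wire up
 * the ops table above, and register the MMU notifier so CPU-side TLB
 * flushes reach the IOMMU. mmu_notifier_register() is the real API;
 * the dmar_domain field names are assumptions consistent with the
 * sketches above.
 */
static struct iommu_domain *__maybe_unused
svm_domain_alloc_sketch(struct mm_struct *mm)
{
	struct dmar_domain *domain;
	int ret;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return ERR_PTR(-ENOMEM);

	domain->domain.ops = &intel_svm_domain_ops;
	INIT_LIST_HEAD(&domain->dev_pasids);

	/* intel_mmuops callbacks fire from now until intel_mm_release. */
	domain->notifier.ops = &intel_mmuops;
	ret = mmu_notifier_register(&domain->notifier, mm);
	if (ret) {
		kfree(domain);
		return ERR_PTR(ret);
	}

	return &domain->domain;
}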