// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES */
#include <linux/iommufd.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <uapi/linux/iommufd.h>

#include "../iommu-priv.h"
#include "io_pagetable.h"
#include "iommufd_private.h"

static bool allow_unsafe_interrupts;
module_param(allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(…) …;

static void iommufd_group_release(struct kref *kref)
{ … }

static void iommufd_put_group(struct iommufd_group *group)
{ … }

static bool iommufd_group_try_get(struct iommufd_group *igroup,
				  struct iommu_group *group)
{ … }

/*
 * iommufd needs to store some more data for each iommu_group, we keep a
 * parallel xarray indexed by iommu_group id to hold this instead of putting it
 * in the core structure. To keep things simple the iommufd_group memory is
 * unique within the iommufd_ctx. This makes it easy to check there are no
 * memory leaks.
 */
static struct iommufd_group *iommufd_get_group(struct iommufd_ctx *ictx,
					       struct device *dev)
{ … }

void iommufd_device_destroy(struct iommufd_object *obj)
{ … }

/**
 * iommufd_device_bind - Bind a physical device to an iommu fd
 * @ictx: iommufd file descriptor
 * @dev: Pointer to a physical device struct
 * @id: Output ID number to return to userspace for this device
 *
 * A successful bind establishes an ownership over the device and returns
 * struct iommufd_device pointer, otherwise returns error pointer.
 *
 * A driver using this API must set driver_managed_dma and must not touch
 * the device until this routine succeeds and establishes ownership.
 *
 * Binding a PCI device places the entire RID under iommufd control.
 *
 * The caller must undo this with iommufd_device_unbind()
 */
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
					   struct device *dev, u32 *id)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

/**
 * iommufd_ctx_has_group - True if any device within the group is bound
 *                         to the ictx
 * @ictx: iommufd file descriptor
 * @group: Pointer to a physical iommu_group struct
 *
 * True if any device within the group has been bound to this ictx, ex. via
 * iommufd_device_bind(), therefore implying ictx ownership of the group.
 */
bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group)
{ … }
EXPORT_SYMBOL_NS_GPL(…);
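/*
 * Hypothetical usage sketch, not part of this file's implementation: a
 * driver that has set .driver_managed_dma = true could take DMA ownership
 * of its device roughly like this. The function name and error handling
 * are illustrative assumptions; only the iommufd_device_bind() call and
 * its signature come from this file.
 */
static struct iommufd_device *
example_take_device(struct iommufd_ctx *ictx, struct device *dev)
{
	struct iommufd_device *idev;
	u32 dev_id;

	/* Establish ownership; the device must not be touched on failure */
	idev = iommufd_device_bind(ictx, dev, &dev_id);
	if (IS_ERR(idev))
		return idev;

	/*
	 * dev_id would be reported back to userspace; ownership is later
	 * released with iommufd_device_unbind().
	 */
	return idev;
}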
/**
 * iommufd_device_unbind - Undo iommufd_device_bind()
 * @idev: Device returned by iommufd_device_bind()
 *
 * Release the device from iommufd control. DMA ownership returns to unowned
 * with DMA controlled by the DMA API. This invalidates the iommufd_device
 * pointer; other APIs that consume it must not be called concurrently.
 */
void iommufd_device_unbind(struct iommufd_device *idev)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

u32 iommufd_device_to_id(struct iommufd_device *idev)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

static int iommufd_group_setup_msi(struct iommufd_group *igroup,
				   struct iommufd_hwpt_paging *hwpt_paging)
{ … }

static int iommufd_hwpt_paging_attach(struct iommufd_hwpt_paging *hwpt_paging,
				      struct iommufd_device *idev)
{ … }

int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
				struct iommufd_device *idev)
{ … }

struct iommufd_hw_pagetable *
iommufd_hw_pagetable_detach(struct iommufd_device *idev)
{ … }

static struct iommufd_hw_pagetable *
iommufd_device_do_attach(struct iommufd_device *idev,
			 struct iommufd_hw_pagetable *hwpt)
{ … }

static void
iommufd_group_remove_reserved_iova(struct iommufd_group *igroup,
				   struct iommufd_hwpt_paging *hwpt_paging)
{ … }

static int
iommufd_group_do_replace_paging(struct iommufd_group *igroup,
				struct iommufd_hwpt_paging *hwpt_paging)
{ … }

static struct iommufd_hw_pagetable *
iommufd_device_do_replace(struct iommufd_device *idev,
			  struct iommufd_hw_pagetable *hwpt)
{ … }

typedef struct iommufd_hw_pagetable *(*attach_fn)(
	struct iommufd_device *idev, struct iommufd_hw_pagetable *hwpt);

/*
 * When automatically managing the domains we search for a compatible domain in
 * the iopt and if one is found use it, otherwise create a new domain.
 * Automatic domain selection will never pick a manually created domain.
 */
static struct iommufd_hw_pagetable *
iommufd_device_auto_get_domain(struct iommufd_device *idev,
			       struct iommufd_ioas *ioas, u32 *pt_id,
			       attach_fn do_attach)
{ … }

static int iommufd_device_change_pt(struct iommufd_device *idev, u32 *pt_id,
				    attach_fn do_attach)
{ … }

/**
 * iommufd_device_attach - Connect a device to an iommu_domain
 * @idev: device to attach
 * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HWPT_PAGING
 *         Output the IOMMUFD_OBJ_HWPT_PAGING ID
 *
 * This connects the device to an iommu_domain, either automatically or
 * manually selected. Once this completes the device can do DMA.
 *
 * The caller should return the resulting pt_id back to userspace.
 * This function is undone by calling iommufd_device_detach().
 */
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

/**
 * iommufd_device_replace - Change the device's iommu_domain
 * @idev: device to change
 * @pt_id: Input a IOMMUFD_OBJ_IOAS, or IOMMUFD_OBJ_HWPT_PAGING
 *         Output the IOMMUFD_OBJ_HWPT_PAGING ID
 *
 * This is the same as::
 *
 *   iommufd_device_detach();
 *   iommufd_device_attach();
 *
 * If it fails then no change is made to the attachment. The iommu driver may
 * implement this so there is no disruption in translation. This can only be
 * called if iommufd_device_attach() has already succeeded.
 */
int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

/**
 * iommufd_device_detach - Disconnect a device from an iommu_domain
 * @idev: device to detach
 *
 * Undo iommufd_device_attach(). This disconnects the idev from the previously
 * attached pt_id. The device returns to blocked DMA translation.
 */
void iommufd_device_detach(struct iommufd_device *idev)
{ … }
EXPORT_SYMBOL_NS_GPL(…);
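/*
 * Hypothetical usage sketch, not part of this file's implementation:
 * connecting a bound device to a page table and later moving it to another
 * one. The pt ids are IOMMUFD_OBJ_IOAS or IOMMUFD_OBJ_HWPT_PAGING ids that
 * would normally come from userspace; the wrapper function itself is an
 * illustrative assumption.
 */
static int example_attach_then_replace(struct iommufd_device *idev,
				       u32 first_pt, u32 second_pt)
{
	u32 pt_id = first_pt;
	int rc;

	rc = iommufd_device_attach(idev, &pt_id);
	if (rc)
		return rc;
	/* pt_id now holds the IOMMUFD_OBJ_HWPT_PAGING id actually attached */

	pt_id = second_pt;
	rc = iommufd_device_replace(idev, &pt_id);
	if (rc) {
		/* A failed replace leaves the original attachment untouched */
		iommufd_device_detach(idev);
		return rc;
	}

	iommufd_device_detach(idev);
	return 0;
}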
/*
 * On success, it will refcount_inc() at a valid new_ioas and refcount_dec() at
 * a valid cur_ioas (access->ioas). A caller passing in a valid new_ioas should
 * call iommufd_put_object() if it does an iommufd_get_object() for a new_ioas.
 */
static int iommufd_access_change_ioas(struct iommufd_access *access,
				      struct iommufd_ioas *new_ioas)
{ … }

static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
{ … }

void iommufd_access_destroy_object(struct iommufd_object *obj)
{ … }

/**
 * iommufd_access_create - Create an iommufd_access
 * @ictx: iommufd file descriptor
 * @ops: Driver's ops to associate with the access
 * @data: Opaque data to pass into ops functions
 * @id: Output ID number to return to userspace for this access
 *
 * An iommufd_access allows a driver to read/write to the IOAS without using
 * DMA. The underlying CPU memory can be accessed using the
 * iommufd_access_pin_pages() or iommufd_access_rw() functions.
 *
 * The provided ops are required to use iommufd_access_pin_pages().
 */
struct iommufd_access *
iommufd_access_create(struct iommufd_ctx *ictx,
		      const struct iommufd_access_ops *ops, void *data, u32 *id)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

/**
 * iommufd_access_destroy - Destroy an iommufd_access
 * @access: The access to destroy
 *
 * The caller must stop using the access before destroying it.
 */
void iommufd_access_destroy(struct iommufd_access *access)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

void iommufd_access_detach(struct iommufd_access *access)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

/**
 * iommufd_access_notify_unmap - Notify users of an iopt to stop using it
 * @iopt: iopt to work on
 * @iova: Starting iova in the iopt
 * @length: Number of bytes
 *
 * After this function returns there should be no users attached to the pages
 * linked to this iopt that intersect with iova/length. Anyone that has attached
 * a user through iopt_access_pages() needs to detach it through
 * iommufd_access_unpin_pages() before this function returns.
 *
 * iommufd_access_destroy() will wait for any outstanding unmap callback to
 * complete. Once iommufd_access_destroy() returns, no unmap ops are running or
 * will run in the future. Due to this a driver must not create locking that
 * prevents unmap from completing while iommufd_access_destroy() is running.
 */
void iommufd_access_notify_unmap(struct io_pagetable *iopt, unsigned long iova,
				 unsigned long length)
{ … }

/**
 * iommufd_access_unpin_pages() - Undo iommufd_access_pin_pages
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 *
 * Return the struct pages. The caller must stop accessing them before calling
 * this. The iova/length must exactly match the one provided to access_pages.
 */
void iommufd_access_unpin_pages(struct iommufd_access *access,
				unsigned long iova, unsigned long length)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

static bool iopt_area_contig_is_aligned(struct iopt_area_contig_iter *iter)
{ … }

static bool check_area_prot(struct iopt_area *area, unsigned int flags)
{ … }
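/*
 * Hypothetical usage sketch, not part of this file's implementation: an
 * emulated-DMA driver creating an access against an IOAS. The unmap callback
 * must drop any pins overlapping the range before it returns. The layout of
 * struct iommufd_access_ops (needs_pin_pages, unmap) is assumed from
 * <linux/iommufd.h>; the example names are illustrative.
 */
static void example_access_unmap(void *data, unsigned long iova,
				 unsigned long length)
{
	/* Call iommufd_access_unpin_pages() for any pins in iova/length */
}

static const struct iommufd_access_ops example_access_ops = {
	.needs_pin_pages = 1,
	.unmap = example_access_unmap,
};

static struct iommufd_access *example_open_access(struct iommufd_ctx *ictx,
						  u32 ioas_id, void *priv)
{
	struct iommufd_access *access;
	u32 access_id;
	int rc;

	access = iommufd_access_create(ictx, &example_access_ops, priv,
				       &access_id);
	if (IS_ERR(access))
		return access;

	rc = iommufd_access_attach(access, ioas_id);
	if (rc) {
		iommufd_access_destroy(access);
		return ERR_PTR(rc);
	}
	return access;
}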
/**
 * iommufd_access_pin_pages() - Return a list of pages under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @length: Number of bytes to access
 * @out_pages: Output page list
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Reads @length bytes starting at iova and returns the struct page * pointers.
 * These can be kmap'd by the caller for CPU access.
 *
 * The caller must perform iommufd_access_unpin_pages() when done to balance
 * this.
 *
 * This API always requires a page aligned iova. This happens naturally if the
 * ioas alignment is >= PAGE_SIZE and the iova is PAGE_SIZE aligned. However,
 * smaller alignments have corner cases where this API can fail on otherwise
 * aligned iova.
 */
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
			     unsigned long length, struct page **out_pages,
			     unsigned int flags)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

/**
 * iommufd_access_rw - Read or write data under the iova
 * @access: IOAS access to act on
 * @iova: Starting IOVA
 * @data: Kernel buffer to copy to/from
 * @length: Number of bytes to access
 * @flags: IOMMUFD_ACCESS_RW_* flags
 *
 * Copy data to/from the kernel buffer and the range given by IOVA/length. If
 * flags indicates IOMMUFD_ACCESS_RW_KTHREAD then a large copy can be optimized
 * by changing it into copy_to/from_user().
 */
int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
		      void *data, size_t length, unsigned int flags)
{ … }
EXPORT_SYMBOL_NS_GPL(…);

int iommufd_get_hw_info(struct iommufd_ucmd *ucmd)
{ … }
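/*
 * Hypothetical usage sketch, not part of this file's implementation: copying
 * a small buffer into the IOAS with iommufd_access_rw() and pinning one
 * page-aligned page for direct CPU access. The wrapper function and the
 * choice of a read-only pin (flags == 0) are illustrative assumptions.
 */
static int example_access_io(struct iommufd_access *access, unsigned long iova,
			     void *buf, size_t len)
{
	struct page *page;
	int rc;

	/* Write buf into the IOAS range iova/len */
	rc = iommufd_access_rw(access, iova, buf, len,
			       IOMMUFD_ACCESS_RW_WRITE);
	if (rc)
		return rc;

	/* Pin a single page; the pinned iova must be PAGE_SIZE aligned */
	rc = iommufd_access_pin_pages(access, iova & PAGE_MASK, PAGE_SIZE,
				      &page, 0);
	if (rc)
		return rc;

	/* kmap_local_page(page) could be used here for CPU access */

	/* Unpin with exactly the same iova/length as the pin */
	iommufd_access_unpin_pages(access, iova & PAGE_MASK, PAGE_SIZE);
	return 0;
}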