// SPDX-License-Identifier: GPL-2.0-only /* * Helpers for the host side of a virtio ring. * * Since these may be in userspace, we use (inline) accessors. */ #include <linux/compiler.h> #include <linux/module.h> #include <linux/vringh.h> #include <linux/virtio_ring.h> #include <linux/kernel.h> #include <linux/ratelimit.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/export.h> #if IS_REACHABLE(CONFIG_VHOST_IOTLB) #include <linux/bvec.h> #include <linux/highmem.h> #include <linux/vhost_iotlb.h> #endif #include <uapi/linux/virtio_config.h> static __printf(1,2) __cold void vringh_bad(const char *fmt, ...) { … } /* Returns vring->num if empty, -ve on error. */ static inline int __vringh_get_head(const struct vringh *vrh, int (*getu16)(const struct vringh *vrh, u16 *val, const __virtio16 *p), u16 *last_avail_idx) { … } /** * vringh_kiov_advance - skip bytes from vring_kiov * @iov: an iov passed to vringh_getdesc_*() (updated as we consume) * @len: the maximum length to advance */ void vringh_kiov_advance(struct vringh_kiov *iov, size_t len) { … } EXPORT_SYMBOL(…); /* Copy some bytes to/from the iovec. Returns num copied. */ static inline ssize_t vringh_iov_xfer(struct vringh *vrh, struct vringh_kiov *iov, void *ptr, size_t len, int (*xfer)(const struct vringh *vrh, void *addr, void *ptr, size_t len)) { … } /* May reduce *len if range is shorter. */ static inline bool range_check(struct vringh *vrh, u64 addr, size_t *len, struct vringh_range *range, bool (*getrange)(struct vringh *, u64, struct vringh_range *)) { … } static inline bool no_range_check(struct vringh *vrh, u64 addr, size_t *len, struct vringh_range *range, bool (*getrange)(struct vringh *, u64, struct vringh_range *)) { … } /* No reason for this code to be inline. 
*/ static int move_to_indirect(const struct vringh *vrh, int *up_next, u16 *i, void *addr, const struct vring_desc *desc, struct vring_desc **descs, int *desc_max) { … } static int resize_iovec(struct vringh_kiov *iov, gfp_t gfp) { … } static u16 __cold return_from_indirect(const struct vringh *vrh, int *up_next, struct vring_desc **descs, int *desc_max) { … } static int slow_copy(struct vringh *vrh, void *dst, const void *src, bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len, struct vringh_range *range, bool (*getrange)(struct vringh *vrh, u64, struct vringh_range *)), bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r), struct vringh_range *range, int (*copy)(const struct vringh *vrh, void *dst, const void *src, size_t len)) { … } static inline int __vringh_iov(struct vringh *vrh, u16 i, struct vringh_kiov *riov, struct vringh_kiov *wiov, bool (*rcheck)(struct vringh *vrh, u64 addr, size_t *len, struct vringh_range *range, bool (*getrange)(struct vringh *, u64, struct vringh_range *)), bool (*getrange)(struct vringh *, u64, struct vringh_range *), gfp_t gfp, int (*copy)(const struct vringh *vrh, void *dst, const void *src, size_t len)) { … } static inline int __vringh_complete(struct vringh *vrh, const struct vring_used_elem *used, unsigned int num_used, int (*putu16)(const struct vringh *vrh, __virtio16 *p, u16 val), int (*putused)(const struct vringh *vrh, struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned num)) { … } static inline int __vringh_need_notify(struct vringh *vrh, int (*getu16)(const struct vringh *vrh, u16 *val, const __virtio16 *p)) { … } static inline bool __vringh_notify_enable(struct vringh *vrh, int (*getu16)(const struct vringh *vrh, u16 *val, const __virtio16 *p), int (*putu16)(const struct vringh *vrh, __virtio16 *p, u16 val)) { … } static inline void __vringh_notify_disable(struct vringh *vrh, int (*putu16)(const struct vringh *vrh, __virtio16 *p, u16 val)) { … } /* Userspace access helpers: 
in this case, addresses are really userspace. */ static inline int getu16_user(const struct vringh *vrh, u16 *val, const __virtio16 *p) { … } static inline int putu16_user(const struct vringh *vrh, __virtio16 *p, u16 val) { … } static inline int copydesc_user(const struct vringh *vrh, void *dst, const void *src, size_t len) { … } static inline int putused_user(const struct vringh *vrh, struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { … } static inline int xfer_from_user(const struct vringh *vrh, void *src, void *dst, size_t len) { … } static inline int xfer_to_user(const struct vringh *vrh, void *dst, void *src, size_t len) { … } /** * vringh_init_user - initialize a vringh for a userspace vring. * @vrh: the vringh to initialize. * @features: the feature bits for this ring. * @num: the number of elements. * @weak_barriers: true if we only need memory barriers, not I/O. * @desc: the userspace descriptor pointer. * @avail: the userspace avail pointer. * @used: the userspace used pointer. * * Returns an error if num is invalid: you should check pointers * yourself! */ int vringh_init_user(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, vring_desc_t __user *desc, vring_avail_t __user *avail, vring_used_t __user *used) { … } EXPORT_SYMBOL(…); /** * vringh_getdesc_user - get next available descriptor from userspace ring. * @vrh: the userspace vring. * @riov: where to put the readable descriptors (or NULL) * @wiov: where to put the writable descriptors (or NULL) * @getrange: function to call to check ranges. * @head: head index we received, for passing to vringh_complete_user(). * * Returns 0 if there was no descriptor, 1 if there was, or -errno. * * Note that on error return, you can tell the difference between an * invalid ring and a single invalid descriptor: in the former case, * *head will be vrh->vring.num. 
You may be able to ignore an invalid * descriptor, but there's not much you can do with an invalid ring. * * Note that you can reuse riov and wiov with subsequent calls. Content is * overwritten and memory reallocated if more space is needed. * When you don't have to use riov and wiov anymore, you should clean them up * by calling vringh_iov_cleanup() to release the memory, even on error! */ int vringh_getdesc_user(struct vringh *vrh, struct vringh_iov *riov, struct vringh_iov *wiov, bool (*getrange)(struct vringh *vrh, u64 addr, struct vringh_range *r), u16 *head) { … } EXPORT_SYMBOL(…); /** * vringh_iov_pull_user - copy bytes from vring_iov. * @riov: the riov as passed to vringh_getdesc_user() (updated as we consume) * @dst: the place to copy. * @len: the maximum length to copy. * * Returns the bytes copied <= len or a negative errno. */ ssize_t vringh_iov_pull_user(struct vringh_iov *riov, void *dst, size_t len) { … } EXPORT_SYMBOL(…); /** * vringh_iov_push_user - copy bytes into vring_iov. * @wiov: the wiov as passed to vringh_getdesc_user() (updated as we consume) * @src: the place to copy from. * @len: the maximum length to copy. * * Returns the bytes copied <= len or a negative errno. */ ssize_t vringh_iov_push_user(struct vringh_iov *wiov, const void *src, size_t len) { … } EXPORT_SYMBOL(…); /** * vringh_abandon_user - we've decided not to handle the descriptor(s). * @vrh: the vring. * @num: the number of descriptors to put back (ie. num * vringh_get_user() to undo). * * The next vringh_get_user() will return the old descriptor(s) again. */ void vringh_abandon_user(struct vringh *vrh, unsigned int num) { … } EXPORT_SYMBOL(…); /** * vringh_complete_user - we've finished with descriptor, publish it. * @vrh: the vring. * @head: the head as filled in by vringh_getdesc_user. * @len: the length of data we have written. * * You should check vringh_need_notify_user() after one or more calls * to this function. 
*/ int vringh_complete_user(struct vringh *vrh, u16 head, u32 len) { … } EXPORT_SYMBOL(…); /** * vringh_complete_multi_user - we've finished with many descriptors. * @vrh: the vring. * @used: the head, length pairs. * @num_used: the number of used elements. * * You should check vringh_need_notify_user() after one or more calls * to this function. */ int vringh_complete_multi_user(struct vringh *vrh, const struct vring_used_elem used[], unsigned num_used) { … } EXPORT_SYMBOL(…); /** * vringh_notify_enable_user - we want to know if something changes. * @vrh: the vring. * * This always enables notifications, but returns false if there are * now more buffers available in the vring. */ bool vringh_notify_enable_user(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /** * vringh_notify_disable_user - don't tell us if something changes. * @vrh: the vring. * * This is our normal running state: we disable and then only enable when * we're going to sleep. */ void vringh_notify_disable_user(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /** * vringh_need_notify_user - must we tell the other side about used buffers? * @vrh: the vring we've called vringh_complete_user() on. * * Returns -errno or 0 if we don't need to tell the other side, 1 if we do. */ int vringh_need_notify_user(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /* Kernelspace access helpers. 
*/ static inline int getu16_kern(const struct vringh *vrh, u16 *val, const __virtio16 *p) { … } static inline int putu16_kern(const struct vringh *vrh, __virtio16 *p, u16 val) { … } static inline int copydesc_kern(const struct vringh *vrh, void *dst, const void *src, size_t len) { … } static inline int putused_kern(const struct vringh *vrh, struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { … } static inline int xfer_kern(const struct vringh *vrh, void *src, void *dst, size_t len) { … } static inline int kern_xfer(const struct vringh *vrh, void *dst, void *src, size_t len) { … } /** * vringh_init_kern - initialize a vringh for a kernelspace vring. * @vrh: the vringh to initialize. * @features: the feature bits for this ring. * @num: the number of elements. * @weak_barriers: true if we only need memory barriers, not I/O. * @desc: the kernelspace descriptor pointer. * @avail: the kernelspace avail pointer. * @used: the kernelspace used pointer. * * Returns an error if num is invalid. */ int vringh_init_kern(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc *desc, struct vring_avail *avail, struct vring_used *used) { … } EXPORT_SYMBOL(…); /** * vringh_getdesc_kern - get next available descriptor from kernelspace ring. * @vrh: the kernelspace vring. * @riov: where to put the readable descriptors (or NULL) * @wiov: where to put the writable descriptors (or NULL) * @head: head index we received, for passing to vringh_complete_kern(). * @gfp: flags for allocating larger riov/wiov. * * Returns 0 if there was no descriptor, 1 if there was, or -errno. * * Note that on error return, you can tell the difference between an * invalid ring and a single invalid descriptor: in the former case, * *head will be vrh->vring.num. You may be able to ignore an invalid * descriptor, but there's not much you can do with an invalid ring. * * Note that you can reuse riov and wiov with subsequent calls. 
Content is * overwritten and memory reallocated if more space is needed. * When you don't have to use riov and wiov anymore, you should clean them up * by calling vringh_kiov_cleanup() to release the memory, even on error! */ int vringh_getdesc_kern(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp) { … } EXPORT_SYMBOL(…); /** * vringh_iov_pull_kern - copy bytes from vring_iov. * @riov: the riov as passed to vringh_getdesc_kern() (updated as we consume) * @dst: the place to copy. * @len: the maximum length to copy. * * Returns the bytes copied <= len or a negative errno. */ ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len) { … } EXPORT_SYMBOL(…); /** * vringh_iov_push_kern - copy bytes into vring_iov. * @wiov: the wiov as passed to vringh_getdesc_kern() (updated as we consume) * @src: the place to copy from. * @len: the maximum length to copy. * * Returns the bytes copied <= len or a negative errno. */ ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, const void *src, size_t len) { … } EXPORT_SYMBOL(…); /** * vringh_abandon_kern - we've decided not to handle the descriptor(s). * @vrh: the vring. * @num: the number of descriptors to put back (ie. num * vringh_get_kern() to undo). * * The next vringh_get_kern() will return the old descriptor(s) again. */ void vringh_abandon_kern(struct vringh *vrh, unsigned int num) { … } EXPORT_SYMBOL(…); /** * vringh_complete_kern - we've finished with descriptor, publish it. * @vrh: the vring. * @head: the head as filled in by vringh_getdesc_kern. * @len: the length of data we have written. * * You should check vringh_need_notify_kern() after one or more calls * to this function. */ int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len) { … } EXPORT_SYMBOL(…); /** * vringh_notify_enable_kern - we want to know if something changes. * @vrh: the vring. 
* * This always enables notifications, but returns false if there are * now more buffers available in the vring. */ bool vringh_notify_enable_kern(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /** * vringh_notify_disable_kern - don't tell us if something changes. * @vrh: the vring. * * This is our normal running state: we disable and then only enable when * we're going to sleep. */ void vringh_notify_disable_kern(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /** * vringh_need_notify_kern - must we tell the other side about used buffers? * @vrh: the vring we've called vringh_complete_kern() on. * * Returns -errno or 0 if we don't need to tell the other side, 1 if we do. */ int vringh_need_notify_kern(struct vringh *vrh) { … } EXPORT_SYMBOL(…); #if IS_REACHABLE(CONFIG_VHOST_IOTLB) struct iotlb_vec { … }; static int iotlb_translate(const struct vringh *vrh, u64 addr, u64 len, u64 *translated, struct iotlb_vec *ivec, u32 perm) { … } #define IOTLB_IOV_STRIDE … static inline int copy_from_iotlb(const struct vringh *vrh, void *dst, void *src, size_t len) { … } static inline int copy_to_iotlb(const struct vringh *vrh, void *dst, void *src, size_t len) { … } static inline int getu16_iotlb(const struct vringh *vrh, u16 *val, const __virtio16 *p) { … } static inline int putu16_iotlb(const struct vringh *vrh, __virtio16 *p, u16 val) { … } static inline int copydesc_iotlb(const struct vringh *vrh, void *dst, const void *src, size_t len) { … } static inline int xfer_from_iotlb(const struct vringh *vrh, void *src, void *dst, size_t len) { … } static inline int xfer_to_iotlb(const struct vringh *vrh, void *dst, void *src, size_t len) { … } static inline int putused_iotlb(const struct vringh *vrh, struct vring_used_elem *dst, const struct vring_used_elem *src, unsigned int num) { … } /** * vringh_init_iotlb - initialize a vringh for a ring with IOTLB. * @vrh: the vringh to initialize. * @features: the feature bits for this ring. * @num: the number of elements. 
* @weak_barriers: true if we only need memory barriers, not I/O. * @desc: the userspace descriptor pointer. * @avail: the userspace avail pointer. * @used: the userspace used pointer. * * Returns an error if num is invalid. */ int vringh_init_iotlb(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc *desc, struct vring_avail *avail, struct vring_used *used) { … } EXPORT_SYMBOL(…); /** * vringh_init_iotlb_va - initialize a vringh for a ring with IOTLB containing * user VA. * @vrh: the vringh to initialize. * @features: the feature bits for this ring. * @num: the number of elements. * @weak_barriers: true if we only need memory barriers, not I/O. * @desc: the userspace descriptor pointer. * @avail: the userspace avail pointer. * @used: the userspace used pointer. * * Returns an error if num is invalid. */ int vringh_init_iotlb_va(struct vringh *vrh, u64 features, unsigned int num, bool weak_barriers, struct vring_desc *desc, struct vring_avail *avail, struct vring_used *used) { … } EXPORT_SYMBOL(…); /** * vringh_set_iotlb - initialize a vringh for a ring with IOTLB. * @vrh: the vring * @iotlb: iotlb associated with this vring * @iotlb_lock: spinlock to synchronize the iotlb accesses */ void vringh_set_iotlb(struct vringh *vrh, struct vhost_iotlb *iotlb, spinlock_t *iotlb_lock) { … } EXPORT_SYMBOL(…); /** * vringh_getdesc_iotlb - get next available descriptor from ring with * IOTLB. * @vrh: the kernelspace vring. * @riov: where to put the readable descriptors (or NULL) * @wiov: where to put the writable descriptors (or NULL) * @head: head index we received, for passing to vringh_complete_iotlb(). * @gfp: flags for allocating larger riov/wiov. * * Returns 0 if there was no descriptor, 1 if there was, or -errno. * * Note that on error return, you can tell the difference between an * invalid ring and a single invalid descriptor: in the former case, * *head will be vrh->vring.num. 
You may be able to ignore an invalid * descriptor, but there's not much you can do with an invalid ring. * * Note that you can reuse riov and wiov with subsequent calls. Content is * overwritten and memory reallocated if more space is needed. * When you don't have to use riov and wiov anymore, you should clean them up * by calling vringh_kiov_cleanup() to release the memory, even on error! */ int vringh_getdesc_iotlb(struct vringh *vrh, struct vringh_kiov *riov, struct vringh_kiov *wiov, u16 *head, gfp_t gfp) { … } EXPORT_SYMBOL(…); /** * vringh_iov_pull_iotlb - copy bytes from vring_iov. * @vrh: the vring. * @riov: the riov as passed to vringh_getdesc_iotlb() (updated as we consume) * @dst: the place to copy. * @len: the maximum length to copy. * * Returns the bytes copied <= len or a negative errno. */ ssize_t vringh_iov_pull_iotlb(struct vringh *vrh, struct vringh_kiov *riov, void *dst, size_t len) { … } EXPORT_SYMBOL(…); /** * vringh_iov_push_iotlb - copy bytes into vring_iov. * @vrh: the vring. * @wiov: the wiov as passed to vringh_getdesc_iotlb() (updated as we consume) * @src: the place to copy from. * @len: the maximum length to copy. * * Returns the bytes copied <= len or a negative errno. */ ssize_t vringh_iov_push_iotlb(struct vringh *vrh, struct vringh_kiov *wiov, const void *src, size_t len) { … } EXPORT_SYMBOL(…); /** * vringh_abandon_iotlb - we've decided not to handle the descriptor(s). * @vrh: the vring. * @num: the number of descriptors to put back (ie. num * vringh_get_iotlb() to undo). * * The next vringh_get_iotlb() will return the old descriptor(s) again. */ void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num) { … } EXPORT_SYMBOL(…); /** * vringh_complete_iotlb - we've finished with descriptor, publish it. * @vrh: the vring. * @head: the head as filled in by vringh_getdesc_iotlb. * @len: the length of data we have written. * * You should check vringh_need_notify_iotlb() after one or more calls * to this function. 
*/ int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len) { … } EXPORT_SYMBOL(…); /** * vringh_notify_enable_iotlb - we want to know if something changes. * @vrh: the vring. * * This always enables notifications, but returns false if there are * now more buffers available in the vring. */ bool vringh_notify_enable_iotlb(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /** * vringh_notify_disable_iotlb - don't tell us if something changes. * @vrh: the vring. * * This is our normal running state: we disable and then only enable when * we're going to sleep. */ void vringh_notify_disable_iotlb(struct vringh *vrh) { … } EXPORT_SYMBOL(…); /** * vringh_need_notify_iotlb - must we tell the other side about used buffers? * @vrh: the vring we've called vringh_complete_iotlb() on. * * Returns -errno or 0 if we don't need to tell the other side, 1 if we do. */ int vringh_need_notify_iotlb(struct vringh *vrh) { … } EXPORT_SYMBOL(…); #endif MODULE_DESCRIPTION(…) …; MODULE_LICENSE(…) …;