/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2012-2014 Intel Corporation
 *
 * Based on amdgpu_mn, which bears the following notice:
 *
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

/*
 * Authors:
 *	Christian König <[email protected]>
 */

#include <linux/mmu_context.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/sched/mm.h>

#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
#include "i915_scatterlist.h"

#ifdef CONFIG_MMU_NOTIFIER

/**
 * i915_gem_userptr_invalidate - callback to notify about mm change
 *
 * @mni: the interval notifier for the range (mm) about to be updated
 * @range: details on the invalidation
 * @cur_seq: value to pass to mmu_interval_set_seq()
 *
 * Block for operations on BOs to finish and mark pages as accessed and
 * potentially dirty.
 */
static bool i915_gem_userptr_invalidate(struct mmu_interval_notifier *mni,
					const struct mmu_notifier_range *range,
					unsigned long cur_seq)
{ … }

static const struct mmu_interval_notifier_ops i915_gem_userptr_notifier_ops = …;

static int
i915_gem_userptr_init__mmu_notifier(struct drm_i915_gem_object *obj)
{ … }

static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj)
{ … }

static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
{ … }

static void
i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{ … }

static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj)
{ … }

int i915_gem_object_userptr_submit_init(struct drm_i915_gem_object *obj)
{ … }

int i915_gem_object_userptr_submit_done(struct drm_i915_gem_object *obj)
{ … }

int i915_gem_object_userptr_validate(struct drm_i915_gem_object *obj)
{ … }

static void
i915_gem_userptr_release(struct drm_i915_gem_object *obj)
{ … }

static int
i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
{ … }

static int
i915_gem_userptr_pwrite(struct drm_i915_gem_object *obj,
			const struct drm_i915_gem_pwrite *args)
{ … }

static int
i915_gem_userptr_pread(struct drm_i915_gem_object *obj,
		       const struct drm_i915_gem_pread *args)
{ … }

static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = …;

#endif
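/*
 * Sanity check an address range before a userptr object is created:
 * probe_range() backs the I915_USERPTR_PROBE flag and should fail with an
 * error unless [addr, addr + len) is fully covered by VMAs that are valid
 * targets for a userptr (in particular, not VM_IO or VM_PFNMAP mappings).
 */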
static int
probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len)
{ … }

/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e. ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite() and pread(). In practice, you are expected to
 * use direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{ … }
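/*
 * A minimal userspace sketch of the call sequence above (illustrative
 * only; fd, ptr and size are hypothetical, drmIoctl() comes from libdrm's
 * xf86drm.h, and the uapi struct lives in include/uapi/drm/i915_drm.h):
 *
 *	struct drm_i915_gem_userptr arg = {
 *		.user_ptr = (__u64)(uintptr_t)ptr,	// page-aligned start
 *		.user_size = size,			// page-aligned length
 *		.flags = 0,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
 *		return -errno;
 *
 *	// On success, arg.handle names a GEM object backed by the memory
 *	// at ptr; access the pages through the pointer itself, not the
 *	// banned mmap/pwrite/pread ioctls.
 */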