// SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "vmwgfx_binding.h" #include "vmwgfx_bo.h" #include "vmwgfx_drv.h" #include "vmwgfx_mksstat.h" #include "vmwgfx_so.h" #include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> #include <linux/sync_file.h> #include <linux/hashtable.h> #include <linux/vmalloc.h> /* * Helper macro to get dx_ctx_node if available; otherwise print an error * message. This is for use in command verifier functions where, if dx_ctx_node * is not set, the command is invalid. */ #define VMW_GET_CTX_NODE(__sw_context) … #define VMW_DECLARE_CMD_VAR(__var, __type) … /** * struct vmw_relocation - Buffer object relocation * * @head: List head for the command submission context's relocation list * @vbo: Non-ref-counted pointer to the buffer object * @mob_loc: Pointer to the location of the mob id to be modified * @location: Pointer to the location of the guest pointer to be modified */ struct vmw_relocation { … }; /** * enum vmw_resource_relocation_type - Relocation type for resources * * @vmw_res_rel_normal: Traditional relocation. The resource id in the * command stream is replaced with the actual id after validation. * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced * with a NOP. * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after * validation is -1, the command is replaced with a NOP. Otherwise no action. * @vmw_res_rel_max: Last value in the enum - used for error checking */ enum vmw_resource_relocation_type { … }; /** * struct vmw_resource_relocation - Relocation info for resources * * @head: List head for the software context's relocation list. * @res: Non-ref-counted pointer to the resource. * @offset: Byte offset into the command buffer where the id that needs fixup * is located. * @rel_type: Type of relocation.
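 *
 * As an illustrative sketch (not a quote of the driver code; it assumes the
 * SVGA_3D_CMD_NOP opcode from the device headers), applying one relocation at
 * patch time amounts to:
 *
 *   u32 *addr = (u32 *)((unsigned long)cb + rel->offset);
 *
 *   switch (rel->rel_type) {
 *   case vmw_res_rel_normal:      // patch in the id assigned at validation
 *           *addr = rel->res->id;
 *           break;
 *   case vmw_res_rel_nop:         // unconditionally neutralize the command
 *           *addr = SVGA_3D_CMD_NOP;
 *           break;
 *   case vmw_res_rel_cond_nop:    // neutralize only if validation gave -1
 *           if (rel->res->id == -1)
 *                   *addr = SVGA_3D_CMD_NOP;
 *           break;
 *   default:
 *           break;
 *   }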
*/ struct vmw_resource_relocation { … }; /** * struct vmw_ctx_validation_info - Extra validation metadata for contexts * * @head: List head of context list * @ctx: The context resource * @cur: The context's persistent binding state * @staged: The binding state changes of this command buffer */ struct vmw_ctx_validation_info { … }; /** * struct vmw_cmd_entry - Describe a command for the verifier * * @func: Call-back to handle the command. * @user_allow: Whether allowed from the execbuf ioctl. * @gb_disable: Whether disabled if guest-backed objects are available. * @gb_enable: Whether enabled iff guest-backed objects are available. * @cmd_name: Name of the command. */ struct vmw_cmd_entry { … }; #define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) … static int vmw_resource_context_res_add(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *ctx); static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, struct vmw_bo **vmw_bo_p); /** * vmw_ptr_diff - Compute the offset from a to b in bytes * * @a: A starting pointer. * @b: A pointer offset in the same address space. * * Returns: The offset in bytes between the two pointers. */ static size_t vmw_ptr_diff(void *a, void *b) { … } /** * vmw_execbuf_bindings_commit - Commit modified binding state * * @sw_context: The command submission context * @backoff: Whether this is part of the error path and binding state changes * should be ignored */ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context, bool backoff) { … } /** * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced * * @sw_context: The command submission context */ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context) { … } /** * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to * the validation list. * * @dev_priv: Pointer to the device private. * @sw_context: The command submission context * @res: Pointer to the resource * @node: The validation node holding the context resource metadata */ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *res, struct vmw_ctx_validation_info *node) { … } /** * vmw_execbuf_res_size - Calculate extra size for the resource validation node * * @dev_priv: Pointer to the device private struct. * @res_type: The resource type. * * Guest-backed contexts and DX contexts require extra size to store execbuf * private information in the validation node. Typically the binding manager's * associated data structures. * * Returns: The extra size requirement based on resource type. */ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv, enum vmw_res_type res_type) { … } /** * vmw_execbuf_rcache_update - Update a resource-node cache entry * * @rcache: Pointer to the entry to update. * @res: Pointer to the resource. * @private: Pointer to the execbuf-private space in the resource validation * node. */ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache, struct vmw_resource *res, void *private) { … } enum vmw_val_add_flags { … }; /** * vmw_execbuf_res_val_add - Add a resource to the validation list. * * @sw_context: Pointer to the software context. * @res: Unreferenced rcu-protected pointer to the resource. * @dirty: Whether to change dirty status. * @flags: Specifies whether to use the context or not. * * Returns: 0 on success. Negative error code on failure.
Typical error codes * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed. */ static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context, struct vmw_resource *res, u32 dirty, u32 flags) { … } /** * vmw_view_res_val_add - Add a view and the surface it's pointing to to the * validation list * * @sw_context: The software context holding the validation list. * @view: Pointer to the view resource. * * Returns 0 on success, negative error code otherwise. */ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, struct vmw_resource *view) { … } /** * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing * to to the validation list. * * @sw_context: The software context holding the validation list. * @view_type: The view type to look up. * @id: View id of the view. * * The view is represented by a view id and the DX context it's created on, or * scheduled for creation on. If there is no DX context set, the function will * return an -EINVAL error pointer. * * Returns: Unreferenced pointer to the resource on success, negative error * pointer on failure. */ static struct vmw_resource * vmw_view_id_val_add(struct vmw_sw_context *sw_context, enum vmw_view_type view_type, u32 id) { … } /** * vmw_resource_context_res_add - Put resources previously bound to a context on * the validation list * * @dev_priv: Pointer to a device private structure * @sw_context: Pointer to a software context used for this command submission * @ctx: Pointer to the context resource * * This function puts all resources that were previously bound to @ctx on the * resource validation list. This is part of the context state reemission. */ static int vmw_resource_context_res_add(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *ctx) { … } /** * vmw_resource_relocation_add - Add a relocation to the relocation list * * @sw_context: Pointer to the software context. * @res: The resource. * @offset: Offset into the command buffer currently being parsed where the id * that needs fixup is located. Granularity is one byte. * @rel_type: Relocation type. */ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context, const struct vmw_resource *res, unsigned long offset, enum vmw_resource_relocation_type rel_type) { … } /** * vmw_resource_relocations_free - Free all relocations on a list * * @list: Pointer to the head of the relocation list */ static void vmw_resource_relocations_free(struct list_head *list) { … } /** * vmw_resource_relocations_apply - Apply all relocations on a list * * @cb: Pointer to the start of the command buffer being patched. This need not * be the same buffer as the one being parsed when the relocation list was * built, but the contents must be the same modulo the resource ids. * @list: Pointer to the head of the relocation list. */ static void vmw_resource_relocations_apply(uint32_t *cb, struct list_head *list) { … } static int vmw_cmd_invalid(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_ok(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_resources_reserve - Reserve all resources on the sw_context's resource * list. * * @sw_context: Pointer to the software context. * * Note that since VMware's command submission is currently protected by the * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since * only a single thread at a time will attempt this.
*/ static int vmw_resources_reserve(struct vmw_sw_context *sw_context) { … } /** * vmw_cmd_res_check - Check that a resource is present and, if so, put it on * the resource validation list unless it's already there. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @res_type: Resource type. * @dirty: Whether to change dirty status. * @converter: User-space visible type specific information. * @id_loc: Pointer to the location in the command buffer currently being parsed * from where the user-space resource id handle is located. * @p_res: Pointer to pointer to resource validation node. Populated on * exit. */ static int vmw_cmd_res_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, enum vmw_res_type res_type, u32 dirty, const struct vmw_user_resource_conv *converter, uint32_t *id_loc, struct vmw_resource **p_res) { … } /** * vmw_rebind_all_dx_query - Rebind DX query associated with the context * * @ctx_res: Context the query belongs to. * * This function assumes binding_mutex is held. */ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) { … } /** * vmw_rebind_contexts - Rebind all resources previously bound to referenced * contexts. * * @sw_context: Pointer to the software context. * * Rebind context binding points that have been scrubbed because of eviction. */ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context) { … } /** * vmw_view_bindings_add - Add an array of view bindings to a context binding * state tracker. * * @sw_context: The execbuf state used for this command. * @view_type: View type for the bindings. * @binding_type: Binding type for the bindings. * @shader_slot: The shader slot to use for the bindings. * @view_ids: Array of view ids to be bound. * @num_views: Number of view ids in @view_ids. * @first_slot: The binding slot to be used for the first view id in @view_ids. */ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, enum vmw_view_type view_type, enum vmw_ctx_binding_type binding_type, uint32 shader_slot, uint32 view_ids[], u32 num_views, u32 first_slot) { … } /** * vmw_cmd_cid_check - Check a command header for valid context information. * * @dev_priv: Pointer to a device private structure. * @sw_context: Pointer to the software context. * @header: A command header with an embedded user-space context handle. * * Convenience function: Call vmw_cmd_res_check() with the user-space context * handle embedded in @header. */ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_execbuf_info_from_res - Get the private validation metadata for a * recently validated resource * * @sw_context: Pointer to the command submission context * @res: The resource * * The resource pointed to by @res needs to be present in the command submission * context's resource cache and hence be the last resource of that type to be * processed by the validation code.
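 *
 * A typical caller pattern, sketched for illustration (assuming a context
 * resource @ctx that was just validated by the current command's verifier):
 *
 *   struct vmw_ctx_validation_info *ctx_info =
 *           vmw_execbuf_info_from_res(sw_context, ctx);
 *   if (!ctx_info)
 *           return -EINVAL;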
* * Return: a pointer to the private metadata of the resource, or NULL if it * wasn't found */ static struct vmw_ctx_validation_info * vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context, struct vmw_resource *res) { … } static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_present_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries. * * @dev_priv: The device private structure. * @new_query_bo: The new buffer holding query results. * @sw_context: The software context used for this command submission. * * This function checks whether @new_query_bo is suitable for holding query * results, and if another buffer currently is pinned for query results. If so, * the function prepares the state of @sw_context for switching pinned buffers * after successful submission of the current command batch. */ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, struct vmw_bo *new_query_bo, struct vmw_sw_context *sw_context) { … } /** * vmw_query_bo_switch_commit - Finalize switching pinned query buffer * * @dev_priv: The device private structure. * @sw_context: The software context used for this command submission batch. * * This function will check if we're switching query buffers, and will then * issue a dummy occlusion query wait used as a query barrier. When the fence * object following that query wait has signaled, we are sure that all preceding * queries have finished, and the old query buffer can be unpinned. However, * since both the new query buffer and the old one are fenced with that fence, * we can do an asynchronous unpin now, and be sure that the old query buffer * won't be moved until the fence has signaled. * * As mentioned above, both the new and the old query buffers need to be fenced * using a sequence emitted *after* calling this function. */ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context) { … } /** * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle * to a MOB id. * * @dev_priv: Pointer to a device private structure. * @sw_context: The software context used for this command batch validation. * @id: Pointer to the user-space handle to be translated. * @vmw_bo_p: Points to a location that, on successful return, will carry a * non-reference-counted pointer to the buffer object identified by the * user-space handle in @id. * * This function saves information needed to translate a user-space buffer * handle to a MOB id. The translation does not take place immediately, but * during a call to vmw_apply_relocations(). * * This function builds a relocation list and a list of buffers to validate.
The * former needs to be freed using either vmw_apply_relocations() or * vmw_free_relocations(). The latter needs to be freed using * vmw_clear_validations(). */ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAMobId *id, struct vmw_bo **vmw_bo_p) { … } /** * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle * to a valid SVGAGuestPtr * * @dev_priv: Pointer to a device private structure. * @sw_context: The software context used for this command batch validation. * @ptr: Pointer to the user-space handle to be translated. * @vmw_bo_p: Points to a location that, on successful return, will carry a * non-reference-counted pointer to the DMA buffer identified by the user-space * handle in @ptr. * * This function saves information needed to translate a user-space buffer * handle to a valid SVGAGuestPtr. The translation does not take place * immediately, but during a call to vmw_apply_relocations(). * * This function builds a relocation list and a list of buffers to validate. * The former needs to be freed using either vmw_apply_relocations() or * vmw_free_relocations(). The latter needs to be freed using * vmw_clear_validations(). */ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGAGuestPtr *ptr, struct vmw_bo **vmw_bo_p) { … } /** * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. * * This function adds the new query into the query COTABLE. */ static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. * * The query bind operation will eventually associate the query ID with its * backing MOB. In this function, we take the user mode MOB ID and use * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent. */ static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_begin_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream.
*/ static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_end_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context used for this command submission. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_wait_query(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dma(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_draw(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_tex_state(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf) { … } /** * vmw_cmd_res_switch_backup - Utility function to handle backup buffer * switching * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @res: Pointer to the resource. * @buf_id: Pointer to the user-space backup buffer handle in the command * stream. * @backup_offset: Offset of backup into MOB. * * This function prepares for registering a switch of backup buffers in the * resource metadata just prior to unreserving; unlike vmw_cmd_switch_backup() * below, it takes an already looked-up resource pointer. */ static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, struct vmw_resource *res, uint32_t *buf_id, unsigned long backup_offset) { … } /** * vmw_cmd_switch_backup - Utility function to handle backup buffer switching * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @res_type: The resource type. * @converter: Information about user-space binding for this resource type. * @res_id: Pointer to the user-space resource handle in the command stream. * @buf_id: Pointer to the user-space backup buffer handle in the command * stream. * @backup_offset: Offset of backup into MOB. * * This function prepares for registering a switch of backup buffers in the * resource metadata just prior to unreserving. It's basically a wrapper around * vmw_cmd_res_switch_backup with a different interface.
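 *
 * An illustrative sketch of that wrapping (an assumed shape, not a quote of
 * the function body; VMW_RES_DIRTY_NONE is assumed to be the "no dirty-state
 * change" value for the @dirty argument of vmw_cmd_res_check()):
 *
 *   struct vmw_resource *res;
 *   int ret;
 *
 *   ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
 *                           VMW_RES_DIRTY_NONE, converter, res_id, &res);
 *   if (unlikely(ret != 0))
 *           return ret;
 *
 *   return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
 *                                    backup_offset);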
*/ static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, enum vmw_res_type res_type, const struct vmw_user_resource_conv *converter, uint32_t *res_id, uint32_t *buf_id, unsigned long backup_offset) { … } /** * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_shader_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command * * @dev_priv: Pointer to a device private struct. 
* @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_set_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_single_constant_buffer - Validate * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_constant_buffer_offset - Validate * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
*/ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_index_buffer - Validate * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_clear_rendertarget_view - Validate * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_clear_depthstencil_view - Validate * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE * command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view * resource for removal. * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. * * Check that the view exists, and if it was not created using this command * batch, conditionally make this command a NOP. 
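 *
 * A sketch of how the conditional NOP can be scheduled through the relocation
 * machinery described earlier (illustrative; @view stands for the looked-up
 * view resource, @cmd for the view-remove command being validated, and the
 * sw_context's buf_start member is assumed to track the start of the batch):
 *
 *   return vmw_resource_relocation_add(sw_context, view,
 *                                      vmw_ptr_diff(sw_context->buf_start,
 *                                                   &cmd->header.id),
 *                                      vmw_res_rel_cond_nop);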
*/ static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_dx_transfer_from_buffer - Validate * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. */ static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } /** * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command * * @dev_priv: Pointer to a device private struct. * @sw_context: The software context being used for this batch. * @header: Pointer to the command header in the command stream. 
*/ static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_sm5(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_set_uav(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, SVGA3dCmdHeader *header) { … } static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) { … } static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = …; bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd) { … } static int vmw_cmd_check(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t *size) { … } static int vmw_cmd_check_all(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, void *buf, uint32_t size) { … } static void vmw_free_relocations(struct vmw_sw_context *sw_context) { … } static void vmw_apply_relocations(struct vmw_sw_context *sw_context) { … } static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context, uint32_t size) { … } /* * vmw_execbuf_fence_commands - create and submit a command stream fence * * Creates a fence object and submits a command stream marker. * If this fails for some reason, we sync the fifo and return NULL. * It is then safe to fence buffers with a NULL pointer. * * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a * userspace handle if @p_handle is not NULL, otherwise not. */ int vmw_execbuf_fence_commands(struct drm_file *file_priv, struct vmw_private *dev_priv, struct vmw_fence_obj **p_fence, uint32_t *p_handle) { … } /** * vmw_execbuf_copy_fence_user - copy fence object information to user-space. * * @dev_priv: Pointer to a vmw_private struct. * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file. * @ret: Return value from fence object creation.
* @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which * the information should be copied. * @fence: Pointer to the fence object. * @fence_handle: User-space fence handle. * @out_fence_fd: Exported file descriptor for the fence. -1 if not used. * * This function copies fence information to user-space. If copying fails, the * user-space struct drm_vmw_fence_rep::error member is hopefully left * untouched; if it was preloaded with -EFAULT by user-space, the error will * then be detected. * * Also, if copying fails, user-space will be unable to signal the fence object, * so we wait for it immediately, and then unreference the user-space reference. */ int vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv, struct vmw_fpriv *vmw_fp, int ret, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj *fence, uint32_t fence_handle, int32_t out_fence_fd) { … } /** * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo. * * @dev_priv: Pointer to a device private structure. * @kernel_commands: Pointer to the unpatched command batch. * @command_size: Size of the unpatched command batch. * @sw_context: Structure holding the relocation lists. * * Side effects: If this function returns 0, then the command batch pointed to * by @kernel_commands will have been modified. */ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, void *kernel_commands, u32 command_size, struct vmw_sw_context *sw_context) { … } /** * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the * command buffer manager. * * @dev_priv: Pointer to a device private structure. * @header: Opaque handle to the command buffer allocation. * @command_size: Size of the unpatched command batch. * @sw_context: Structure holding the relocation lists. * * Side effects: If this function returns 0, then the command buffer represented * by @header will have been modified. */ static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv, struct vmw_cmdbuf_header *header, u32 command_size, struct vmw_sw_context *sw_context) { … } /** * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for * submission using a command buffer. * * @dev_priv: Pointer to a device private structure. * @user_commands: User-space pointer to the commands to be submitted. * @command_size: Size of the unpatched command batch. * @header: Out parameter returning the opaque pointer to the command buffer. * * This function checks whether we can use the command buffer manager for * submission and if so, creates a command buffer of suitable size and copies * the user data into that buffer. * * On successful return, the function returns a pointer to the data in the * command buffer and *@header is set to non-NULL. * * @kernel_commands: If command buffers could not be used, the function will * return the value of @kernel_commands on function call. That value may be * NULL. In that case, the value of *@header will be set to NULL. * * If an error is encountered, the function will return an error pointer. * If the function is interrupted by a signal while sleeping, it will return * -ERESTARTSYS cast to an error pointer.
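 *
 * A sketch of the expected caller pattern (illustrative):
 *
 *   struct vmw_cmdbuf_header *header = NULL;
 *   void *cmd = vmw_execbuf_cmdbuf(dev_priv, user_commands, kernel_commands,
 *                                  command_size, &header);
 *   if (IS_ERR(cmd))
 *           return PTR_ERR(cmd);
 *
 * On success, if *@header ended up non-NULL the batch came from the command
 * buffer manager; otherwise @cmd is simply @kernel_commands.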
*/ static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, u32 command_size, struct vmw_cmdbuf_header **header) { … } static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, struct vmw_sw_context *sw_context, uint32_t handle) { … } int vmw_execbuf_process(struct drm_file *file_priv, struct vmw_private *dev_priv, void __user *user_commands, void *kernel_commands, uint32_t command_size, uint64_t throttle_us, uint32_t dx_context_handle, struct drm_vmw_fence_rep __user *user_fence_rep, struct vmw_fence_obj **out_fence, uint32_t flags) { … } /** * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer. * * @dev_priv: The device private structure. * * This function is called to idle the fifo and unpin the query buffer if the * normal way to do this hits an error, which should typically be extremely * rare. */ static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv) { … } /** * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query * bo. * * @dev_priv: The device private structure. * @fence: If non-NULL, should point to a struct vmw_fence_obj issued _after_ a * query barrier that flushes all queries touching the current buffer pointed to * by @dev_priv->pinned_bo. * * This function should be used to unpin the pinned query bo, or as a query * barrier when we need to make sure that all queries have finished before the * next fifo command. (For example on hardware context destructions where the * hardware may otherwise leak unfinished queries). * * This function does not return any failure codes, but attempts to do safe * unpinning in case of errors. * * The function will synchronize on the previous query barrier, and will thus * not finish until that barrier has executed. * * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before * calling this function. */ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, struct vmw_fence_obj *fence) { … } /** * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo. * * @dev_priv: The device private structure. * * This function should be used to unpin the pinned query bo, or as a query * barrier when we need to make sure that all queries have finished before the * next fifo command. (For example on hardware context destructions where the * hardware may otherwise leak unfinished queries). * * This function does not return any failure codes, but attempts to do safe * unpinning in case of errors. * * The function will synchronize on the previous query barrier, and will thus * not finish until that barrier has executed. */ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv) { … } int vmw_execbuf_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { … }