linux/drivers/gpu/drm/xe/xe_guc_ct.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_guc_ct.h"

#include <linux/bitfield.h>
#include <linux/circ_buf.h>
#include <linux/delay.h>

#include <kunit/static_stub.h>

#include <drm/drm_managed.h>

#include "abi/guc_actions_abi.h"
#include "abi/guc_actions_sriov_abi.h"
#include "abi/guc_klvs_abi.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_monitor.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_guc.h"
#include "xe_guc_relay.h"
#include "xe_guc_submit.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_trace_guc.h"

/* Used when a CT send wants to block and/or receive data */
struct g2h_fence {};
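
/*
 * A minimal sketch of the state such a fence typically carries. This is
 * illustrative only: the field names and layout below are assumptions, not
 * the driver's actual definition, which is elided above.
 */
#if 0	/* example sketch, not built */
struct example_g2h_fence {
	u32 *response_buffer;	/* caller-provided buffer for the full reply */
	u32 seqno;		/* fence value the GuC echoes back in the reply */
	u16 response_len;	/* length of the received reply, in dwords */
	u16 error;		/* error code carried by a failure reply */
	bool retry;		/* GuC asked us to resend the request */
	bool done;		/* reply has landed; waiters may proceed */
};
#endif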

static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer)
{}

static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
{}

static struct xe_guc *
ct_to_guc(struct xe_guc_ct *ct)
{}

static struct xe_gt *
ct_to_gt(struct xe_guc_ct *ct)
{}

static struct xe_device *
ct_to_xe(struct xe_guc_ct *ct)
{}

/**
 * DOC: GuC CTB Blob
 *
 * We allocate a single blob to hold both CTB descriptors and buffers:
 *
 *      +--------+-----------------------------------------------+------+
 *      | offset | contents                                      | size |
 *      +========+===============================================+======+
 *      | 0x0000 | H2G CTB Descriptor (send)                     |      |
 *      +--------+-----------------------------------------------+  4K  |
 *      | 0x0800 | G2H CTB Descriptor (g2h)                      |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | H2G CT Buffer (send)                          | n*4K |
 *      |        |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *      | 0x1000 | G2H CT Buffer (g2h)                           | m*4K |
 *      | + n*4K |                                               |      |
 *      +--------+-----------------------------------------------+------+
 *
 * The size of each ``CT Buffer`` must be a multiple of 4K.
 * We don't expect too many messages in flight at any time, unless we are
 * using GuC submission. In that case each request requires a minimum of
 * 2 dwords, which gives us a maximum of 256 queued requests. Hopefully this
 * is enough space to avoid backpressure on the driver. We increase the size
 * of the receive buffer (relative to the send) to ensure a G2H response
 * CTB has a landing spot.
 */

#define CTB_DESC_SIZE
#define CTB_H2G_BUFFER_SIZE
#define CTB_G2H_BUFFER_SIZE
#define G2H_ROOM_BUFFER_SIZE
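
/*
 * Illustrative values consistent with the blob layout documented above (the
 * actual definitions are elided here): the two descriptors are 2K-aligned so
 * they share the first 4K page, and each buffer is a whole number of 4K
 * pages, with the G2H side sized larger than the H2G side.
 */
#if 0	/* example sketch, not built */
#define EXAMPLE_CTB_DESC_SIZE		SZ_2K		/* 0x0000 and 0x0800 */
#define EXAMPLE_CTB_H2G_BUFFER_SIZE	(SZ_4K)		/* n == 1 */
#define EXAMPLE_CTB_G2H_BUFFER_SIZE	(4 * SZ_4K)	/* m == 4 */
#define EXAMPLE_G2H_ROOM_BUFFER_SIZE	(EXAMPLE_CTB_G2H_BUFFER_SIZE / 4)
#endif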

/**
 * xe_guc_ct_queue_proc_time_jiffies - Return maximum time to process a full
 * CT command queue
 * @ct: the &xe_guc_ct. Unused at this moment but will be used in the future.
 *
 * The observation is that a 4KiB buffer full of commands takes a little over
 * a second to process. Use that to calculate the maximum time to process a
 * full CT command queue.
 *
 * Return: Maximum time to process a full CT queue in jiffies.
 */
long xe_guc_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{}
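
/*
 * A sketch of the calculation described above, assuming CTB_H2G_BUFFER_SIZE
 * is the H2G buffer size in bytes: budget one second (HZ jiffies) per 4KiB
 * of commands, so a full queue gets (buffer size / 4K) seconds.
 */
#if 0	/* example sketch, not built */
long example_ct_queue_proc_time_jiffies(struct xe_guc_ct *ct)
{
	return (CTB_H2G_BUFFER_SIZE / SZ_4K) * HZ;
}
#endif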

static size_t guc_ct_size(void)
{}

static void guc_ct_fini(struct drm_device *drm, void *arg)
{}

static void receive_g2h(struct xe_guc_ct *ct);
static void g2h_worker_func(struct work_struct *w);
static void safe_mode_worker_func(struct work_struct *w);

static void primelockdep(struct xe_guc_ct *ct)
{}

int xe_guc_ct_init(struct xe_guc_ct *ct)
{}

#define desc_read(xe_, guc_ctb__, field_)

#define desc_write(xe_, guc_ctb__, field_, val_)

static void guc_ct_ctb_h2g_init(struct xe_device *xe, struct guc_ctb *h2g,
				struct iosys_map *map)
{}

static void guc_ct_ctb_g2h_init(struct xe_device *xe, struct guc_ctb *g2h,
				struct iosys_map *map)
{}

static int guc_ct_ctb_h2g_register(struct xe_guc_ct *ct)
{}

static int guc_ct_ctb_g2h_register(struct xe_guc_ct *ct)
{}

static int guc_ct_control_toggle(struct xe_guc_ct *ct, bool enable)
{}

static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
				enum xe_guc_ct_state state)
{}

static bool ct_needs_safe_mode(struct xe_guc_ct *ct)
{}

static bool ct_restart_safe_mode_worker(struct xe_guc_ct *ct)
{}

static void safe_mode_worker_func(struct work_struct *w)
{}

static void ct_enter_safe_mode(struct xe_guc_ct *ct)
{}

static void ct_exit_safe_mode(struct xe_guc_ct *ct)
{}

int xe_guc_ct_enable(struct xe_guc_ct *ct)
{}

static void stop_g2h_handler(struct xe_guc_ct *ct)
{}

/**
 * xe_guc_ct_disable - Set GuC to disabled state
 * @ct: the &xe_guc_ct
 *
 * Set the GuC CT to the disabled state and stop the g2h handler. No
 * outstanding g2h messages are expected during this transition.
 */
void xe_guc_ct_disable(struct xe_guc_ct *ct)
{}

/**
 * xe_guc_ct_stop - Set GuC to stopped state
 * @ct: the &xe_guc_ct
 *
 * Set the GuC CT to the stopped state, stop the g2h handler, and clear any
 * outstanding g2h messages.
 */
void xe_guc_ct_stop(struct xe_guc_ct *ct)
{}

static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len)
{}

static bool g2h_has_room(struct xe_guc_ct *ct, u32 g2h_len)
{}

static int has_room(struct xe_guc_ct *ct, u32 cmd_len, u32 g2h_len)
{}

static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
{}

static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
{}

static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{}

static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
{}

#define H2G_CT_HEADERS

static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len,
		     u32 ct_fence_value, bool want_response)
{}

/*
 * The CT protocol accepts a 16-bit fence. This field is fully owned by the
 * driver, the GuC will just copy it to the reply message. Since we need to
 * be able to distinguish between replies to REQUEST and FAST_REQUEST messages,
 * we use one bit of the seqno as an indicator for that and a rolling counter
 * for the remaining 15 bits.
 */
#define CT_SEQNO_MASK
#define CT_SEQNO_UNTRACKED
static u16 next_ct_seqno(struct xe_guc_ct *ct, bool is_g2h_fence)
{}
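
/*
 * The mask values follow directly from the comment above: one bit flags a
 * fence with no tracked g2h_fence waiter (a FAST_REQUEST), and bits 14:0
 * hold the rolling counter. A sketch of the allocation, with an illustrative
 * counter variable in place of the driver's internal state:
 */
#if 0	/* example sketch, not built */
#define EXAMPLE_CT_SEQNO_MASK		GENMASK(14, 0)
#define EXAMPLE_CT_SEQNO_UNTRACKED	BIT(15)

static u16 example_next_ct_seqno(u32 *rolling_seqno, bool is_g2h_fence)
{
	u16 seqno = (*rolling_seqno)++ & EXAMPLE_CT_SEQNO_MASK;

	if (!is_g2h_fence)
		seqno |= EXAMPLE_CT_SEQNO_UNTRACKED;

	return seqno;
}
#endif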

static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 g2h_len, u32 num_g2h,
				struct g2h_fence *g2h_fence)
{}

static void kick_reset(struct xe_guc_ct *ct)
{}

static int dequeue_one_g2h(struct xe_guc_ct *ct);

static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			      u32 g2h_len, u32 num_g2h,
			      struct g2h_fence *g2h_fence)
{}

static int guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		       u32 g2h_len, u32 num_g2h, struct g2h_fence *g2h_fence)
{}

int xe_guc_ct_send(struct xe_guc_ct *ct, const u32 *action, u32 len,
		   u32 g2h_len, u32 num_g2h)
{}

int xe_guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len,
			  u32 g2h_len, u32 num_g2h)
{}

int xe_guc_ct_send_g2h_handler(struct xe_guc_ct *ct, const u32 *action, u32 len)
{}

/*
 * Check if a GT reset is in progress or will occur, and if the GT reset
 * brought the CT back up. Arbitrarily picking 5 seconds as an upper limit
 * for a GT reset to complete.
 */
static bool retry_failure(struct xe_guc_ct *ct, int ret)
{}
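
/*
 * A sketch of such a check. The wait queue and "alive" predicate here are
 * assumptions standing in for driver state: only errors that a GT reset can
 * plausibly cure qualify for a retry, and the reset gets at most 5 seconds
 * to bring the CT back up.
 */
#if 0	/* example sketch, not built */
static bool example_retry_failure(struct xe_guc_ct *ct, int ret)
{
	if (!(ret == -EDEADLK || ret == -EPIPE || ret == -ENODEV))
		return false;

	return wait_event_timeout(example_ct_wq, example_ct_alive(ct),
				  HZ * 5) > 0;
}
#endif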

static int guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			    u32 *response_buffer, bool no_fail)
{}

/**
 * xe_guc_ct_send_recv - Send and receive HXG to the GuC
 * @ct: the &xe_guc_ct
 * @action: the dword array with `HXG Request`_ message (can't be NULL)
 * @len: length of the `HXG Request`_ message (in dwords, can't be 0)
 * @response_buffer: placeholder for the `HXG Response`_ message (can be NULL)
 *
 * Send a `HXG Request`_ message to the GuC over the CT communication channel
 * and block until the GuC replies with a `HXG Response`_ message.
 *
 * For non-blocking communication with GuC use xe_guc_ct_send().
 *
 * Note: The size of &response_buffer must be at least GUC_CTB_MAX_DWORDS_.
 *
 * Return: response length (in dwords) if &response_buffer was not NULL, or
 *         DATA0 from `HXG Response`_ if &response_buffer was NULL, or
 *         a negative error code on failure.
 */
int xe_guc_ct_send_recv(struct xe_guc_ct *ct, const u32 *action, u32 len,
			u32 *response_buffer)
{}
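
/*
 * Example usage (a sketch; the action opcode and payload below are
 * placeholders, not real ABI values):
 *
 *	u32 action[] = { SOME_XE_GUC_ACTION, data0, data1 };
 *	int ret = xe_guc_ct_send_recv(ct, action, ARRAY_SIZE(action), NULL);
 *
 * A negative ret means the send or the reply failed; otherwise, with a NULL
 * response_buffer, ret holds DATA0 from the `HXG Response`_.
 */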

int xe_guc_ct_send_recv_no_fail(struct xe_guc_ct *ct, const u32 *action,
				u32 len, u32 *response_buffer)
{}

static u32 *msg_to_hxg(u32 *msg)
{}

static u32 msg_len_to_hxg_len(u32 len)
{}

static int parse_g2h_event(struct xe_guc_ct *ct, u32 *msg, u32 len)
{}

static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
{}

static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{}

static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
{}

static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path)
{}

static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len)
{}

/**
 * xe_guc_ct_fast_path - process critical G2H in the IRQ handler
 * @ct: GuC CT object
 *
 * Anything related to page faults is critical for performance; process these
 * critical G2H messages in the IRQ handler. This is safe as these handlers
 * either just wake up waiters or queue another worker.
 */
void xe_guc_ct_fast_path(struct xe_guc_ct *ct)
{}
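
/*
 * A sketch of the kind of IRQ-context dispatch described above. Which
 * actions take the fast path is driver policy; the pagefault and TLB
 * invalidation cases shown here are assumptions based on the comment above.
 */
#if 0	/* example sketch, not built */
static void example_g2h_fast_dispatch(struct xe_guc_ct *ct, u32 action)
{
	switch (action) {
	case XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC:
		/* queues pagefault work, safe in IRQ context */
		break;
	case XE_GUC_ACTION_TLB_INVALIDATION_DONE:
		/* just wakes invalidation waiters, safe in IRQ context */
		break;
	default:
		/* everything else waits for the ordered G2H worker */
		break;
	}
}
#endif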

/* Returns less than zero on error, 0 on done, 1 on more available */
static int dequeue_one_g2h(struct xe_guc_ct *ct)
{}

static void receive_g2h(struct xe_guc_ct *ct)
{}

static void g2h_worker_func(struct work_struct *w)
{}

static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb,
				     struct guc_ctb_snapshot *snapshot,
				     bool atomic)
{}

static void guc_ctb_snapshot_print(struct guc_ctb_snapshot *snapshot,
				   struct drm_printer *p)
{}

static void guc_ctb_snapshot_free(struct guc_ctb_snapshot *snapshot)
{}

/**
 * xe_guc_ct_snapshot_capture - Take a quick snapshot of the CT state.
 * @ct: GuC CT object.
 * @atomic: Boolean to indicate if this is called from an atomic context,
 * like reset or the CTB handler, as opposed to some regular path like
 * debugfs.
 *
 * This can be printed out at a later stage, e.g. during dev_coredump
 * analysis.
 *
 * Returns: a GuC CT snapshot object that must be freed by the caller
 * using `xe_guc_ct_snapshot_free`.
 */
struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct,
						      bool atomic)
{}

/**
 * xe_guc_ct_snapshot_print - Print out a given GuC CT snapshot.
 * @snapshot: GuC CT snapshot object.
 * @p: drm_printer where it will be printed out.
 *
 * This function prints out a given GuC CT snapshot object.
 */
void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot,
			      struct drm_printer *p)
{}

/**
 * xe_guc_ct_snapshot_free - Free all allocated objects for a given snapshot.
 * @snapshot: GuC CT snapshot object.
 *
 * This function frees all the memory that was allocated at capture
 * time.
 */
void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot)
{}
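
/*
 * Typical snapshot lifecycle (a sketch, e.g. from a dev_coredump path):
 *
 *	struct xe_guc_ct_snapshot *snapshot;
 *
 *	snapshot = xe_guc_ct_snapshot_capture(ct, true);
 *	...
 *	xe_guc_ct_snapshot_print(snapshot, p);
 *	xe_guc_ct_snapshot_free(snapshot);
 */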

/**
 * xe_guc_ct_print - GuC CT Print.
 * @ct: GuC CT.
 * @p: drm_printer where it will be printed out.
 * @atomic: Boolean to indicate if this is called from an atomic context,
 * like reset or the CTB handler, as opposed to some regular path like
 * debugfs.
 *
 * This function quickly captures a snapshot and immediately prints it out.
 */
void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic)
{}