linux/drivers/gpu/drm/display/drm_dp_tunnel.c

// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/ref_tracker.h>
#include <linux/types.h>

#include <drm/drm_atomic_state_helper.h>

#include <drm/drm_atomic.h>
#include <drm/drm_print.h>
#include <drm/display/drm_dp.h>
#include <drm/display/drm_dp_helper.h>
#include <drm/display/drm_dp_tunnel.h>

#define to_group(__private_obj)

#define to_group_state(__private_state)

#define is_dp_tunnel_private_obj(__obj)

#define for_each_new_group_in_state(__state, __new_group_state, __i)

#define for_each_old_group_in_state(__state, __old_group_state, __i)

#define for_each_tunnel_in_group(__group, __tunnel)

#define for_each_tunnel_state(__group_state, __tunnel_state)

#define for_each_tunnel_state_safe(__group_state, __tunnel_state, __tunnel_state_tmp)

#define kbytes_to_mbits(__kbytes)

#define DPTUN_BW_ARG(__bw)

#define __tun_prn(__tunnel, __level, __type, __fmt, ...)

#define tun_dbg(__tunnel, __fmt, ...)

#define tun_dbg_stat(__tunnel, __err, __fmt, ...)

#define tun_dbg_atomic(__tunnel, __fmt, ...)

#define tun_grp_dbg(__group, __fmt, ...)

#define DP_TUNNELING_BASE

#define __DPTUN_REG_RANGE(__start, __size)

#define DPTUN_REG_RANGE(__addr, __size)

#define DPTUN_REG(__addr)

#define DPTUN_INFO_REG_MASK

static const DECLARE_BITMAP(dptun_info_regs, 64) =;

struct drm_dp_tunnel_regs {};

struct drm_dp_tunnel_group;

struct drm_dp_tunnel {};

struct drm_dp_tunnel_group_state;

struct drm_dp_tunnel_state {};

struct drm_dp_tunnel_group_state {};

struct drm_dp_tunnel_group {};

struct drm_dp_tunnel_mgr {};

/*
 * The following helpers provide a way to read out the tunneling DPCD
 * registers with a minimal number of AUX transfers (one transfer per
 * contiguous range, as permitted by the 16 byte per transfer AUX limit),
 * without accessing other registers, to avoid any read side-effects.
 */
static int next_reg_area(int *offset)
{}

#define tunnel_reg_ptr(__regs, __address)

static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *regs)
{}
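
/*
 * Illustrative sketch (not part of this file): reading one contiguous DPCD
 * register range while honouring the 16 byte per transfer AUX limit noted
 * above. The helper and its callers are hypothetical; drm_dp_dpcd_read() is
 * the standard DRM AUX read helper.
 */
static int example_read_dpcd_range(struct drm_dp_aux *aux, unsigned int offset,
				   u8 *buf, unsigned int len)
{
	while (len) {
		/* Clamp each transfer to the 16 byte AUX payload limit. */
		unsigned int chunk = min_t(unsigned int, len, 16);
		ssize_t ret = drm_dp_dpcd_read(aux, offset, buf, chunk);

		if (ret < 0)
			return ret;
		if (ret != chunk)
			return -EIO;

		offset += chunk;
		buf += chunk;
		len -= chunk;
	}

	return 0;
}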

static u8 tunnel_reg(const struct drm_dp_tunnel_regs *regs, int address)
{}

static u8 tunnel_reg_drv_group_id(const struct drm_dp_tunnel_regs *regs)
{}

/* Return granularity in kB/s units */
static int tunnel_reg_bw_granularity(const struct drm_dp_tunnel_regs *regs)
{}
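
/*
 * Note on the granularity above (an assumed encoding, for illustration
 * only): the DPCD granularity field is taken here to encode
 * 0.25 Gb/s << value, i.e. a value of 0, 1 or 2 corresponds to 31250,
 * 62500 or 125000 kB/s respectively.
 */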

static int tunnel_reg_max_dprx_rate(const struct drm_dp_tunnel_regs *regs)
{}

static int tunnel_reg_max_dprx_lane_count(const struct drm_dp_tunnel_regs *regs)
{}

static bool tunnel_reg_bw_alloc_supported(const struct drm_dp_tunnel_regs *regs)
{}

static bool tunnel_reg_bw_alloc_enabled(const struct drm_dp_tunnel_regs *regs)
{}

static u8 tunnel_group_drv_id(u8 drv_group_id)
{}

static u8 tunnel_group_id(u8 drv_group_id)
{}

const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_name);

static const char *drm_dp_tunnel_group_name(const struct drm_dp_tunnel_group *group)
{}

static struct drm_dp_tunnel_group *
lookup_or_alloc_group(struct drm_dp_tunnel_mgr *mgr, u8 drv_group_id)
{}

static void free_group(struct drm_dp_tunnel_group *group)
{}

static struct drm_dp_tunnel *
tunnel_get(struct drm_dp_tunnel *tunnel)
{}

static void free_tunnel(struct kref *kref)
{}

static void tunnel_put(struct drm_dp_tunnel *tunnel)
{}

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
			     struct ref_tracker **tracker)
{}

static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
			       struct ref_tracker **tracker)
{}
#else
static void track_tunnel_ref(struct drm_dp_tunnel *tunnel,
			     struct ref_tracker **tracker)
{
}

static void untrack_tunnel_ref(struct drm_dp_tunnel *tunnel,
			       struct ref_tracker **tracker)
{
}
#endif

/**
 * drm_dp_tunnel_get - Get a reference for a DP tunnel
 * @tunnel: Tunnel object
 * @tracker: Debug tracker for the reference
 *
 * Get a reference for @tunnel, along with a debug tracker to help locate
 * the source of reference leaks, double reference puts and similar issues.
 *
 * The reference must be dropped after use by calling drm_dp_tunnel_put(),
 * passing @tunnel and the *@tracker returned from here.
 *
 * Returns @tunnel - as a convenience - along with *@tracker.
 */
struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel,
		  struct ref_tracker **tracker)
{}
EXPORT_SYMBOL(drm_dp_tunnel_get);

/**
 * drm_dp_tunnel_put - Put a reference for a DP tunnel
 * @tunnel: Tunnel object
 * @tracker: Debug tracker for the reference
 *
 * Put a reference for @tunnel along with its debug *@tracker, which
 * was obtained with drm_dp_tunnel_get().
 */
void drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel,
		       struct ref_tracker **tracker)
{}
EXPORT_SYMBOL(drm_dp_tunnel_put);
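
/*
 * Illustrative sketch (not part of this file): a driver holding a long-term
 * reference to a tunnel pairs drm_dp_tunnel_get()/drm_dp_tunnel_put() with a
 * ref_tracker stored next to the pointer. struct example_port is
 * hypothetical.
 */
struct example_port {
	struct drm_dp_tunnel *tunnel;
	struct ref_tracker *tunnel_ref;
};

static void example_hold_tunnel(struct example_port *port,
				struct drm_dp_tunnel *tunnel)
{
	/* Returns @tunnel for convenience, with the new reference tracked. */
	port->tunnel = drm_dp_tunnel_get(tunnel, &port->tunnel_ref);
}

static void example_release_tunnel(struct example_port *port)
{
	drm_dp_tunnel_put(port->tunnel, &port->tunnel_ref);
	port->tunnel = NULL;
}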

static bool add_tunnel_to_group(struct drm_dp_tunnel_mgr *mgr,
				u8 drv_group_id,
				struct drm_dp_tunnel *tunnel)
{}

static struct drm_dp_tunnel *
create_tunnel(struct drm_dp_tunnel_mgr *mgr,
	      struct drm_dp_aux *aux,
	      const struct drm_dp_tunnel_regs *regs)
{}

static void destroy_tunnel(struct drm_dp_tunnel *tunnel)
{}

/**
 * drm_dp_tunnel_set_io_error - Set the IO error flag for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Set the IO error flag for @tunnel. Drivers can call this function upon
 * detecting a failure that affects the tunnel functionality, for instance
 * after a DP AUX transfer failure on the port @tunnel is connected to.
 *
 * This disables further management of @tunnel, including any related
 * AUX accesses for tunneling DPCD registers, returning an error to the
 * initiators of these accesses. The driver is supposed to drop this tunnel
 * and - optionally - recreate it.
 */
void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_set_io_error);
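
/*
 * Illustrative sketch (not part of this file): reacting to an AUX failure on
 * the tunnel's port by flagging the error and dropping the tunnel, as
 * described above. Whether and when the driver re-detects the tunnel is a
 * driver policy and not shown here.
 */
static void example_drop_failed_tunnel(struct drm_dp_tunnel *tunnel)
{
	drm_dp_tunnel_set_io_error(tunnel);
	drm_dp_tunnel_destroy(tunnel);
}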

#define SKIP_DPRX_CAPS_CHECK
#define ALLOW_ALLOCATED_BW_CHANGE
static bool tunnel_regs_are_valid(struct drm_dp_tunnel_mgr *mgr,
				  const struct drm_dp_tunnel_regs *regs,
				  unsigned int flags)
{}

static int tunnel_allocated_bw(const struct drm_dp_tunnel *tunnel)
{}

static bool tunnel_info_changes_are_valid(struct drm_dp_tunnel *tunnel,
					  const struct drm_dp_tunnel_regs *regs,
					  unsigned int flags)
{}

static int
read_and_verify_tunnel_regs(struct drm_dp_tunnel *tunnel,
			    struct drm_dp_tunnel_regs *regs,
			    unsigned int flags)
{}

static bool update_dprx_caps(struct drm_dp_tunnel *tunnel, const struct drm_dp_tunnel_regs *regs)
{}

static int dev_id_len(const u8 *dev_id, int max_len)
{}

static int get_max_dprx_bw(const struct drm_dp_tunnel *tunnel)
{}

static int get_max_tunnel_bw(const struct drm_dp_tunnel *tunnel)
{}

/**
 * drm_dp_tunnel_detect - Detect DP tunnel on the link
 * @mgr: Tunnel manager
 * @aux: DP AUX on which the tunnel will be detected
 *
 * Detect if there is any DP tunnel on the link and add it to the tunnel
 * group's tunnel list.
 *
 * Returns a pointer to a tunnel on success, or an ERR_PTR() error on
 * failure.
 */
struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
		     struct drm_dp_aux *aux)
{}
EXPORT_SYMBOL(drm_dp_tunnel_detect);
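
/*
 * Illustrative sketch (not part of this file): detecting a tunnel on a port
 * and opting in to BW allocation mode. Treating an enable failure as
 * non-fatal and continuing without managed BW allocation is an assumed
 * driver policy.
 */
static struct drm_dp_tunnel *
example_detect_tunnel(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{
	struct drm_dp_tunnel *tunnel;
	int err;

	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return tunnel;

	/* Best effort: the tunnel stays usable without BW allocation mode. */
	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (err)
		pr_debug("DP tunnel BW allocation mode not enabled: %d\n", err);

	return tunnel;
}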

/**
 * drm_dp_tunnel_destroy - Destroy tunnel object
 * @tunnel: Tunnel object
 *
 * Remove the tunnel from the tunnel topology and destroy it.
 *
 * Returns 0 on success, -ENODEV if the tunnel has been destroyed already.
 */
int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_destroy);

static int check_tunnel(const struct drm_dp_tunnel *tunnel)
{}

static int group_allocated_bw(struct drm_dp_tunnel_group *group)
{}

/*
 * The estimated BW reported by the TBT Connection Manager for each tunnel in
 * a group includes the BW already allocated for the given tunnel and the
 * unallocated BW which is free to be used by any tunnel in the group.
 */
static int group_free_bw(const struct drm_dp_tunnel *tunnel)
{}

static int calc_group_available_bw(const struct drm_dp_tunnel *tunnel)
{}
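
/*
 * Worked example for the note above (illustrative numbers only): with two
 * tunnels in a group that have 2000 and 1000 kB/s allocated and 3000 kB/s of
 * the group's BW still unallocated, the CM reports an estimated BW of
 * 2000 + 3000 = 5000 kB/s for the first tunnel and 1000 + 3000 = 4000 kB/s
 * for the second one, while the total available BW of the group is
 * 2000 + 1000 + 3000 = 6000 kB/s.
 */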

static int update_group_available_bw(struct drm_dp_tunnel *tunnel,
				     const struct drm_dp_tunnel_regs *regs)
{}

static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable)
{}

/**
 * drm_dp_tunnel_enable_bw_alloc - Enable DP tunnel BW allocation mode
 * @tunnel: Tunnel object
 *
 * Enable the DP tunnel BW allocation mode on @tunnel if it supports it.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_enable_bw_alloc);

/**
 * drm_dp_tunnel_disable_bw_alloc - Disable DP tunnel BW allocation mode
 * @tunnel: Tunnel object
 *
 * Disable the DP tunnel BW allocation mode on @tunnel.
 *
 * Returns 0 in case of success, negative error code otherwise.
 */
int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_disable_bw_alloc);

/**
 * drm_dp_tunnel_bw_alloc_is_enabled - Query the BW allocation mode enabled state
 * @tunnel: Tunnel object
 *
 * Query if the BW allocation mode is enabled for @tunnel.
 *
 * Returns %true if the BW allocation mode is enabled for @tunnel.
 */
bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_bw_alloc_is_enabled);

static int clear_bw_req_state(struct drm_dp_aux *aux)
{}

static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed)
{}

static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw)
{}

/**
 * drm_dp_tunnel_alloc_bw - Allocate BW for a DP tunnel
 * @tunnel: Tunnel object
 * @bw: BW in kB/s units
 *
 * Allocate @bw kB/s for @tunnel. The allocated BW must be freed after use by
 * calling this function for the same tunnel setting @bw to 0.
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
{}
EXPORT_SYMBOL(drm_dp_tunnel_alloc_bw);
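
/*
 * Illustrative sketch (not part of this file): allocating BW for a tunnel
 * before enabling a stream through it and releasing it again by allocating
 * 0, as described above. How 'required_bw' (in kB/s) is calculated by the
 * driver is hypothetical.
 */
static int example_enable_stream_bw(struct drm_dp_tunnel *tunnel, int required_bw)
{
	return drm_dp_tunnel_alloc_bw(tunnel, required_bw);
}

static int example_disable_stream_bw(struct drm_dp_tunnel *tunnel)
{
	/* Freeing the allocated BW is done by allocating 0 for the tunnel. */
	return drm_dp_tunnel_alloc_bw(tunnel, 0);
}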

/**
 * drm_dp_tunnel_get_allocated_bw - Get the BW allocated for a DP tunnel
 * @tunnel: Tunnel object
 *
 * Get the current BW allocated for @tunnel. After the tunnel is created or
 * resumed and the BW allocation mode is enabled for it, the allocation
 * becomes determined only once the driver makes its first allocation
 * request via drm_dp_tunnel_alloc_bw().
 *
 * Return the BW allocated for the tunnel, or -1 if the allocation is
 * undetermined.
 */
int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_get_allocated_bw);

/*
 * Return 0 if the status hasn't changed, 1 if the status has changed, or a
 * negative error code in case of an I/O failure.
 */
static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel)
{}

/**
 * drm_dp_tunnel_update_state - Update DP tunnel SW state with the HW state
 * @tunnel: Tunnel object
 *
 * Update the SW state of @tunnel with the HW state.
 *
 * Returns 0 if the state has not changed, 1 if it has changed and got updated
 * successfully and a negative error code otherwise.
 */
int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_update_state);

/**
 * drm_dp_tunnel_handle_irq - Handle DP tunnel IRQs
 * @mgr: Tunnel manager
 * @aux: DP AUX the IRQ was detected on
 *
 * Handle any pending DP tunnel IRQs, waking up waiters for a completion
 * event.
 *
 * Returns 1 if the state of the tunnel has changed which requires calling
 * drm_dp_tunnel_update_state(), a negative error code in case of a failure,
 * 0 otherwise.
 */
int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *aux)
{}
EXPORT_SYMBOL(drm_dp_tunnel_handle_irq);
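
/*
 * Illustrative sketch (not part of this file): handling a tunneling related
 * HPD IRQ and refreshing the tunnel SW state when needed. The surrounding
 * short-pulse/HPD handling in the driver is hypothetical.
 */
static int example_handle_tunnel_irq(struct drm_dp_tunnel_mgr *mgr,
				     struct drm_dp_tunnel *tunnel,
				     struct drm_dp_aux *aux)
{
	int ret;

	ret = drm_dp_tunnel_handle_irq(mgr, aux);
	if (ret != 1)
		return ret;

	/* The tunnel state changed; resync the SW state with the HW state. */
	return drm_dp_tunnel_update_state(tunnel);
}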

/**
 * drm_dp_tunnel_max_dprx_rate - Query the maximum rate of the tunnel's DPRX
 * @tunnel: Tunnel object
 *
 * The function is used to query the maximum link rate of the DPRX connected
 * to @tunnel. Note that this rate will not be limited by the BW limit of the
 * tunnel, as opposed to the standard and extended DP_MAX_LINK_RATE DPCD
 * registers.
 *
 * Returns the maximum link rate in 10 kbit/s units.
 */
int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_rate);

/**
 * drm_dp_tunnel_max_dprx_lane_count - Query the maximum lane count of the tunnel's DPRX
 * @tunnel: Tunnel object
 *
 * The function is used to query the maximum lane count of the DPRX connected
 * to @tunnel. Note that this lane count will not be limited by the BW limit of
 * the tunnel, as opposed to the standard and extended DP_MAX_LANE_COUNT DPCD
 * registers.
 *
 * Returns the maximum lane count.
 */
int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_max_dprx_lane_count);
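
/*
 * Illustrative sketch (not part of this file): clamping the link parameters
 * used on a tunnelled port to the DPRX capabilities reported via the tunnel.
 * The max_rate (in 10 kbit/s units, matching the kernel-doc above) and
 * max_lane_count limits stand for the driver's own values and are
 * hypothetical.
 */
static void example_limit_link_params(const struct drm_dp_tunnel *tunnel,
				      int *max_rate, int *max_lane_count)
{
	*max_rate = min(*max_rate, drm_dp_tunnel_max_dprx_rate(tunnel));
	*max_lane_count = min(*max_lane_count,
			      drm_dp_tunnel_max_dprx_lane_count(tunnel));
}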

/**
 * drm_dp_tunnel_available_bw - Query the estimated total available BW of the tunnel
 * @tunnel: Tunnel object
 *
 * This function is used to query the estimated total available BW of the
 * tunnel. This includes the currently allocated and free BW for all the
 * tunnels in @tunnel's group. The available BW is valid only after the BW
 * allocation mode has been enabled for the tunnel and its state has been
 * updated by calling drm_dp_tunnel_update_state().
 *
 * Returns the @tunnel group's estimated total available bandwidth in kB/s
 * units, or -1 if the available BW isn't valid (the BW allocation mode is
 * not enabled or the tunnel's state hasn't been updated).
 */
int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_available_bw);

static struct drm_dp_tunnel_group_state *
drm_dp_tunnel_atomic_get_group_state(struct drm_atomic_state *state,
				     const struct drm_dp_tunnel *tunnel)
{}

static struct drm_dp_tunnel_state *
add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
		 struct drm_dp_tunnel *tunnel)
{}

static void free_tunnel_state(struct drm_dp_tunnel_state *tunnel_state)
{}

static void free_group_state(struct drm_dp_tunnel_group_state *group_state)
{}

static struct drm_dp_tunnel_state *
get_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
		 const struct drm_dp_tunnel *tunnel)
{}

static struct drm_dp_tunnel_state *
get_or_add_tunnel_state(struct drm_dp_tunnel_group_state *group_state,
			struct drm_dp_tunnel *tunnel)
{}

static struct drm_private_state *
tunnel_group_duplicate_state(struct drm_private_obj *obj)
{}

static void tunnel_group_destroy_state(struct drm_private_obj *obj, struct drm_private_state *state)
{}

static const struct drm_private_state_funcs tunnel_group_funcs = {
	.atomic_duplicate_state = tunnel_group_duplicate_state,
	.atomic_destroy_state = tunnel_group_destroy_state,
};

/**
 * drm_dp_tunnel_atomic_get_state - get/allocate the new atomic state for a tunnel
 * @state: Atomic state
 * @tunnel: Tunnel to get the state for
 *
 * Get the new atomic state for @tunnel, duplicating it from the old tunnel
 * state if not yet allocated.
 *
 * Return the state or an ERR_PTR() error on failure.
 */
struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
			       struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_state);

/**
 * drm_dp_tunnel_atomic_get_old_state - get the old atomic state for a tunnel
 * @state: Atomic state
 * @tunnel: Tunnel to get the state for
 *
 * Get the old atomic state for @tunnel.
 *
 * Return the old state or NULL if the tunnel's atomic state is not in @state.
 */
struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_old_state);

/**
 * drm_dp_tunnel_atomic_get_new_state - get the new atomic state for a tunnel
 * @state: Atomic state
 * @tunnel: Tunnel to get the state for
 *
 * Get the new atomic state for @tunnel.
 *
 * Return the new state or NULL if the tunnel's atomic state is not in @state.
 */
struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_new_state);

static bool init_group(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_tunnel_group *group)
{}

static void cleanup_group(struct drm_dp_tunnel_group *group)
{}

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{}
#else
static void check_unique_stream_ids(const struct drm_dp_tunnel_group_state *group_state)
{
}
#endif

static int stream_id_to_idx(u32 stream_mask, u8 stream_id)
{}

static int resize_bw_array(struct drm_dp_tunnel_state *tunnel_state,
			   unsigned long old_mask, unsigned long new_mask)
{}

static int set_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
			 u8 stream_id, int bw)
{}

static int clear_stream_bw(struct drm_dp_tunnel_state *tunnel_state,
			   u8 stream_id)
{}

/**
 * drm_dp_tunnel_atomic_set_stream_bw - Set the BW for a DP tunnel stream
 * @state: Atomic state
 * @tunnel: DP tunnel containing the stream
 * @stream_id: Stream ID
 * @bw: BW of the stream
 *
 * Set a DP tunnel stream's required BW in the atomic state.
 *
 * Returns 0 in case of success, a negative error code otherwise.
 */
int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				       struct drm_dp_tunnel *tunnel,
				       u8 stream_id, int bw)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_set_stream_bw);

/**
 * drm_dp_tunnel_atomic_get_required_bw - Get the BW required by a DP tunnel
 * @tunnel_state: Atomic state of the queried tunnel
 *
 * Calculate the BW required by a tunnel, adding up the required BW of all
 * the streams in the tunnel.
 *
 * Return the total BW required by the tunnel.
 */
int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_required_bw);
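
/*
 * Illustrative sketch (not part of this file): comparing the BW required by a
 * tunnel's atomic state against the tunnel's estimated available BW. Treating
 * an invalid (-1) estimate as "no known limit" is an assumed driver policy.
 */
static bool example_tunnel_bw_fits(const struct drm_dp_tunnel *tunnel,
				   const struct drm_dp_tunnel_state *tunnel_state)
{
	int available_bw = drm_dp_tunnel_available_bw(tunnel);

	if (available_bw < 0)
		return true;

	return drm_dp_tunnel_atomic_get_required_bw(tunnel_state) <= available_bw;
}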

/**
 * drm_dp_tunnel_atomic_get_group_streams_in_state - Get mask of stream IDs in a group
 * @state: Atomic state
 * @tunnel: Tunnel object
 * @stream_mask: Mask of streams in @tunnel's group
 *
 * Get the mask of all the stream IDs in the tunnel group of @tunnel.
 *
 * Return 0 in case of success - with the stream IDs in @stream_mask - or a
 * negative error code in case of failure.
 */
int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						    const struct drm_dp_tunnel *tunnel,
						    u32 *stream_mask)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_get_group_streams_in_state);

static int
drm_dp_tunnel_atomic_check_group_bw(struct drm_dp_tunnel_group_state *new_group_state,
				    u32 *failed_stream_mask)
{}

/**
 * drm_dp_tunnel_atomic_check_stream_bws - Check BW limit for all streams in state
 * @state: Atomic state
 * @failed_stream_mask: Mask of stream IDs with a BW limit failure
 *
 * Check the required BW of each DP tunnel in @state against both the DPRX BW
 * limit of the tunnel and the BW limit of the tunnel group. Return a mask of
 * stream IDs in @failed_stream_mask once a check fails. The mask will contain
 * either all the streams in a tunnel (in case a DPRX BW limit check failed) or
 * all the streams in a tunnel group (in case a group BW limit check failed).
 *
 * Return 0 if all the BW limit checks passed, -ENOSPC in case a BW limit
 * check failed - with @failed_stream_mask containing the streams failing the
 * check - or a negative error code otherwise.
 */
int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
					  u32 *failed_stream_mask)
{}
EXPORT_SYMBOL(drm_dp_tunnel_atomic_check_stream_bws);
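
/*
 * Illustrative sketch (not part of this file): the driver side atomic check
 * flow for a tunnelled stream. How the stream ID and the per-stream BW are
 * derived is hypothetical; only the drm_dp_tunnel_atomic_*() calls below are
 * part of this API.
 */
static int example_tunnel_atomic_check(struct drm_atomic_state *state,
				       struct drm_dp_tunnel *tunnel,
				       u8 stream_id, int stream_bw)
{
	u32 failed_stream_mask;
	int err;

	/* Record the BW this stream needs through the tunnel (0 removes it). */
	err = drm_dp_tunnel_atomic_set_stream_bw(state, tunnel, stream_id,
						 stream_bw);
	if (err)
		return err;

	/* Check all streams in @state against the DPRX and group BW limits. */
	err = drm_dp_tunnel_atomic_check_stream_bws(state, &failed_stream_mask);
	if (err == -ENOSPC) {
		/*
		 * A real driver would try to lower the BW of the streams in
		 * failed_stream_mask (for instance by reducing their bpp) and
		 * re-run the check; this sketch simply propagates the error.
		 */
		return err;
	}

	return err;
}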

static void destroy_mgr(struct drm_dp_tunnel_mgr *mgr)
{}

/**
 * drm_dp_tunnel_mgr_create - Create a DP tunnel manager
 * @dev: DRM device object
 * @max_group_count: Maximum number of tunnel groups
 *
 * Creates a DP tunnel manager for @dev.
 *
 * Returns a pointer to the tunnel manager if created successfully or NULL in
 * case of an error.
 */
struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
{}
EXPORT_SYMBOL(drm_dp_tunnel_mgr_create);

/**
 * drm_dp_tunnel_mgr_destroy - Destroy DP tunnel manager
 * @mgr: Tunnel manager object
 *
 * Destroy the tunnel manager.
 */
void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr)
{}
EXPORT_SYMBOL(drm_dp_tunnel_mgr_destroy);
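
/*
 * Illustrative sketch (not part of this file): creating the tunnel manager at
 * driver load time and destroying it on unload. Using the number of tunnel
 * capable DP ports as the group count is an assumed driver policy.
 */
static struct drm_dp_tunnel_mgr *
example_create_tunnel_mgr(struct drm_device *dev, int dp_port_count)
{
	return drm_dp_tunnel_mgr_create(dev, dp_port_count);
}

static void example_remove_tunnel_mgr(struct drm_dp_tunnel_mgr *mgr)
{
	if (mgr)
		drm_dp_tunnel_mgr_destroy(mgr);
}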