Commit 295654f7 authored by Imre Deak

drm/dp: Add support for DP tunneling

Add support for DisplayPort tunneling. For now this includes support
for Bandwidth Allocation Mode (BWA); Panel Replay support is left for
later.

BWA allows displays that share the same (Thunderbolt) link to be used
at their maximum resolution. At the moment this may not be possible,
due to the coarse granularity with which the link BW is partitioned
among the displays on the link: the BW allocation policy lives in a
SW/FW/HW component on the link (on Thunderbolt it's the SW or FW
Connection Manager), independent of the driver. Based on the available
link BW, this policy sets the DPRX maximum rate and lane count DPCD
registers (0x00000, 0x00001, 0x02200, 0x02201) that the GFX driver
will see.

The granularity of the current BW allocation policy is coarse, based on
the required link rate in the 1.62Gbps..8.1Gbps range, and it may
prevent using higher resolutions altogether: the display connected
first will get a share of the link BW corresponding to its full DPRX
capability (regardless of the actual mode it uses), while a display
connected subsequently will only get the remaining BW, which could be
well below its full capability.
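
To put numbers on this (an illustrative example, not from the patch):
the first display connected may advertise a DPRX capability of HBR3 x 4
lanes, i.e. 4 x 8.1 Gbps = 32.4 Gbps of raw link BW, and so be granted
a share sized for that full capability even if the mode it actually
drives needs only a fraction of it; a second display then has to fit
into whatever is left.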

BWA solves both the coarse granularity (reducing it to a
250Mbps..1Gbps range) and the first-come/first-served issue, by letting
the driver request the BW for each display on a link based on the
actual mode each display uses.
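
As a hedged sketch of what this finer granularity means in practice
(the helper below is hypothetical, not part of this patch): the driver
reads the unit size from the DP_BW_GRANULARITY DPCD register and
requests BW as a multiple of that unit via DP_REQUEST_BW.

/* Hedged sketch: convert the DP_BW_GRANULARITY register value to the
 * size of one DP_REQUEST_BW unit in kbps. Encoding assumed per the
 * commit text: 0 -> 0.25 Gbps, 1 -> 0.5 Gbps, 2 -> 1 Gbps.
 */
static int bw_granularity_kbps(u8 gran_reg)
{
	int gran = gran_reg & DP_BW_GRANULARITY_MASK;

	if (gran > 2)
		return -EINVAL;

	return 250000 << gran;
}

A stream needing ~17.3 Gbps would then request
DIV_ROUND_UP(17300000, 250000) = 70 units at the finest granularity,
well within the MAX_DP_REQUEST_BW (255) limit, instead of reserving a
full DPRX-capability share of the link.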

This patch adds the DRM core helper functions, while a follow-up change
in the patchset takes them into use in the i915 driver.

v2:
- Fix prepare_to_wait vs. wake-up cond check order in
  allocate_tunnel_bw(). (Ville)
- Move tunnel==NULL checks from callers in drivers to here. (Ville)
- Avoid var inits in declaration blocks that can fail or have
  side-effects. (Ville)
- Use u8 for driver and group IDs. (Ville)
- Simplify API removing drm_dp_tunnel_get/put_untracked(). (Ville)
- Reuse str_yes_no() instead of a local yes_no_chr(). (Ville)
- s/drm_dp_tunnel_atomic_clear_state()/free_tunnel_state() and unexport
  the function. (Ville)
- s/clear_tunnel_group_state()/free_group_state() and move kfree() to
  this function. (Ville)
- Add separate group_free_bw() helper and describe what the tunnel
  estimated BW includes. (Ville)
- Improve help text for CONFIG_DRM_DISPLAY_DP_TUNNEL. (Ville)
- Add code comment explaining the purpose of DPCD reg read helpers.
  (Ville)
- Add code comment describing the tunnel group name prefix format.
  (Ville)
- Report the allocated BW as undetermined until the first allocation
  request.
- Skip allocation requests matching the previous request.
- Clear any stale BW request status flags before a new request.
- Add missing error return check of drm_dp_tunnel_atomic_get_group_state()
  in drm_dp_tunnel_atomic_set_stream_bw().
- Add drm_dp_tunnel_get_allocated_bw().
- s/drm_dp_tunnel_atomic_get_tunnel_bw/drm_dp_tunnel_atomic_get_required_bw
- Fix return value description in function doc of drm_dp_tunnel_detect().
- Add function documentation to all exported functions.

v3:
- Improve grouping of fields in drm_dp_tunnel_group struct. (Uma)
- Fix validating the BW granularity DPCD reg value. (Uma)
- Document return value of check_and_clear_status_change(). (Uma)
- Fix resetting drm_dp_tunnel_ref::tunnel in drm_dp_tunnel_ref_put().
  (Ville)
- Allow for ALLOCATED_BW to change after a BWA enable/disable sequence.

Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Reviewed-by: Uma Shankar <uma.shankar@intel.com>
Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Imre Deak <imre.deak@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240226185246.1276018-2-imre.deak@intel.com
parent 1e59ab50
drivers/gpu/drm/display/Kconfig
@@ -17,6 +17,27 @@ config DRM_DISPLAY_DP_HELPER
	help
	  DRM display helpers for DisplayPort.

config DRM_DISPLAY_DP_TUNNEL
	bool
	select DRM_DISPLAY_DP_HELPER
	help
	  Enable support for DisplayPort tunnels. This allows drivers to use
	  DP tunnel features like the Bandwidth Allocation mode to maximize the
	  BW utilization for display streams on Thunderbolt links.

config DRM_DISPLAY_DEBUG_DP_TUNNEL_STATE
	bool "Enable debugging the DP tunnel state"
	depends on REF_TRACKER
	depends on DRM_DISPLAY_DP_TUNNEL
	depends on DEBUG_KERNEL
	depends on EXPERT
	help
	  Enables debugging the DP tunnel manager's state, including the
	  consistency of all managed tunnels' reference counting and the state of
	  streams contained in tunnels.

	  If in doubt, say "N".

config DRM_DISPLAY_HDCP_HELPER
	bool
	depends on DRM_DISPLAY_HELPER
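For reference, a display driver opts in by selecting the new symbol
from its own Kconfig. A hypothetical sketch (the actual i915 wiring is
added by a follow-up patch in the series):

config DRM_MYDRIVER_DP_TUNNEL
	bool "Enable DP tunnel support for mydriver"
	depends on DRM_MYDRIVER
	select DRM_DISPLAY_DP_TUNNEL
	help
	  Hypothetical example only: pulls in the DRM DP tunnel
	  helpers for a driver that wants to use BWA.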
drivers/gpu/drm/display/Makefile
@@ -8,6 +8,8 @@ drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
	drm_dp_helper.o \
	drm_dp_mst_topology.o \
	drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_TUNNEL) += \
	drm_dp_tunnel.o

drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
	drm_hdmi_helper.o \
drivers/gpu/drm/display/drm_dp_tunnel.c (new file; large diff collapsed, not shown here)
include/drm/display/drm_dp.h
@@ -1382,6 +1382,66 @@
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET		0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET			0x69518

/* DP-tunneling */
#define DP_TUNNELING_OUI				0xe0000
#define DP_TUNNELING_OUI_BYTES				3

#define DP_TUNNELING_DEV_ID				0xe0003
#define DP_TUNNELING_DEV_ID_BYTES			6

#define DP_TUNNELING_HW_REV				0xe0009
#define DP_TUNNELING_HW_REV_MAJOR_SHIFT			4
#define DP_TUNNELING_HW_REV_MAJOR_MASK			(0xf << DP_TUNNELING_HW_REV_MAJOR_SHIFT)
#define DP_TUNNELING_HW_REV_MINOR_SHIFT			0
#define DP_TUNNELING_HW_REV_MINOR_MASK			(0xf << DP_TUNNELING_HW_REV_MINOR_SHIFT)

#define DP_TUNNELING_SW_REV_MAJOR			0xe000a
#define DP_TUNNELING_SW_REV_MINOR			0xe000b

#define DP_TUNNELING_CAPABILITIES			0xe000d
#define DP_IN_BW_ALLOCATION_MODE_SUPPORT		(1 << 7)
#define DP_PANEL_REPLAY_OPTIMIZATION_SUPPORT		(1 << 6)
#define DP_TUNNELING_SUPPORT				(1 << 0)

#define DP_IN_ADAPTER_INFO				0xe000e
#define DP_IN_ADAPTER_NUMBER_BITS			7
#define DP_IN_ADAPTER_NUMBER_MASK			((1 << DP_IN_ADAPTER_NUMBER_BITS) - 1)

#define DP_USB4_DRIVER_ID				0xe000f
#define DP_USB4_DRIVER_ID_BITS				4
#define DP_USB4_DRIVER_ID_MASK				((1 << DP_USB4_DRIVER_ID_BITS) - 1)

#define DP_USB4_DRIVER_BW_CAPABILITY			0xe0020
#define DP_USB4_DRIVER_BW_ALLOCATION_MODE_SUPPORT	(1 << 7)

#define DP_IN_ADAPTER_TUNNEL_INFORMATION		0xe0021
#define DP_GROUP_ID_BITS				3
#define DP_GROUP_ID_MASK				((1 << DP_GROUP_ID_BITS) - 1)

#define DP_BW_GRANULARITY				0xe0022
#define DP_BW_GRANULARITY_MASK				0x3

#define DP_ESTIMATED_BW					0xe0023
#define DP_ALLOCATED_BW					0xe0024

#define DP_TUNNELING_STATUS				0xe0025
#define DP_BW_ALLOCATION_CAPABILITY_CHANGED		(1 << 3)
#define DP_ESTIMATED_BW_CHANGED				(1 << 2)
#define DP_BW_REQUEST_SUCCEEDED				(1 << 1)
#define DP_BW_REQUEST_FAILED				(1 << 0)

#define DP_TUNNELING_MAX_LINK_RATE			0xe0028

#define DP_TUNNELING_MAX_LANE_COUNT			0xe0029
#define DP_TUNNELING_MAX_LANE_COUNT_MASK		0x1f

#define DP_DPTX_BW_ALLOCATION_MODE_CONTROL		0xe0030
#define DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE	(1 << 7)
#define DP_UNMASK_BW_ALLOCATION_IRQ			(1 << 6)

#define DP_REQUEST_BW					0xe0031
#define MAX_DP_REQUEST_BW				255

/* LTTPR: Link Training (LT)-tunable PHY Repeaters */
#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV	0xf0000 /* 1.3 */
#define DP_MAX_LINK_RATE_PHY_REPEATER				0xf0001 /* 1.4a */
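
Purely as an illustration of how a source driver is expected to consume
these registers (a minimal sketch, not code from this patch; the helper
name is made up), probing the tunneling capability could look like:

#include <drm/display/drm_dp_helper.h>

/* Sketch: check whether the DP-IN adapter advertises tunneling support
 * and the BW allocation mode, per DP_TUNNELING_CAPABILITIES above.
 */
static bool sketch_tunnel_bwa_supported(struct drm_dp_aux *aux)
{
	u8 caps;

	if (drm_dp_dpcd_readb(aux, DP_TUNNELING_CAPABILITIES, &caps) != 1)
		return false;

	return (caps & DP_TUNNELING_SUPPORT) &&
	       (caps & DP_IN_BW_ALLOCATION_MODE_SUPPORT);
}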
include/drm/display/drm_dp_tunnel.h (new file)
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __DRM_DP_TUNNEL_H__
#define __DRM_DP_TUNNEL_H__

#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct drm_dp_aux;
struct drm_device;

struct drm_atomic_state;
struct drm_dp_tunnel_mgr;
struct drm_dp_tunnel_state;

struct ref_tracker;

struct drm_dp_tunnel_ref {
	struct drm_dp_tunnel *tunnel;
	struct ref_tracker *tracker;
};

#ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL

struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);

void
drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker);

static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
					 struct drm_dp_tunnel_ref *tunnel_ref)
{
	tunnel_ref->tunnel = drm_dp_tunnel_get(tunnel, &tunnel_ref->tracker);
}

static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref)
{
	drm_dp_tunnel_put(tunnel_ref->tunnel, &tunnel_ref->tracker);
	tunnel_ref->tunnel = NULL;
}
struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
		     struct drm_dp_aux *aux);
int drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel);

int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel);
bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw);
int drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel);

void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel);

int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
			     struct drm_dp_aux *aux);

int drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel);
int drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel);

const char *drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel);

struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
			       struct drm_dp_tunnel *tunnel);

struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_old_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel);

struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel);

int drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				       struct drm_dp_tunnel *tunnel,
				       u8 stream_id, int bw);
int drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						    const struct drm_dp_tunnel *tunnel,
						    u32 *stream_mask);

int drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
					  u32 *failed_stream_mask);

int drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state);

struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count);
void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr);
#else

static inline struct drm_dp_tunnel *
drm_dp_tunnel_get(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker)
{
	return NULL;
}

static inline void
drm_dp_tunnel_put(struct drm_dp_tunnel *tunnel, struct ref_tracker **tracker) {}

static inline void drm_dp_tunnel_ref_get(struct drm_dp_tunnel *tunnel,
					 struct drm_dp_tunnel_ref *tunnel_ref) {}

static inline void drm_dp_tunnel_ref_put(struct drm_dp_tunnel_ref *tunnel_ref) {}

static inline struct drm_dp_tunnel *
drm_dp_tunnel_detect(struct drm_dp_tunnel_mgr *mgr,
		     struct drm_dp_aux *aux)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int
drm_dp_tunnel_destroy(struct drm_dp_tunnel *tunnel)
{
	return 0;
}

static inline int drm_dp_tunnel_enable_bw_alloc(struct drm_dp_tunnel *tunnel)
{
	return -EOPNOTSUPP;
}

static inline int drm_dp_tunnel_disable_bw_alloc(struct drm_dp_tunnel *tunnel)
{
	return -EOPNOTSUPP;
}

static inline bool drm_dp_tunnel_bw_alloc_is_enabled(const struct drm_dp_tunnel *tunnel)
{
	return false;
}

static inline int
drm_dp_tunnel_alloc_bw(struct drm_dp_tunnel *tunnel, int bw)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_get_allocated_bw(struct drm_dp_tunnel *tunnel)
{
	return -1;
}

static inline int
drm_dp_tunnel_update_state(struct drm_dp_tunnel *tunnel)
{
	return -EOPNOTSUPP;
}

static inline void drm_dp_tunnel_set_io_error(struct drm_dp_tunnel *tunnel) {}

static inline int
drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr,
			 struct drm_dp_aux *aux)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_max_dprx_rate(const struct drm_dp_tunnel *tunnel)
{
	return 0;
}

static inline int
drm_dp_tunnel_max_dprx_lane_count(const struct drm_dp_tunnel *tunnel)
{
	return 0;
}

static inline int
drm_dp_tunnel_available_bw(const struct drm_dp_tunnel *tunnel)
{
	return -1;
}

static inline const char *
drm_dp_tunnel_name(const struct drm_dp_tunnel *tunnel)
{
	return NULL;
}

static inline struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_state(struct drm_atomic_state *state,
			       struct drm_dp_tunnel *tunnel)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct drm_dp_tunnel_state *
drm_dp_tunnel_atomic_get_new_state(struct drm_atomic_state *state,
				   const struct drm_dp_tunnel *tunnel)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int
drm_dp_tunnel_atomic_set_stream_bw(struct drm_atomic_state *state,
				   struct drm_dp_tunnel *tunnel,
				   u8 stream_id, int bw)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_atomic_get_group_streams_in_state(struct drm_atomic_state *state,
						const struct drm_dp_tunnel *tunnel,
						u32 *stream_mask)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_atomic_check_stream_bws(struct drm_atomic_state *state,
				      u32 *failed_stream_mask)
{
	return -EOPNOTSUPP;
}

static inline int
drm_dp_tunnel_atomic_get_required_bw(const struct drm_dp_tunnel_state *tunnel_state)
{
	return 0;
}

static inline struct drm_dp_tunnel_mgr *
drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline
void drm_dp_tunnel_mgr_destroy(struct drm_dp_tunnel_mgr *mgr) {}

#endif /* CONFIG_DRM_DISPLAY_DP_TUNNEL */

#endif /* __DRM_DP_TUNNEL_H__ */
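
Putting the API together, a hedged sketch of the intended driver-side
flow (the function below is illustrative only and elides the
driver-specific mgr/AUX plumbing; the real i915 usage lands in a
follow-up patch of the series):

/* Sketch: detect a tunnel behind an AUX channel, enable BW allocation
 * mode on it and request the BW needed by the current mode. Assumes
 * drm_dp_tunnel_detect() returns an ERR_PTR() on failure, per its
 * function doc.
 */
static int sketch_setup_tunnel_bw(struct drm_dp_tunnel_mgr *mgr,
				  struct drm_dp_aux *aux, int required_bw)
{
	struct drm_dp_tunnel *tunnel;
	int err;

	tunnel = drm_dp_tunnel_detect(mgr, aux);
	if (IS_ERR(tunnel))
		return PTR_ERR(tunnel);

	err = drm_dp_tunnel_enable_bw_alloc(tunnel);
	if (!err)
		err = drm_dp_tunnel_alloc_bw(tunnel, required_bw);

	if (err)
		drm_dp_tunnel_destroy(tunnel);

	return err;
}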