Commit 047d4cd2 authored by Lyude Paul

drm/dp_mst: Rewrite and fix bandwidth limit checks

Sigh, this is mostly my fault for not giving commit cd82d82c
("drm/dp_mst: Add branch bandwidth validation to MST atomic check")
enough scrutiny during review. The way we're checking bandwidth
limitations here is mostly wrong:

For starters, drm_dp_mst_atomic_check_bw_limit() determines the
pbn_limit of a branch by simply scanning each port on the current branch
device and using the last non-zero full_pbn value that it finds. It then
sums the PBN used by every VCPI allocation downstream of that branch and
compares the total against the full_pbn value it found earlier.
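
To make the failure mode concrete, the limit-picking logic boils down to
something like this (distilled from the old helper shown in the diff
below):

    int pbn_limit = 0;

    list_for_each_entry(port, &branch->ports, next) {
            if (port->full_pbn > 0)
                    pbn_limit = port->full_pbn; /* last port wins */
    }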

This is wrong because ports can and will have different PBN limitations
on many hubs, especially since a number of DisplayPort hubs out there
will be clever and only use the smallest link rate required for each
downstream sink - potentially giving every port a different full_pbn
value depending on the link rate it's trained at. This means that with the
current code, the PBN limit we end up comparing against is not well defined.

Additionally, we also need to remember when checking bandwidth
limitations that the top-most device in any MST topology is a branch
device, not a port. This means that the first level of a topology
doesn't technically have a full_pbn value that needs to be checked.
Instead, we should assume that so long as our VCPI allocations fit we're
within the bandwidth limitations of the primary MSTB.

We do, however, want to check full_pbn on every port, including those of
the primary MSTB. It's important to keep in mind that this value describes
the bandwidth limit of the link /between a port's sink or mstb, and the
mstb itself/. A quick diagram to explain:

                                MSTB #1
                               /       \
                              /         \
                           Port #1    Port #2
       full_pbn for Port #1 → |          | ← full_pbn for Port #2
                           Sink #1    MSTB #2
                                         |
                                       etc...

Note that in the above diagram, the combined PBN from all VCPI allocations
downstream of MSTB #2 must not exceed the full_pbn value of port #2, and
the PBN of the display configuration on sink #1 must not exceed the
full_pbn value of port #1. Beyond that, port #1 and port #2 can consume as
much bandwidth as they want, so long as their VCPI allocations still fit.
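
To put rough, purely illustrative numbers on that (using the usual MST
figures: a 4-lane HBR2 link carries 2560 PBN, a 1-lane HBR2 link 640 PBN,
and a 1080p@60 stream needs on the order of 530 PBN): if port #2 trained
at 4 lanes HBR2, its full_pbn would be 2560, so two 1080p@60 sinks behind
MSTB #2 (~1060 PBN combined) fit comfortably, while two 4K@60 streams
(upwards of ~1900 PBN each) would not. Sink #1's stream, meanwhile, is
only ever checked against port #1's full_pbn.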

And finally, our current bandwidth checking code also makes the mistake of
not checking whether a port is an end device before trying to traverse
down it.

So, let's fix it by rewriting our bandwidth checking helpers. We split the
function into one part for handling branches, which simply adds up the
total PBN used on each branch and returns it, and one for checking each
port to ensure we're not going over its PBN limit. Phew.
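
For intuition, here's a minimal standalone sketch of that two-function
shape, using hypothetical simplified types rather than the kernel's actual
structs (the PDT/end-device handling, the VCPI state lookup, and locking
are all elided):

    #include <stdio.h>

    /* Hypothetical, simplified stand-ins for the kernel topology structs */
    struct mst_branch;

    struct mst_port {
            int full_pbn;            /* limit of the link feeding this port */
            int vcpi_pbn;            /* PBN allocated to a sink on this port */
            struct mst_branch *mstb; /* set if a branch hangs off this port */
    };

    struct mst_branch {
            struct mst_port *ports;
            int num_ports;
    };

    static int check_port_bw(const struct mst_port *port);

    /* Branch half: sum the PBN used by every port on this branch device */
    static int check_mstb_bw(const struct mst_branch *mstb)
    {
            int pbn_used = 0;

            for (int i = 0; i < mstb->num_ports; i++) {
                    int ret = check_port_bw(&mstb->ports[i]);

                    if (ret < 0)
                            return ret;
                    pbn_used += ret;
            }
            return pbn_used;
    }

    /* Port half: recurse into downstream branches, then enforce full_pbn */
    static int check_port_bw(const struct mst_port *port)
    {
            int pbn_used = port->mstb ? check_mstb_bw(port->mstb)
                                      : port->vcpi_pbn;

            if (pbn_used < 0)
                    return pbn_used; /* propagate a failure from below */
            if (pbn_used > port->full_pbn)
                    return -1;       /* over this port's limit */
            return pbn_used;
    }

    int main(void)
    {
            /* Model the diagram above, with the illustrative PBN values */
            struct mst_port leaf_ports[] = {
                    { .full_pbn = 640, .vcpi_pbn = 531 },
                    { .full_pbn = 640, .vcpi_pbn = 531 },
            };
            struct mst_branch mstb2 = { .ports = leaf_ports, .num_ports = 2 };
            struct mst_port top_ports[] = {
                    { .full_pbn = 640,  .vcpi_pbn = 531 }, /* port #1 → sink #1 */
                    { .full_pbn = 2560, .mstb = &mstb2 },  /* port #2 → MSTB #2 */
            };
            struct mst_branch mstb1 = { .ports = top_ports, .num_ports = 2 };

            /* The top-level MSTB itself has no full_pbn to enforce */
            printf("total PBN used: %d\n", check_mstb_bw(&mstb1));
            return 0;
    }

This prints "total PBN used: 1593" for that topology; the real helpers in
the diff below do the same walk against the atomic state's VCPI
allocations.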

This should fix the regressions that have been seen where we erroneously
reject display configurations because we think they exceed our bandwidth
limits when they don't.

Changes since v1:
* Took an even closer look at how PBN limitations are supposed to be
  handled, and did some experimenting with Sean Paul. Ended up rewriting
  these helpers again, but this time they should actually be correct!
Changes since v2:
* Small indenting fix
* Fix pbn_used check in drm_dp_mst_atomic_check_port_bw_limit()
Signed-off-by: Lyude Paul <lyude@redhat.com>
Fixes: cd82d82c ("drm/dp_mst: Add branch bandwidth validation to MST atomic check")
Cc: Sean Paul <seanpaul@google.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Mikita Lipski <mikita.lipski@amd.com>
Tested-by: Hans de Goede <hdegoede@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200309210131.1497545-1-lyude@redhat.com
parent 87212b51
@@ -4830,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
 	return false;
 }
 
-static inline
-int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
-				     struct drm_dp_mst_topology_state *mst_state)
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+				      struct drm_dp_mst_topology_state *state);
+
+static int
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
+				      struct drm_dp_mst_topology_state *state)
 {
-	struct drm_dp_mst_port *port;
 	struct drm_dp_vcpi_allocation *vcpi;
-	int pbn_limit = 0, pbn_used = 0;
+	struct drm_dp_mst_port *port;
+	int pbn_used = 0, ret;
+	bool found = false;
 
-	list_for_each_entry(port, &branch->ports, next) {
-		if (port->mstb)
-			if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
-				return -ENOSPC;
+	/* Check that we have at least one port in our state that's downstream
+	 * of this branch, otherwise we can skip this branch
+	 */
+	list_for_each_entry(vcpi, &state->vcpis, next) {
+		if (!vcpi->pbn ||
+		    !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+			continue;
 
-		if (port->full_pbn > 0)
-			pbn_limit = port->full_pbn;
+		found = true;
+		break;
 	}
-	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
-			 branch, pbn_limit);
+	if (!found)
+		return 0;
 
-	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
-		if (!vcpi->pbn)
-			continue;
+	if (mstb->port_parent)
+		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+				 mstb->port_parent->parent, mstb->port_parent,
+				 mstb);
+	else
+		DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
+				 mstb);
+
+	list_for_each_entry(port, &mstb->ports, next) {
+		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+		if (ret < 0)
+			return ret;
 
-		if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
-			pbn_used += vcpi->pbn;
+		pbn_used += ret;
 	}
-	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
-			 branch, pbn_used);
 
-	if (pbn_used > pbn_limit) {
-		DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
-				 branch);
+	return pbn_used;
+}
+
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+				      struct drm_dp_mst_topology_state *state)
+{
+	struct drm_dp_vcpi_allocation *vcpi;
+	int pbn_used = 0;
+
+	if (port->pdt == DP_PEER_DEVICE_NONE)
+		return 0;
+
+	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+		bool found = false;
+
+		list_for_each_entry(vcpi, &state->vcpis, next) {
+			if (vcpi->port != port)
+				continue;
+			if (!vcpi->pbn)
+				return 0;
+
+			found = true;
+			break;
+		}
+		if (!found)
+			return 0;
+
+		/* This should never happen, as it means we tried to
+		 * set a mode before querying the full_pbn
+		 */
+		if (WARN_ON(!port->full_pbn))
+			return -EINVAL;
+
+		pbn_used = vcpi->pbn;
+	} else {
+		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
+								 state);
+		if (pbn_used <= 0)
+			return pbn_used;
+	}
+
+	if (pbn_used > port->full_pbn) {
+		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+				 port->parent, port, pbn_used,
+				 port->full_pbn);
 		return -ENOSPC;
 	}
-	return 0;
+
+	DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+			 port->parent, port, pbn_used, port->full_pbn);
+
+	return pbn_used;
 }
 
 static inline int
@@ -5062,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
 		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
 		if (ret)
 			break;
-		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
-		if (ret)
+
+		mutex_lock(&mgr->lock);
+		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+							    mst_state);
+		mutex_unlock(&mgr->lock);
+		if (ret < 0)
 			break;
+		else
+			ret = 0;
 	}
 
 	return ret;