Commit 5aa1dfcd authored by Wayne Lin, committed by Lyude Paul

drm/mst: Refactor the flow for payload allocation/removement

[Why]
Today, the allocation/deallocation steps and status are a bit unclear.

For instance, payload->vc_start_slot = -1 can stand for "updating the DPCD
payload ID table failed" and can also stand for "the payload is not allocated
yet". These two cases should be handled differently, so it is better to
distinguish them explicitly.

[How]
Define an enumeration - ALLOCATION_LOCAL, ALLOCATION_DFP and ALLOCATION_REMOTE -
to distinguish the different allocation states. Adjust the code to handle each
state accordingly, which makes the sequence of payload allocation and payload
removal easier to follow. Conceptually, a payload's allocation status advances
from "none" through LOCAL and DFP to REMOTE as the steps below complete, and
steps back down in the same order on removal.

For payload creation, the procedure should look like this (a rough driver-side
sketch follows the list):
DRM part 1:
* step 1 - update sw mst mgr variables to add a new payload
* step 2 - add payload at immediate DFP DPCD payload table

Driver:
* Add new payload in HW and sync up with DFP by sending ACT

DRM Part 2:
* Send ALLOCATE_PAYLOAD sideband message to allocate bandwidth along the
  virtual channel.
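
To make the split concrete, here is a rough sketch of the creation sequence as
a driver might run it under commit_tail. Only the drm_dp_* calls are part of
this patch; the my_hw_* helpers are hypothetical placeholders for the driver's
own HW programming:

  /* DRM part 1: reserve time slots in the SW mst mgr and write the DFP DPCD table */
  drm_dp_add_payload_part1(mgr, mst_state, payload);

  /* Driver: program the new payload in HW and sync with the DFP by sending ACT */
  my_hw_program_payload(payload);          /* hypothetical helper */
  my_hw_trigger_and_wait_for_act();        /* hypothetical helper */

  /* DRM part 2: send ALLOCATE_PAYLOAD to allocate bandwidth along the VC */
  drm_dp_add_payload_part2(mgr, state, payload);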

And for payload removal, the procedure should look like this (again, a rough
sketch follows the list):
DRM part 1:
* step 1 - Send ALLOCATE_PAYLOAD sideband message to release bandwidth
           along the virtual channel
* step 2 - Clear payload allocation at immediate DFP DPCD payload table

Driver:
* Remove the payload in HW and sync up with DFP by sending ACT

DRM part 2:
* update sw mst mgr variables to remove the payload
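
A matching sketch for the removal sequence (as above, only the drm_dp_* calls
come from this patch; the my_hw_* helpers are hypothetical):

  /* DRM part 1: release bandwidth along the VC and clear the DFP DPCD table */
  drm_dp_remove_payload_part1(mgr, mst_state, new_payload);

  /* Driver: remove the payload in HW and sync with the DFP by sending ACT */
  my_hw_clear_payload(new_payload);        /* hypothetical helper */
  my_hw_trigger_and_wait_for_act();        /* hypothetical helper */

  /* DRM part 2: drop the SW mst mgr allocation and shift the remaining payloads */
  drm_dp_remove_payload_part2(mgr, mst_state, old_payload, new_payload);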

Note that it's fine if communication with the branch device connected to the
immediate downstream-facing port fails, but updating the SW mst mgr variables
and the HW configuration should be done anyway. That's because this runs under
commit_tail and the HW programming needs to complete.

Changes since v1:
* Remove the set-but-unused variable 'old_payload' in function
  'nv50_msto_prepare'. Caught by kernel test robot <lkp@intel.com>
Signed-off-by: Wayne Lin <Wayne.Lin@amd.com>
Signed-off-by: Lyude Paul <lyude@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230807025639.1612361-3-Wayne.Lin@amd.com
parent ae4d2314
@@ -219,7 +219,7 @@ static void dm_helpers_construct_old_payload(
 	/* Set correct time_slots/PBN of old payload.
 	 * other fields (delete & dsc_enabled) in
 	 * struct drm_dp_mst_atomic_payload are don't care fields
-	 * while calling drm_dp_remove_payload()
+	 * while calling drm_dp_remove_payload_part2()
 	 */
 	for (i = 0; i < current_link_table.stream_count; i++) {
 		dc_alloc =
@@ -262,13 +262,12 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 	mst_mgr = &aconnector->mst_root->mst_mgr;
 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
-	/* It's OK for this to fail */
 	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
 	if (enable) {
 		target_payload = new_payload;
+		/* It's OK for this to fail */
 		drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
 	} else {
 		/* construct old payload by VCPI*/
@@ -276,7 +275,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
 						 new_payload, &old_payload);
 		target_payload = &old_payload;
-		drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+		drm_dp_remove_payload_part1(mst_mgr, mst_state, new_payload);
 	}
 	/* mst_mgr->->payloads are VC payload notify MST branch using DPCD or
@@ -342,7 +341,7 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	struct amdgpu_dm_connector *aconnector;
 	struct drm_dp_mst_topology_state *mst_state;
 	struct drm_dp_mst_topology_mgr *mst_mgr;
-	struct drm_dp_mst_atomic_payload *payload;
+	struct drm_dp_mst_atomic_payload *new_payload, *old_payload;
 	enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
 	enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
 	int ret = 0;
@@ -355,15 +354,20 @@ bool dm_helpers_dp_mst_send_payload_allocation(
 	mst_mgr = &aconnector->mst_root->mst_mgr;
 	mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
-	payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+	new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
 	if (!enable) {
 		set_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
 		clr_flag = MST_ALLOCATE_NEW_PAYLOAD;
 	}
-	if (enable)
-		ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload);
+	if (enable) {
+		ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
+	} else {
+		dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+						 new_payload, old_payload);
+		drm_dp_remove_payload_part2(mst_mgr, mst_state, old_payload, new_payload);
+	}
 	if (ret) {
 		amdgpu_dm_set_mst_status(&aconnector->mst_status,
...
@@ -3255,15 +3255,15 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr,
 }
 EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
-static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
 				       struct drm_dp_mst_atomic_payload *payload)
 {
 	return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
 					 payload->time_slots);
 }
-static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
+static int drm_dp_create_payload_to_remote(struct drm_dp_mst_topology_mgr *mgr,
 				       struct drm_dp_mst_atomic_payload *payload)
 {
 	int ret;
 	struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
@@ -3276,17 +3276,20 @@ static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
 	return ret;
 }
-static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
+static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_dp_mst_topology_state *mst_state,
 					struct drm_dp_mst_atomic_payload *payload)
 {
 	drm_dbg_kms(mgr->dev, "\n");
 	/* it's okay for these to fail */
-	drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
-	drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
-	return 0;
+	if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE) {
+		drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0);
+		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
+	}
+	if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
+		drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
 }
 /**
@@ -3296,81 +3299,105 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
  * @payload: The payload to write
  *
  * Determines the starting time slot for the given payload, and programs the VCPI for this payload
- * into hardware. After calling this, the driver should generate ACT and payload packets.
+ * into the DPCD of DPRX. After calling this, the driver should generate ACT and payload packets.
  *
- * Returns: 0 on success, error code on failure. In the event that this fails,
- * @payload.vc_start_slot will also be set to -1.
+ * Returns: 0 on success, error code on failure.
  */
 int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
 			     struct drm_dp_mst_topology_state *mst_state,
 			     struct drm_dp_mst_atomic_payload *payload)
 {
 	struct drm_dp_mst_port *port;
-	int ret;
+	int ret = 0;
+	bool allocate = true;
+	/* Update mst mgr info */
+	if (mgr->payload_count == 0)
+		mgr->next_start_slot = mst_state->start_slot;
+	payload->vc_start_slot = mgr->next_start_slot;
+	mgr->payload_count++;
+	mgr->next_start_slot += payload->time_slots;
+	/* Allocate payload to immediate downstream facing port */
 	port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
 	if (!port) {
 		drm_dbg_kms(mgr->dev,
-			    "VCPI %d for port %p not in topology, not creating a payload\n",
+			    "VCPI %d for port %p not in topology, not creating a payload to remote\n",
 			    payload->vcpi, payload->port);
-		payload->vc_start_slot = -1;
-		return 0;
+		allocate = false;
 	}
-	if (mgr->payload_count == 0)
-		mgr->next_start_slot = mst_state->start_slot;
-	payload->vc_start_slot = mgr->next_start_slot;
-	ret = drm_dp_create_payload_step1(mgr, payload);
-	drm_dp_mst_topology_put_port(port);
-	if (ret < 0) {
-		drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
-			 payload->port, ret);
-		payload->vc_start_slot = -1;
-		return ret;
+	if (allocate) {
+		ret = drm_dp_create_payload_at_dfp(mgr, payload);
+		if (ret < 0)
+			drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
				 payload->port, ret);
 	}
-	mgr->payload_count++;
-	mgr->next_start_slot += payload->time_slots;
-	return 0;
+	payload->payload_allocation_status =
+		(!allocate || ret < 0) ? DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL :
+					 DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
+	drm_dp_mst_topology_put_port(port);
+	return ret;
 }
 EXPORT_SYMBOL(drm_dp_add_payload_part1);
 /**
- * drm_dp_remove_payload() - Remove an MST payload
+ * drm_dp_remove_payload_part1() - Remove an MST payload along the virtual channel
  * @mgr: Manager to use.
 * @mst_state: The MST atomic state
- * @old_payload: The payload with its old state
- * @new_payload: The payload to write
+ * @payload: The payload to remove
 *
- * Removes a payload from an MST topology if it was successfully assigned a start slot. Also updates
- * the starting time slots of all other payloads which would have been shifted towards the start of
- * the VC table as a result. After calling this, the driver should generate ACT and payload packets.
+ * Removes a payload along the virtual channel if it was successfully allocated.
+ * After calling this, the driver should set HW to generate ACT and then switch to new
+ * payload allocation state.
 */
-void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
-			   struct drm_dp_mst_topology_state *mst_state,
-			   const struct drm_dp_mst_atomic_payload *old_payload,
-			   struct drm_dp_mst_atomic_payload *new_payload)
+void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_topology_state *mst_state,
+				 struct drm_dp_mst_atomic_payload *payload)
 {
-	struct drm_dp_mst_atomic_payload *pos;
+	/* Remove remote payload allocation */
 	bool send_remove = false;
-	/* We failed to make the payload, so nothing to do */
-	if (new_payload->vc_start_slot == -1)
-		return;
 	mutex_lock(&mgr->lock);
-	send_remove = drm_dp_mst_port_downstream_of_branch(new_payload->port, mgr->mst_primary);
+	send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary);
 	mutex_unlock(&mgr->lock);
 	if (send_remove)
-		drm_dp_destroy_payload_step1(mgr, mst_state, new_payload);
+		drm_dp_destroy_payload_at_remote_and_dfp(mgr, mst_state, payload);
 	else
 		drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n",
-			    new_payload->vcpi);
+			    payload->vcpi);
+	payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
+}
+EXPORT_SYMBOL(drm_dp_remove_payload_part1);
+/**
+ * drm_dp_remove_payload_part2() - Remove an MST payload locally
+ * @mgr: Manager to use.
+ * @mst_state: The MST atomic state
+ * @old_payload: The payload with its old state
+ * @new_payload: The payload with its latest state
+ *
+ * Updates the starting time slots of all other payloads which would have been shifted towards
+ * the start of the payload ID table as a result of removing a payload. Driver should call this
+ * function whenever it removes a payload in its HW. It's independent to the result of payload
+ * allocation/deallocation at branch devices along the virtual channel.
+ */
+void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_topology_state *mst_state,
+				 const struct drm_dp_mst_atomic_payload *old_payload,
+				 struct drm_dp_mst_atomic_payload *new_payload)
+{
+	struct drm_dp_mst_atomic_payload *pos;
+	/* Remove local payload allocation */
 	list_for_each_entry(pos, &mst_state->payloads, next) {
 		if (pos != new_payload && pos->vc_start_slot > new_payload->vc_start_slot)
 			pos->vc_start_slot -= old_payload->time_slots;
@@ -3382,9 +3409,10 @@ void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
 	if (new_payload->delete)
 		drm_dp_mst_put_port_malloc(new_payload->port);
-}
-EXPORT_SYMBOL(drm_dp_remove_payload);
+	new_payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
+}
+EXPORT_SYMBOL(drm_dp_remove_payload_part2);
 /**
 * drm_dp_add_payload_part2() - Execute payload update part 2
 * @mgr: Manager to use.
@@ -3403,17 +3431,19 @@ int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
 	int ret = 0;
 	/* Skip failed payloads */
-	if (payload->vc_start_slot == -1) {
-		drm_dbg_kms(mgr->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
+	if (payload->payload_allocation_status != DRM_DP_MST_PAYLOAD_ALLOCATION_DFP) {
+		drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n",
			    payload->port->connector->name);
 		return -EIO;
 	}
-	ret = drm_dp_create_payload_step2(mgr, payload);
-	if (ret < 0) {
+	/* Allocate payload to remote end */
+	ret = drm_dp_create_payload_to_remote(mgr, payload);
+	if (ret < 0)
 		drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n",
			payload->port, ret);
-	}
+	else
+		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE;
 	return ret;
 }
@@ -4324,6 +4354,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state,
 		drm_dp_mst_get_port_malloc(port);
 		payload->port = port;
 		payload->vc_start_slot = -1;
+		payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_NONE;
 		list_add(&payload->next, &topology_state->payloads);
 	}
 	payload->time_slots = req_slots;
@@ -4493,7 +4524,7 @@ void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
 	}
 	/* Now that previous state is committed, it's safe to copy over the start slot
-	 * assignments
+	 * and allocation status assignments
	 */
 	list_for_each_entry(old_payload, &old_mst_state->payloads, next) {
 		if (old_payload->delete)
@@ -4502,6 +4533,8 @@ void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state)
 		new_payload = drm_atomic_get_mst_payload_state(new_mst_state,
							       old_payload->port);
 		new_payload->vc_start_slot = old_payload->vc_start_slot;
+		new_payload->payload_allocation_status =
+			old_payload->payload_allocation_status;
 	}
 }
 }
@@ -4818,6 +4851,13 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 	struct drm_dp_mst_atomic_payload *payload;
 	int i, ret;
+	static const char *const status[] = {
+		"None",
+		"Local",
+		"DFP",
+		"Remote",
+	};
 	mutex_lock(&mgr->lock);
 	if (mgr->mst_primary)
 		drm_dp_mst_dump_mstb(m, mgr->mst_primary);
@@ -4834,7 +4874,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 	seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n",
		   state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div);
-	seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n");
+	seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | status | sink name |\n");
 	for (i = 0; i < mgr->max_payloads; i++) {
 		list_for_each_entry(payload, &state->payloads, next) {
 			char name[14];
@@ -4843,7 +4883,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
 				continue;
 			fetch_monitor_name(mgr, payload->port, name, sizeof(name));
-			seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n",
+			seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %8s %19s\n",
				   i,
				   payload->port->port_num,
				   payload->vcpi,
@@ -4851,6 +4891,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
				   payload->vc_start_slot + payload->time_slots - 1,
				   payload->pbn,
				   payload->dsc_enabled ? "Y" : "N",
+				   status[payload->payload_allocation_status],
				   (*name != 0) ? name : "Unknown");
 		}
 	}
...
@@ -557,12 +557,8 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
 	struct intel_dp *intel_dp = &dig_port->dp;
 	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
-	struct drm_dp_mst_topology_state *old_mst_state =
-		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
 	struct drm_dp_mst_topology_state *new_mst_state =
		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
-	const struct drm_dp_mst_atomic_payload *old_payload =
-		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
 	struct drm_dp_mst_atomic_payload *new_payload =
		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
 	struct drm_i915_private *i915 = to_i915(connector->base.dev);
@@ -572,8 +568,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
 	intel_hdcp_disable(intel_mst->connector);
-	drm_dp_remove_payload(&intel_dp->mst_mgr, new_mst_state,
-			      old_payload, new_payload);
+	drm_dp_remove_payload_part1(&intel_dp->mst_mgr, new_mst_state, new_payload);
 	intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state);
 }
@@ -588,6 +583,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 	struct intel_dp *intel_dp = &dig_port->dp;
 	struct intel_connector *connector =
		to_intel_connector(old_conn_state->connector);
+	struct drm_dp_mst_topology_state *old_mst_state =
+		drm_atomic_get_old_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+	struct drm_dp_mst_topology_state *new_mst_state =
+		drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr);
+	const struct drm_dp_mst_atomic_payload *old_payload =
+		drm_atomic_get_mst_payload_state(old_mst_state, connector->port);
+	struct drm_dp_mst_atomic_payload *new_payload =
+		drm_atomic_get_mst_payload_state(new_mst_state, connector->port);
 	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
 	bool last_mst_stream;
@@ -608,6 +611,9 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
 	wait_for_act_sent(encoder, old_crtc_state);
+	drm_dp_remove_payload_part2(&intel_dp->mst_mgr, new_mst_state,
+				    old_payload, new_payload);
 	intel_ddi_disable_transcoder_func(old_crtc_state);
 	if (DISPLAY_VER(dev_priv) >= 9)
...
@@ -882,21 +882,26 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
 static void
 nv50_msto_cleanup(struct drm_atomic_state *state,
-		  struct drm_dp_mst_topology_state *mst_state,
+		  struct drm_dp_mst_topology_state *new_mst_state,
		  struct drm_dp_mst_topology_mgr *mgr,
		  struct nv50_msto *msto)
 {
 	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
-	struct drm_dp_mst_atomic_payload *payload =
-		drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port);
+	struct drm_dp_mst_atomic_payload *new_payload =
+		drm_atomic_get_mst_payload_state(new_mst_state, msto->mstc->port);
+	struct drm_dp_mst_topology_state *old_mst_state =
+		drm_atomic_get_old_mst_topology_state(state, mgr);
+	const struct drm_dp_mst_atomic_payload *old_payload =
+		drm_atomic_get_mst_payload_state(old_mst_state, msto->mstc->port);
 	NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
 	if (msto->disabled) {
 		msto->mstc = NULL;
 		msto->disabled = false;
+		drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
 	} else if (msto->enabled) {
-		drm_dp_add_payload_part2(mgr, state, payload);
+		drm_dp_add_payload_part2(mgr, state, new_payload);
 		msto->enabled = false;
 	}
 }
@@ -910,19 +915,15 @@ nv50_msto_prepare(struct drm_atomic_state *state,
 	struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev);
 	struct nv50_mstc *mstc = msto->mstc;
 	struct nv50_mstm *mstm = mstc->mstm;
-	struct drm_dp_mst_topology_state *old_mst_state;
-	struct drm_dp_mst_atomic_payload *payload, *old_payload;
+	struct drm_dp_mst_atomic_payload *payload;
 	NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
-	old_mst_state = drm_atomic_get_old_mst_topology_state(state, mgr);
 	payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
-	old_payload = drm_atomic_get_mst_payload_state(old_mst_state, mstc->port);
 	// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
 	if (msto->disabled) {
-		drm_dp_remove_payload(mgr, mst_state, old_payload, payload);
+		drm_dp_remove_payload_part1(mgr, mst_state, payload);
 		nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
 	} else {
...
@@ -46,6 +46,13 @@ struct drm_dp_mst_topology_ref_history {
 };
 #endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+enum drm_dp_mst_payload_allocation {
+	DRM_DP_MST_PAYLOAD_ALLOCATION_NONE,
+	DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL,
+	DRM_DP_MST_PAYLOAD_ALLOCATION_DFP,
+	DRM_DP_MST_PAYLOAD_ALLOCATION_REMOTE,
+};
 struct drm_dp_mst_branch;
 /**
@@ -537,7 +544,7 @@ struct drm_dp_mst_atomic_payload {
	 * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the
	 * previous MST states payload start slots have been copied over to the new state. Note
	 * that a new start slot won't be assigned/removed from this payload until
-	 * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called.
+	 * drm_dp_add_payload_part1()/drm_dp_remove_payload_part2() have been called.
	 * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to
	 *   get committed to hardware by calling drm_crtc_commit_wait() on each of the
	 *   &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps.
@@ -564,6 +571,9 @@ struct drm_dp_mst_atomic_payload {
	/** @dsc_enabled: Whether or not this payload has DSC enabled */
 	bool dsc_enabled : 1;
+	/** @payload_allocation_status: The allocation status of this payload */
+	enum drm_dp_mst_payload_allocation payload_allocation_status;
	/** @next: The list node for this payload */
 	struct list_head next;
 };
@@ -842,10 +852,13 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
 int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_atomic_state *state,
			     struct drm_dp_mst_atomic_payload *payload);
-void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr,
-			   struct drm_dp_mst_topology_state *mst_state,
-			   const struct drm_dp_mst_atomic_payload *old_payload,
-			   struct drm_dp_mst_atomic_payload *new_payload);
+void drm_dp_remove_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_topology_state *mst_state,
+				 struct drm_dp_mst_atomic_payload *payload);
+void drm_dp_remove_payload_part2(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_topology_state *mst_state,
+				 const struct drm_dp_mst_atomic_payload *old_payload,
+				 struct drm_dp_mst_atomic_payload *new_payload);
 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr);
...