Commit f66d84c8 authored by Dave Airlie

Merge tag 'drm-misc-fixes-2020-01-16' of git://anongit.freedesktop.org/drm/drm-misc into drm-fixes

virtio: maintain obj reservation lock when submitting cmds (Gerd)
rockchip: increase link rate var size to accommodate rates (Tobias)
mst: serialize down messages and clear timeslots on unplug (Wayne)

Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Tobias Schramm <t.schramm@manjaro.org>
Cc: Wayne Lin <Wayne.Lin@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Sean Paul <sean@poorly.run>
Link: https://patchwork.freedesktop.org/patch/msgid/20200116162856.GA11524@art_vandelay
parents e33c7ba8 5a64967a
...@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, ...@@ -1190,6 +1190,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
mstb->tx_slots[txmsg->seqno] = NULL; mstb->tx_slots[txmsg->seqno] = NULL;
} }
mgr->is_waiting_for_dwn_reply = false;
} }
out: out:
if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
...@@ -1199,6 +1201,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, ...@@ -1199,6 +1201,7 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
} }
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
drm_dp_mst_kick_tx(mgr);
return ret; return ret;
} }
...@@ -2318,7 +2321,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, ...@@ -2318,7 +2321,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
{ {
struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port; struct drm_dp_mst_port *port;
int old_ddps, ret; int old_ddps, old_input, ret, i;
u8 new_pdt; u8 new_pdt;
bool dowork = false, create_connector = false; bool dowork = false, create_connector = false;
...@@ -2349,6 +2352,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, ...@@ -2349,6 +2352,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
} }
old_ddps = port->ddps; old_ddps = port->ddps;
old_input = port->input;
port->input = conn_stat->input_port; port->input = conn_stat->input_port;
port->mcs = conn_stat->message_capability_status; port->mcs = conn_stat->message_capability_status;
port->ldps = conn_stat->legacy_device_plug_status; port->ldps = conn_stat->legacy_device_plug_status;
...@@ -2373,6 +2377,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, ...@@ -2373,6 +2377,28 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
dowork = false; dowork = false;
} }
if (!old_input && old_ddps != port->ddps && !port->ddps) {
for (i = 0; i < mgr->max_payloads; i++) {
struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
struct drm_dp_mst_port *port_validated;
if (!vcpi)
continue;
port_validated =
container_of(vcpi, struct drm_dp_mst_port, vcpi);
port_validated =
drm_dp_mst_topology_get_port_validated(mgr, port_validated);
if (!port_validated) {
mutex_lock(&mgr->payload_lock);
vcpi->num_slots = 0;
mutex_unlock(&mgr->payload_lock);
} else {
drm_dp_mst_topology_put_port(port_validated);
}
}
}
if (port->connector) if (port->connector)
drm_modeset_unlock(&mgr->base.lock); drm_modeset_unlock(&mgr->base.lock);
else if (create_connector) else if (create_connector)
...@@ -2718,9 +2744,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) ...@@ -2718,9 +2744,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
ret = process_single_tx_qlock(mgr, txmsg, false); ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret == 1) { if (ret == 1) {
/* txmsg is sent it should be in the slots now */ /* txmsg is sent it should be in the slots now */
mgr->is_waiting_for_dwn_reply = true;
list_del(&txmsg->next); list_del(&txmsg->next);
} else if (ret) { } else if (ret) {
DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
mgr->is_waiting_for_dwn_reply = false;
list_del(&txmsg->next); list_del(&txmsg->next);
if (txmsg->seqno != -1) if (txmsg->seqno != -1)
txmsg->dst->tx_slots[txmsg->seqno] = NULL; txmsg->dst->tx_slots[txmsg->seqno] = NULL;
...@@ -2760,7 +2788,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, ...@@ -2760,7 +2788,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
} }
if (list_is_singular(&mgr->tx_msg_downq)) if (list_is_singular(&mgr->tx_msg_downq) &&
!mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr); process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
} }
...@@ -3678,6 +3707,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) ...@@ -3678,6 +3707,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mutex_lock(&mgr->qlock); mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX; txmsg->state = DRM_DP_SIDEBAND_TX_RX;
mstb->tx_slots[slot] = NULL; mstb->tx_slots[slot] = NULL;
mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq); wake_up_all(&mgr->tx_waitq);
...@@ -3687,6 +3717,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) ...@@ -3687,6 +3717,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
no_msg: no_msg:
drm_dp_mst_topology_put_mstb(mstb); drm_dp_mst_topology_put_mstb(mstb);
clear_down_rep_recv: clear_down_rep_recv:
mutex_lock(&mgr->qlock);
mgr->is_waiting_for_dwn_reply = false;
mutex_unlock(&mgr->qlock);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0; return 0;
...@@ -4497,7 +4530,7 @@ static void drm_dp_tx_work(struct work_struct *work) ...@@ -4497,7 +4530,7 @@ static void drm_dp_tx_work(struct work_struct *work)
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
mutex_lock(&mgr->qlock); mutex_lock(&mgr->qlock);
if (!list_empty(&mgr->tx_msg_downq)) if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
process_single_down_tx_qlock(mgr); process_single_down_tx_qlock(mgr);
mutex_unlock(&mgr->qlock); mutex_unlock(&mgr->qlock);
} }
......
...@@ -95,7 +95,7 @@ struct cdn_dp_device { ...@@ -95,7 +95,7 @@ struct cdn_dp_device {
struct cdn_dp_port *port[MAX_PHY]; struct cdn_dp_port *port[MAX_PHY];
u8 ports; u8 ports;
u8 max_lanes; u8 max_lanes;
u8 max_rate; unsigned int max_rate;
u8 lanes; u8 lanes;
int active_port; int active_port;
......
...@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, ...@@ -232,6 +232,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
if (!objs) if (!objs)
return; return;
virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
virtio_gpu_array_lock_resv(objs);
virtio_gpu_cmd_transfer_to_host_2d virtio_gpu_cmd_transfer_to_host_2d
(vgdev, 0, (vgdev, 0,
plane->state->crtc_w, plane->state->crtc_w,
......
...@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr { ...@@ -605,6 +605,12 @@ struct drm_dp_mst_topology_mgr {
* &drm_dp_sideband_msg_tx.state once they are queued * &drm_dp_sideband_msg_tx.state once they are queued
*/ */
struct mutex qlock; struct mutex qlock;
/**
* @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
*/
bool is_waiting_for_dwn_reply;
/** /**
* @tx_msg_downq: List of pending down replies. * @tx_msg_downq: List of pending down replies.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment