Commit f7e3a1ba authored by Linus Torvalds

Merge tag 'drm-fixes-2023-07-21' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Mostly amdgpu fixes, a couple of i915 fixes, some nouveau and then a
  few misc accel and other fixes.

  client:
   - memory leak fix

  dma-buf:
   - memory leak fix

  qaic:
   - bounds check fixes
   - map_user_pages leak
   - int overflow fixes

  habanalabs:
   - debugfs stub helper

  nouveau:
   - aux event slot fixes
   - anx9805 cards fixes

  i915:
   - Add sentinel to xehp_oa_b_counters
   - Revert "drm/i915: use localized __diag_ignore_all() instead of per
     file"

  amdgpu:
   - More PCIe DPM fixes for Intel platforms
   - DCN3.0.1 fixes
   - Virtual display timer fix
   - Async flip fix
   - SMU13 clock reporting fixes
   - Add missing PSP firmware declaration
   - DP MST fix
   - DCN3.1.x fixes
   - Slab out of bounds fix"

* tag 'drm-fixes-2023-07-21' of git://anongit.freedesktop.org/drm/drm: (31 commits)
  accel/habanalabs: add more debugfs stub helpers
  drm/nouveau/kms/nv50-: init hpd_irq_lock for PIOR DP
  drm/nouveau/disp: PIOR DP uses GPIO for HPD, not PMGR AUX interrupts
  drm/nouveau/i2c: fix number of aux event slots
  drm/amdgpu: use a macro to define no xcp partition case
  drm/amdgpu/vm: use the same xcp_id from root PD
  drm/amdgpu: fix slab-out-of-bounds issue in amdgpu_vm_pt_create
  drm/amdgpu: Allocate root PD on correct partition
  drm/amd/display: Keep PHY active for DP displays on DCN31
  drm/amd/display: Prevent vtotal from being set to 0
  drm/amd/display: Disable MPC split by default on special asic
  drm/amd/display: check TG is non-null before checking if enabled
  drm/amd/display: Add polling method to handle MST reply packet
  drm/amd/display: Clean up errors & warnings in amdgpu_dm.c
  drm/amdgpu: Allow the initramfs generator to include psp_13_0_6_ta
  drm/amdgpu/pm: make mclk consistent for smu 13.0.7
  drm/amdgpu/pm: make gfxclock consistent for sienna cichlid
  drm/amd/display: only accept async flips for fast updates
  drm/amdgpu/vkms: relax timer deactivation by hrtimer_try_to_cancel
  drm/amd/display: add DCN301 specific logic for OTG programming
  ...
parents 12a5088e 28801cc8
@@ -3980,6 +3980,15 @@ static inline void hl_debugfs_fini(void)
 {
 }
 
+static inline int hl_debugfs_device_init(struct hl_device *hdev)
+{
+	return 0;
+}
+
+static inline void hl_debugfs_device_fini(struct hl_device *hdev)
+{
+}
+
 static inline void hl_debugfs_add_device(struct hl_device *hdev)
 {
 }
......
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
+#include <linux/overflow.h>
 #include <linux/pci.h>
 #include <linux/scatterlist.h>
 #include <linux/types.h>
@@ -366,7 +367,7 @@ static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrap
 	if (in_trans->hdr.len % 8 != 0)
 		return -EINVAL;
 
-	if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_EXT_MSG_LENGTH)
+	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
 		return -ENOSPC;
 
 	trans_wrapper = add_wrapper(wrappers,
@@ -418,9 +419,12 @@ static int find_and_map_user_pages(struct qaic_device *qdev,
 	}
 
 	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
-	if (ret < 0 || ret != nr_pages) {
-		ret = -EFAULT;
+	if (ret < 0)
 		goto free_page_list;
+	if (ret != nr_pages) {
+		nr_pages = ret;
+		ret = -EFAULT;
+		goto put_pages;
 	}
 
 	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
@@ -557,11 +561,8 @@ static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list
 	msg = &wrapper->msg;
 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
 
-	if (msg_hdr_len > (UINT_MAX - QAIC_MANAGE_EXT_MSG_LENGTH))
-		return -EINVAL;
-
 	/* There should be enough space to hold at least one ASP entry. */
-	if (msg_hdr_len + sizeof(*out_trans) + sizeof(struct wire_addr_size_pair) >
+	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
 	    QAIC_MANAGE_EXT_MSG_LENGTH)
 		return -ENOMEM;
@@ -634,7 +635,7 @@ static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper
 	msg = &wrapper->msg;
 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
 
-	if (msg_hdr_len + sizeof(*out_trans) > QAIC_MANAGE_MAX_MSG_LENGTH)
+	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
 		return -ENOSPC;
 
 	if (!in_trans->queue_size)
@@ -718,7 +719,7 @@ static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_l
 	msg = &wrapper->msg;
 	msg_hdr_len = le32_to_cpu(msg->hdr.len);
 
-	if (msg_hdr_len + in_trans->hdr.len > QAIC_MANAGE_MAX_MSG_LENGTH)
+	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
 		return -ENOSPC;
 
 	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
@@ -748,7 +749,8 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
 	int ret;
 	int i;
 
-	if (!user_msg->count) {
+	if (!user_msg->count ||
+	    user_msg->len < sizeof(*trans_hdr)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -765,12 +767,13 @@ static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
 	}
 
 	for (i = 0; i < user_msg->count; ++i) {
-		if (user_len >= user_msg->len) {
+		if (user_len > user_msg->len - sizeof(*trans_hdr)) {
 			ret = -EINVAL;
 			break;
 		}
 		trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len);
-		if (user_len + trans_hdr->len > user_msg->len) {
+		if (trans_hdr->len < sizeof(trans_hdr) ||
+		    size_add(user_len, trans_hdr->len) > user_msg->len) {
 			ret = -EINVAL;
 			break;
 		}
@@ -953,15 +956,23 @@ static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg,
 	int ret;
 	int i;
 
-	if (msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
+	if (msg_hdr_len < sizeof(*trans_hdr) ||
+	    msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH)
 		return -EINVAL;
 
 	user_msg->len = 0;
 	user_msg->count = le32_to_cpu(msg->hdr.count);
 
 	for (i = 0; i < user_msg->count; ++i) {
+		u32 hdr_len;
+
+		if (msg_len > msg_hdr_len - sizeof(*trans_hdr))
+			return -EINVAL;
+
 		trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len);
-		if (msg_len + le32_to_cpu(trans_hdr->len) > msg_hdr_len)
+		hdr_len = le32_to_cpu(trans_hdr->len);
+
+		if (hdr_len < sizeof(*trans_hdr) ||
+		    size_add(msg_len, hdr_len) > msg_hdr_len)
 			return -EINVAL;
 
 		switch (le32_to_cpu(trans_hdr->type)) {
......
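The qaic changes above share one pattern: open-coded length checks such as msg_hdr_len + in_trans->hdr.len > LIMIT can wrap around on large, userspace-supplied lengths and wrongly pass, so they are converted to size_add() from <linux/overflow.h>, which saturates to SIZE_MAX on overflow and therefore always trips the limit comparison. A minimal userspace sketch of that idea (sat_add() below is an illustrative stand-in, not the kernel helper):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MSG_LIMIT 16384u

/* Illustrative saturating add, mirroring the intent of the kernel's size_add(). */
static size_t sat_add(size_t a, size_t b)
{
	size_t sum = a + b;

	return sum < a ? SIZE_MAX : sum;	/* wrapped around -> saturate */
}

int main(void)
{
	size_t hdr_len = SIZE_MAX - 8;	/* hostile, userspace-controlled length */
	size_t extra = 64;

	/* The open-coded check wraps to a tiny value and wrongly accepts. */
	printf("naive:     %s\n", hdr_len + extra > MSG_LIMIT ? "rejected" : "accepted");

	/* The saturating check rejects once the true sum exceeds the limit. */
	printf("saturated: %s\n", sat_add(hdr_len, extra) > MSG_LIMIT ? "rejected" : "accepted");
	return 0;
}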
@@ -571,6 +571,7 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
+			struct dma_fence **new_fences;
 			unsigned int count;
 
 			while (*num_fences)
@@ -579,13 +580,17 @@ int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			count = cursor.num_fences + 1;
 
 			/* Eventually re-allocate the array */
-			*fences = krealloc_array(*fences, count,
-						 sizeof(void *),
-						 GFP_KERNEL);
-			if (count && !*fences) {
+			new_fences = krealloc_array(*fences, count,
+						    sizeof(void *),
+						    GFP_KERNEL);
+			if (count && !new_fences) {
+				kfree(*fences);
+				*fences = NULL;
+				*num_fences = 0;
 				dma_resv_iter_end(&cursor);
 				return -ENOMEM;
 			}
+			*fences = new_fences;
 		}
 
 		(*fences)[(*num_fences)++] = dma_fence_get(fence);
......
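The dma-resv hunk above fixes the classic realloc-into-the-same-pointer leak: krealloc_array() returns NULL on failure but leaves the original allocation intact, so writing the result straight back into *fences dropped the only remaining pointer to the old array. The fix reallocates into a temporary and only then overwrites *fences. A small userspace sketch of the corrected pattern (illustrative only, using plain realloc()):

#include <stdlib.h>

/*
 * Grow *arr to 'count' elements, mirroring the fixed shape: realloc into a
 * temporary first, so a failure can still free the old buffer instead of
 * leaking it by overwriting the only pointer to it.
 */
static int grow_array(int **arr, size_t *nr, size_t count)
{
	int *tmp = realloc(*arr, count * sizeof(**arr));

	if (count && !tmp) {
		free(*arr);	/* old buffer is still valid and reachable here */
		*arr = NULL;
		*nr = 0;
		return -1;
	}
	*arr = tmp;
	return 0;
}

/*
 * The buggy shape the patch removes:
 *	*arr = realloc(*arr, count * sizeof(**arr));
 *	if (count && !*arr)
 *		return -1;	// the old buffer is leaked here
 */
int main(void)
{
	int *fences = NULL;
	size_t nr = 0;

	if (grow_array(&fences, &nr, 4) == 0)
		nr = 4;
	free(fences);
	return 0;
}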
@@ -1709,7 +1709,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 			alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
 				AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0;
 		}
-		xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id;
+		xcp_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ?
+					0 : fpriv->xcp_id;
 	} else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 		alloc_flags = 0;
......
@@ -1229,13 +1229,13 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 		pasid = 0;
 	}
 
-	r = amdgpu_vm_init(adev, &fpriv->vm);
+	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
 	if (r)
 		goto error_pasid;
 
-	r = amdgpu_xcp_open_device(adev, fpriv, file_priv);
+	r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id);
 	if (r)
-		goto error_vm;
+		goto error_pasid;
 
 	r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
 	if (r)
......
@@ -1382,7 +1382,7 @@ int amdgpu_mes_self_test(struct amdgpu_device *adev)
 		goto error_pasid;
 	}
 
-	r = amdgpu_vm_init(adev, vm);
+	r = amdgpu_vm_init(adev, vm, -1);
 	if (r) {
 		DRM_ERROR("failed to initialize vm\n");
 		goto error_pasid;
......
@@ -55,8 +55,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
 		DRM_WARN("%s: vblank timer overrun\n", __func__);
 
 	ret = drm_crtc_handle_vblank(crtc);
+	/* Don't queue timer again when vblank is disabled. */
 	if (!ret)
-		DRM_ERROR("amdgpu_vkms failure on handling vblank");
+		return HRTIMER_NORESTART;
 
 	return HRTIMER_RESTART;
 }
@@ -81,7 +82,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
 {
 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
 
-	hrtimer_cancel(&amdgpu_crtc->vblank_timer);
+	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
 }
 
 static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
......
@@ -2121,13 +2121,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
  *
  * @adev: amdgpu_device pointer
  * @vm: requested vm
+ * @xcp_id: GPU partition selection id
  *
  * Init @vm fields.
  *
  * Returns:
  * 0 for success, error for failure.
  */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)
 {
 	struct amdgpu_bo *root_bo;
 	struct amdgpu_bo_vm *root;
@@ -2177,7 +2178,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	vm->evicting = false;
 
 	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level,
-				false, &root);
+				false, &root, xcp_id);
 	if (r)
 		goto error_free_delayed;
 	root_bo = &root->bo;
......
@@ -392,7 +392,7 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			u32 pasid);
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout);
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id);
 int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
@@ -475,7 +475,8 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
 int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 			struct amdgpu_bo_vm *vmbo, bool immediate);
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			int level, bool immediate, struct amdgpu_bo_vm **vmbo);
+			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+			int32_t xcp_id);
 void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev,
 				struct amdgpu_vm *vm);
......
@@ -498,11 +498,12 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm,
  * @level: the page table level
  * @immediate: use a immediate update
  * @vmbo: pointer to the buffer object pointer
+ * @xcp_id: GPU partition id
  */
 int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			int level, bool immediate, struct amdgpu_bo_vm **vmbo)
+			int level, bool immediate, struct amdgpu_bo_vm **vmbo,
+			int32_t xcp_id)
 {
-	struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm);
 	struct amdgpu_bo_param bp;
 	struct amdgpu_bo *bo;
 	struct dma_resv *resv;
@@ -535,7 +536,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	bp.type = ttm_bo_type_kernel;
 	bp.no_wait_gpu = immediate;
-	bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+	bp.xcp_id_plus1 = xcp_id + 1;
 
 	if (vm->root.bo)
 		bp.resv = vm->root.bo->tbo.base.resv;
@@ -561,7 +562,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		bp.type = ttm_bo_type_kernel;
 		bp.resv = bo->tbo.base.resv;
 		bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-		bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1;
+		bp.xcp_id_plus1 = xcp_id + 1;
 
 		r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow);
@@ -606,7 +607,8 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev,
 		return 0;
 
 	amdgpu_vm_eviction_unlock(vm);
-	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt);
+	r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt,
+				vm->root.bo->xcp_id);
 	amdgpu_vm_eviction_lock(vm);
 	if (r)
 		return r;
......
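For the slab-out-of-bounds fix above: the removed container_of() back-cast assumed every amdgpu_vm is embedded in an amdgpu_fpriv, which does not hold for driver-internal VMs (the MES self-test path now passes -1 explicitly), so the cast could point outside the containing allocation. Passing xcp_id as a parameter removes that assumption. A simplified sketch of why an unconditional container_of() back-cast is unsafe (the struct names below are made up for illustration; the macro is a stripped-down version of the kernel's):

#include <stddef.h>

/* Simplified container_of(), in the spirit of <linux/container_of.h>. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct vm { int id; };

struct fpriv {
	int xcp_id;
	struct vm vm;			/* some vms are embedded in an fpriv ... */
};

struct standalone { struct vm vm; };	/* ... but others are not */

static int xcp_of(struct vm *v)
{
	/*
	 * Only valid when v really lives inside a struct fpriv. For a vm
	 * embedded in struct standalone, the cast points before the start
	 * of that allocation and reading ->xcp_id is out of bounds.
	 */
	return container_of(v, struct fpriv, vm)->xcp_id;
}

int main(void)
{
	struct fpriv f = { .xcp_id = 3, .vm = { .id = 1 } };

	return xcp_of(&f.vm) == 3 ? 0 : 1;	/* valid: &f.vm is embedded in f */
	/* xcp_of(&some_standalone.vm) would read outside the allocation. */
}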
@@ -363,7 +363,7 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev,
 	if (!adev->xcp_mgr)
 		return 0;
 
-	fpriv->xcp_id = ~0;
+	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
 	for (i = 0; i < MAX_XCP; ++i) {
 		if (!adev->xcp_mgr->xcp[i].ddev)
 			break;
@@ -381,7 +381,7 @@
 		}
 	}
 
-	fpriv->vm.mem_id = fpriv->xcp_id == ~0 ? -1 :
+	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
 				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
 	return 0;
 }
......
@@ -37,6 +37,8 @@
 #define AMDGPU_XCP_FL_NONE 0
 #define AMDGPU_XCP_FL_LOCKED (1 << 0)
 
+#define AMDGPU_XCP_NO_PARTITION (~0)
+
 struct amdgpu_fpriv;
 
 enum AMDGPU_XCP_IP_BLOCK {
......
@@ -68,7 +68,7 @@ static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev,
 	enum AMDGPU_XCP_IP_BLOCK ip_blk;
 	uint32_t inst_mask;
 
-	ring->xcp_id = ~0;
+	ring->xcp_id = AMDGPU_XCP_NO_PARTITION;
 	if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
 		return;
@@ -177,7 +177,7 @@ static int aqua_vanjaram_select_scheds(
 	u32 sel_xcp_id;
 	int i;
 
-	if (fpriv->xcp_id == ~0) {
+	if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) {
 		u32 least_ref_cnt = ~0;
 
 		fpriv->xcp_id = 0;
......
@@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_toc.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_11_ta.bin");
 MODULE_FIRMWARE("amdgpu/psp_13_0_6_sos.bin");
+MODULE_FIRMWARE("amdgpu/psp_13_0_6_ta.bin");
 
 /* For large FW files the time to complete can be very long */
 #define USBC_PD_POLLING_LIMIT_S 240
......
@@ -194,6 +194,11 @@ struct hpd_rx_irq_offload_work_queue {
 	 * we're handling link loss
 	 */
 	bool is_handling_link_loss;
+	/**
+	 * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message
+	 * ready event when we're already handling mst message ready event
+	 */
+	bool is_handling_mst_msg_rdy_event;
 	/**
 	 * @aconnector: The aconnector that this work queue is attached to
 	 */
@@ -638,6 +643,8 @@ struct amdgpu_dm_connector {
 	struct drm_dp_mst_port *mst_output_port;
 	struct amdgpu_dm_connector *mst_root;
 	struct drm_dp_aux *dsc_aux;
+	struct mutex handle_mst_msg_ready;
+
 	/* TODO see if we can merge with ddc_bus or make a dm_connector */
 	struct amdgpu_i2c_adapter *i2c;
......
@@ -398,6 +398,18 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 		return -EINVAL;
 	}
 
+	/*
+	 * Only allow async flips for fast updates that don't change the FB
+	 * pitch, the DCC state, rotation, etc.
+	 */
+	if (crtc_state->async_flip &&
+	    dm_crtc_state->update_type != UPDATE_TYPE_FAST) {
+		drm_dbg_atomic(crtc->dev,
+			       "[CRTC:%d:%s] async flips are only supported for fast updates\n",
+			       crtc->base.id, crtc->name);
+		return -EINVAL;
+	}
+
 	/* In some use cases, like reset, no stream is attached */
 	if (!dm_crtc_state->stream)
 		return 0;
......
@@ -619,8 +619,118 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
 	return connector;
 }
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	uint8_t dpcd_bytes_to_read;
+	const uint8_t max_process_count = 30;
+	uint8_t process_count = 0;
+	u8 retry;
+	struct amdgpu_dm_connector *aconnector =
+			container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	mutex_lock(&aconnector->handle_mst_msg_ready);
+
+	while (process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+		process_count++;
+
+		dret = drm_dp_dpcd_read(
+			&aconnector->dm_dp_aux.aux,
+			dpcd_addr,
+			esi,
+			dpcd_bytes_to_read);
+
+		if (dret != dpcd_bytes_to_read) {
+			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+			break;
+		}
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		switch (msg_rdy_type) {
+		case DOWN_REP_MSG_RDY_EVENT:
+			/* Only handle DOWN_REP_MSG_RDY case*/
+			esi[1] &= DP_DOWN_REP_MSG_RDY;
+			break;
+		case UP_REQ_MSG_RDY_EVENT:
+			/* Only handle UP_REQ_MSG_RDY case*/
+			esi[1] &= DP_UP_REQ_MSG_RDY;
+			break;
+		default:
+			/* Handle both cases*/
+			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+			break;
+		}
+
+		if (!esi[1])
+			break;
+
+		/* handle MST irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+							esi,
+							ack,
+							&new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify down stream */
+			for (retry = 0; retry < 3; retry++) {
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
+					break;
+			}
+
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				return;
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
 	.add_connector = dm_dp_add_mst_connector,
+	.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready,
 };
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
......
@@ -49,6 +49,13 @@
 #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031
 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000
 
+enum mst_msg_ready_type {
+	NONE_MSG_RDY_EVENT = 0,
+	DOWN_REP_MSG_RDY_EVENT = 1,
+	UP_REQ_MSG_RDY_EVENT = 2,
+	DOWN_OR_UP_MSG_RDY_EVENT = 3
+};
+
 struct amdgpu_display_manager;
 struct amdgpu_dm_connector;
@@ -61,6 +68,10 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
 void
 dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev);
 
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type);
+
 struct dsc_mst_fairness_vars {
 	int pbn;
 	bool dsc_enabled;
......
@@ -87,6 +87,11 @@ static int dcn31_get_active_display_cnt_wa(
 		    stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
 		    stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
 			tmds_present = true;
+
+		/* Checking stream / link detection ensuring that PHY is active*/
+		if (dc_is_dp_signal(stream->signal) && !stream->dpms_off)
+			display_count++;
+
 	}
 
 	for (i = 0; i < dc->link_count; i++) {
......