Commit e27c41d5 authored by Jude Shih, committed by Alex Deucher

drm/amd/display: Support for DMUB HPD interrupt handling

[WHY]
To add support for HPD interrupt handling from DMUB.
The HPD interrupt can be triggered via outbox1 from DMUB.

[HOW]
1) Use queue_work to handle the HPD task from outbox1 (see the sketch
below).

2) Add handle_hpd_irq_helper to share interrupt handling code
between legacy HPD and DMUB HPD from outbox1.

3) Add DMUB HPD handling in dmub_srv_stat_get_notification(), register
the HPD handling callback function, and wake up the DMUB HPD worker.
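
A condensed sketch of the dispatch loop added to dm_dmub_outbox1_low_irq()
(simplified from the diff below; allocation and error handling omitted):

    do {
        dc_stat_get_dmub_notification(adev->dm.dc, &notify);
        if (dm->dmub_thread_offload[notify.type]) {
            /* HPD: defer the time-consuming handling to the HPD workqueue */
            dmub_hpd_wrk->dmub_notify = &notify;
            dmub_hpd_wrk->adev = adev;
            queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
        } else {
            /* e.g. AUX replies: run the registered callback inline */
            dm->dmub_callback[notify.type](adev, &notify);
        }
    } while (notify.pending_notification);
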
Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
Signed-off-by: Jude Shih <shenshih@amd.com>
Tested-by: Daniel Wheeler <Daniel.Wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b5ce6fe8
@@ -215,6 +215,8 @@ static void handle_cursor_update(struct drm_plane *plane,
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -618,6 +620,116 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
}
#endif
/**
* dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
* Dmub AUX or SET_CONFIG command completion processing callback.
* Copies dmub notification to DM which is to be read by AUX command
* issuing thread and also signals the event to wake up the thread.
*/
void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
complete(&adev->dm.dmub_aux_transfer_done);
}
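
For context, the AUX-issuing side of this handshake is not part of this hunk; a rough
sketch of how a caller elsewhere in DM is expected to use it (the helper name, payload
variable and 10 second timeout below are illustrative assumptions, not taken from this patch):

    /* Illustrative only: fire the AUX request, then block until
     * dmub_aux_setconfig_callback() above copies the reply into
     * adev->dm.dmub_notify and completes dmub_aux_transfer_done. */
    if (!dc_process_dmub_aux_transfer_async(adev->dm.dc, link_index, payload))
        return -1;
    if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
                                     msecs_to_jiffies(10 * 1000)))
        return -1;
    /* the reply is now available in adev->dm.dmub_notify->aux_reply */
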
/**
* dmub_hpd_callback - DMUB HPD interrupt processing callback.
* @adev: amdgpu_device pointer
* @notify: dmub notification structure
*
* Dmub Hpd interrupt processing callback. Gets display index through the
* link index and calls helper to do the processing.
*/
void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_connector_list_iter iter;
struct dc_link *link;
uint8_t link_index = 0;
struct drm_device *dev = adev->dm.ddev;
if (adev == NULL)
return;
if (notify == NULL) {
DRM_ERROR("DMUB HPD callback notification was NULL");
return;
}
if (notify->link_index > adev->dm.dc->link_count) {
DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
return;
}
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
link_index = notify->link_index;
link = adev->dm.dc->links[link_index];
drm_connector_list_iter_begin(dev, &iter);
drm_for_each_connector_iter(connector, &iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (link && aconnector->dc_link == link) {
DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
handle_hpd_irq_helper(aconnector);
break;
}
}
drm_connector_list_iter_end(&iter);
drm_modeset_unlock(&dev->mode_config.connection_mutex);
}
/**
* register_dmub_notify_callback - Sets callback for DMUB notify
* @adev: amdgpu_device pointer
* @type: Type of dmub notification
* @callback: Dmub interrupt callback function
* @dmub_int_thread_offload: offload indicator
*
* API to register a dmub callback handler for a dmub notification.
* Also sets indicator whether callback processing is to be offloaded
* to dmub interrupt handling thread.
* Return: true if successfully registered, false if callback is NULL or type is out of range
*/
bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
{
if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
adev->dm.dmub_callback[type] = callback;
adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
} else
return false;
return true;
}
static void dm_handle_hpd_work(struct work_struct *work)
{
struct dmub_hpd_work *dmub_hpd_wrk;
dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
if (!dmub_hpd_wrk->dmub_notify) {
DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
return;
}
if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
dmub_hpd_wrk->dmub_notify);
}
kfree(dmub_hpd_wrk);
}
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -634,18 +746,33 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
uint32_t count = 0;
struct dmub_hpd_work *dmub_hpd_wrk;
if (dc_enable_dmub_notifications(adev->dm.dc)) {
dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
if (!dmub_hpd_wrk) {
DRM_ERROR("Failed to allocate dmub_hpd_wrk");
return;
}
INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
do {
dc_stat_get_dmub_notification(adev->dm.dc, &notify);
} while (notify.pending_notification);
if (notify.type > ARRAY_SIZE(dm->dmub_thread_offload)) {
DRM_ERROR("DM: notify type %d larger than the array size %ld !", notify.type,
ARRAY_SIZE(dm->dmub_thread_offload));
continue;
}
if (dm->dmub_thread_offload[notify.type] == true) {
dmub_hpd_wrk->dmub_notify = &notify;
dmub_hpd_wrk->adev = adev;
queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
} else {
dm->dmub_callback[notify.type](adev, &notify);
}
if (adev->dm.dmub_notify)
memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
complete(&adev->dm.dmub_aux_transfer_done);
// TODO : HPD Implementation
} while (notify.pending_notification);
} else {
DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
@@ -1251,7 +1378,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
if (!adev->dm.delayed_hpd_wq) {
DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
goto error;
}
amdgpu_dm_outbox_init(adev);
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
dmub_aux_setconfig_callback, false)) {
DRM_ERROR("amdgpu: fail to register dmub aux callback");
goto error;
}
if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
DRM_ERROR("amdgpu: fail to register dmub hpd callback");
goto error;
}
#endif
}
if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1333,6 +1478,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL;
destroy_workqueue(adev->dm.delayed_hpd_wq);
adev->dm.delayed_hpd_wq = NULL;
}
if (adev->dm.dmub_bo)
@@ -2611,9 +2758,8 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
@@ -2672,6 +2818,15 @@ static void handle_hpd_irq(void *param)
}
static void handle_hpd_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
handle_hpd_irq_helper(aconnector);
}
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
......
@@ -47,6 +47,8 @@
#define AMDGPU_DM_MAX_CRTC 6
#define AMDGPU_DM_MAX_NUM_EDP 2
#define AMDGPU_DMUB_NOTIFICATION_MAX 5
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -86,6 +88,21 @@ struct dm_compressor_info {
uint64_t gpu_addr;
};
typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
/**
* struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
*
* @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
* @dmub_notify: notification for callback function
* @adev: amdgpu_device pointer
*/
struct dmub_hpd_work {
struct work_struct handle_hpd_work;
struct dmub_notification *dmub_notify;
struct amdgpu_device *adev;
};
/**
* struct vblank_control_work - Work data for vblank control
* @work: Kernel work data for the work event
@@ -190,8 +207,30 @@ struct amdgpu_display_manager {
*/
struct dmub_srv *dmub_srv;
/**
* @dmub_notify:
*
* Notification from DMUB.
*/
struct dmub_notification *dmub_notify;
/**
* @dmub_callback:
*
* Callback functions to handle notification from DMUB.
*/
dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
/**
* @dmub_thread_offload:
*
* Flag to indicate if the callback is offloaded.
*/
bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
/**
* @dmub_fb_info:
*
@@ -439,6 +478,7 @@ struct amdgpu_display_manager {
*/
struct list_head da_list;
struct completion dmub_aux_transfer_done;
struct workqueue_struct *delayed_hpd_wq;
/**
* @brightness:
......