Commit b6f91fc1 authored by Xiaogang Chen, committed by Alex Deucher

drm/amdgpu/display: buffer INTERRUPT_LOW_IRQ_CONTEXT interrupt work

amdgpu DM handles INTERRUPT_LOW_IRQ_CONTEXT interrupts (hpd, hpd_rx) with a
work queue that uses a single work_struct per interrupt source. If a new
interrupt is received before the previous handler has finished, the new
interrupt (of the same type) is discarded and the driver just prints an
"amdgpu_dm_irq_schedule_work FAILED" message. If the driver misses important
hpd or hpd_rx interrupts this way, hot (un)plugging a device may cause a
system hang or instability, such as issues with system resume from S3 sleep
with an MST device connected.

This patch dynamically allocates a new amdgpu_dm_irq_handler_data for an
incoming interrupt whenever the previous INTERRUPT_LOW_IRQ_CONTEXT interrupt
work has not yet been handled, so the new interrupt work can be queued on the
same workqueue instead of being discarded. All allocated
amdgpu_dm_irq_handler_data instances are kept on a single linked list and
reused afterwards.
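
For context, the fix hinges on a workqueue detail: queue_work() returns false
when the given work_struct is already pending, which is why a single shared
work item per IRQ source silently drops back-to-back interrupts. The sketch
below illustrates the buffering pattern the patch adopts, as standalone code;
the names (event_item, schedule_event, event_work_func) are hypothetical, and
GFP_ATOMIC is used on the assumption that the scheduling call may run in
interrupt context.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* One deferred-work item per in-flight event; mirrors the idea of giving
 * each amdgpu_dm_irq_handler_data its own embedded work_struct. */
struct event_item {
	struct list_head list;
	struct work_struct work;
	void (*handler)(void *arg);
	void *arg;
};

static void event_work_func(struct work_struct *work)
{
	/* Recover the enclosing item from its embedded work_struct. */
	struct event_item *item = container_of(work, struct event_item, work);

	item->handler(item->arg);
}

/*
 * Try to queue one of the buffered items; queue_work() returns false for an
 * item whose work is still pending, so walk the list until a free one is
 * found. If all items are busy, grow the pool by one and queue that instead;
 * the new item stays on the list and is reused for later events.
 */
static void schedule_event(struct list_head *items)
{
	struct event_item *item, *fresh;

	list_for_each_entry(item, items, list)
		if (queue_work(system_highpri_wq, &item->work))
			return;

	/* All items on one list share the same handler, so copy it from
	 * the first entry (hypothetical assumption, as in the sketch). */
	item = list_first_entry(items, struct event_item, list);

	fresh = kzalloc(sizeof(*fresh), GFP_ATOMIC);
	if (!fresh)
		return;

	fresh->handler = item->handler;
	fresh->arg = item->arg;
	INIT_WORK(&fresh->work, event_work_func);
	list_add_tail(&fresh->list, items);
	queue_work(system_highpri_wq, &fresh->work);
}

The same list also makes teardown straightforward: walking it and calling
flush_work() on every item, as the updated amdgpu_dm_irq_fini() and
amdgpu_dm_irq_suspend() below do, guarantees no buffered handler is still
running.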
Signed-off-by: Xiaogang Chen <xiaogang.chen@amd.com>
Reviewed-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a8a2e134
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -68,18 +68,6 @@ struct common_irq_params {
 	enum dc_irq_source irq_src;
 };
 
-/**
- * struct irq_list_head - Linked-list for low context IRQ handlers.
- *
- * @head: The list_head within &struct handler_data
- * @work: A work_struct containing the deferred handler work
- */
-struct irq_list_head {
-	struct list_head head;
-	/* In case this interrupt needs post-processing, 'work' will be queued*/
-	struct work_struct work;
-};
-
 /**
  * struct dm_compressor_info - Buffer info used by frame buffer compression
  * @cpu_addr: MMIO cpu addr
@@ -293,7 +281,7 @@ struct amdgpu_display_manager {
 	 * Note that handlers are called in the same order as they were
 	 * registered (FIFO).
 	 */
-	struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
+	struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
 
 	/**
 	 * @irq_handler_list_high_tab:
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -82,6 +82,7 @@ struct amdgpu_dm_irq_handler_data {
 	struct amdgpu_display_manager *dm;
 	/* DAL irq source which registered for this interrupt. */
 	enum dc_irq_source irq_source;
+	struct work_struct work;
 };
 
 #define DM_IRQ_TABLE_LOCK(adev, flags) \
@@ -111,20 +112,10 @@ static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd,
  */
 static void dm_irq_work_func(struct work_struct *work)
 {
-	struct irq_list_head *irq_list_head =
-		container_of(work, struct irq_list_head, work);
-	struct list_head *handler_list = &irq_list_head->head;
-	struct amdgpu_dm_irq_handler_data *handler_data;
-
-	list_for_each_entry(handler_data, handler_list, list) {
-		DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n",
-				handler_data->irq_source);
-
-		DRM_DEBUG_KMS("DM_IRQ: schedule_work: for dal_src=%d\n",
-				handler_data->irq_source);
-
-		handler_data->handler(handler_data->handler_arg);
-	}
+	struct amdgpu_dm_irq_handler_data *handler_data =
+		container_of(work, struct amdgpu_dm_irq_handler_data, work);
+
+	handler_data->handler(handler_data->handler_arg);
 
 	/* Call a DAL subcomponent which registered for interrupt notification
 	 * at INTERRUPT_LOW_IRQ_CONTEXT.
...@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev, ...@@ -156,7 +147,7 @@ static struct list_head *remove_irq_handler(struct amdgpu_device *adev,
break; break;
case INTERRUPT_LOW_IRQ_CONTEXT: case INTERRUPT_LOW_IRQ_CONTEXT:
default: default:
hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head; hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
break; break;
} }
@@ -290,7 +281,8 @@ void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
 		break;
 	case INTERRUPT_LOW_IRQ_CONTEXT:
 	default:
-		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source].head;
+		hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+		INIT_WORK(&handler_data->work, dm_irq_work_func);
 		break;
 	}
@@ -372,7 +364,7 @@ void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev,
 int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 {
 	int src;
-	struct irq_list_head *lh;
+	struct list_head *lh;
 
 	DRM_DEBUG_KMS("DM_IRQ\n");
@@ -381,9 +373,7 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
 		/* low context handler list init */
 		lh = &adev->dm.irq_handler_list_low_tab[src];
-		INIT_LIST_HEAD(&lh->head);
-		INIT_WORK(&lh->work, dm_irq_work_func);
+		INIT_LIST_HEAD(lh);
 
 		/* high context handler init */
 		INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]);
 	}
@@ -400,8 +390,11 @@ int amdgpu_dm_irq_init(struct amdgpu_device *adev)
 void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 {
 	int src;
-	struct irq_list_head *lh;
+	struct list_head *lh;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
 	unsigned long irq_table_flags;
 
 	DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n");
 
 	for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) {
 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
@@ -410,7 +403,16 @@ void amdgpu_dm_irq_fini(struct amdgpu_device *adev)
 		 * (because no code can schedule a new one). */
 		lh = &adev->dm.irq_handler_list_low_tab[src];
 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-		flush_work(&lh->work);
+
+		if (!list_empty(lh)) {
+			list_for_each_safe(entry, tmp, lh) {
+				handler = list_entry(
+					entry,
+					struct amdgpu_dm_irq_handler_data,
+					list);
+				flush_work(&handler->work);
+			}
+		}
 	}
 }
@@ -420,6 +422,8 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
 	struct list_head *hnd_list_h;
 	struct list_head *hnd_list_l;
 	unsigned long irq_table_flags;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
 
 	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
@@ -430,14 +434,22 @@ int amdgpu_dm_irq_suspend(struct amdgpu_device *adev)
 	 * will be disabled from manage_dm_interrupts on disable CRTC.
 	 */
 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 			dc_interrupt_set(adev->dm.dc, src, false);
 
 		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-		flush_work(&adev->dm.irq_handler_list_low_tab[src].work);
 
+		if (!list_empty(hnd_list_l)) {
+			list_for_each_safe (entry, tmp, hnd_list_l) {
+				handler = list_entry(
+					entry,
+					struct amdgpu_dm_irq_handler_data,
+					list);
+				flush_work(&handler->work);
+			}
+		}
 		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
 	}
@@ -457,7 +469,7 @@ int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
 	/* re-enable short pulse interrupts HW interrupt */
 	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 			dc_interrupt_set(adev->dm.dc, src, true);
@@ -483,7 +495,7 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 	 * will be enabled from manage_dm_interrupts on enable CRTC.
 	 */
 	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
-		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src].head;
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
 		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
 		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
 			dc_interrupt_set(adev->dm.dc, src, true);
@@ -500,22 +512,53 @@ int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
 static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev,
 					enum dc_irq_source irq_source)
 {
-	unsigned long irq_table_flags;
-	struct work_struct *work = NULL;
-
-	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
-
-	if (!list_empty(&adev->dm.irq_handler_list_low_tab[irq_source].head))
-		work = &adev->dm.irq_handler_list_low_tab[irq_source].work;
-
-	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
-
-	if (work) {
-		if (!schedule_work(work))
-			DRM_INFO("amdgpu_dm_irq_schedule_work FAILED src %d\n",
-				 irq_source);
-	}
+	struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source];
+	struct amdgpu_dm_irq_handler_data *handler_data;
+	bool work_queued = false;
+
+	if (list_empty(handler_list))
+		return;
+
+	list_for_each_entry (handler_data, handler_list, list) {
+		if (!queue_work(system_highpri_wq, &handler_data->work)) {
+			continue;
+		} else {
+			work_queued = true;
+			break;
+		}
+	}
+
+	if (!work_queued) {
+		struct amdgpu_dm_irq_handler_data *handler_data_add;
+		/*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/
+		handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list);
+
+		/*allocate a new amdgpu_dm_irq_handler_data*/
+		handler_data_add = kzalloc(sizeof(*handler_data), GFP_KERNEL);
+		if (!handler_data_add) {
+			DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n");
+			return;
+		}
+
+		/*copy new amdgpu_dm_irq_handler_data members from handler_data*/
+		handler_data_add->handler	= handler_data->handler;
+		handler_data_add->handler_arg	= handler_data->handler_arg;
+		handler_data_add->dm		= handler_data->dm;
+		handler_data_add->irq_source	= irq_source;
+
+		list_add_tail(&handler_data_add->list, handler_list);
+
+		INIT_WORK(&handler_data_add->work, dm_irq_work_func);
+
+		if (queue_work(system_highpri_wq, &handler_data_add->work))
+			DRM_DEBUG("Queued work for handling interrupt from "
+				  "display for IRQ source %d\n",
+				  irq_source);
+		else
+			DRM_ERROR("Failed to queue work for handling interrupt "
+				  "from display for IRQ source %d\n",
+				  irq_source);
+	}
 }
 
 /*