Commit 216f0efd authored by Bob Peterson, committed by David Teigland

dlm: Don't swamp the CPU with callbacks queued during recovery

Before this patch, recovery would cause all callbacks to be delayed,
put on a queue, and afterward they were all queued to the callback
work queue. This patch does the same thing, but takes a break after
every 25 of them so it won't swamp the CPU at the expense of other
RT processes like corosync.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
parent 9de30f3f
@@ -292,6 +292,8 @@ void dlm_callback_suspend(struct dlm_ls *ls)
 	flush_workqueue(ls->ls_callback_wq);
 }
 
+#define MAX_CB_QUEUE 25
+
 void dlm_callback_resume(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb, *safe;
@@ -302,15 +304,23 @@ void dlm_callback_resume(struct dlm_ls *ls)
 	if (!ls->ls_callback_wq)
 		return;
 
+more:
 	mutex_lock(&ls->ls_cb_mutex);
 	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
 		list_del_init(&lkb->lkb_cb_list);
 		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
 		count++;
+		if (count == MAX_CB_QUEUE)
+			break;
 	}
 	mutex_unlock(&ls->ls_cb_mutex);
 
 	if (count)
 		log_rinfo(ls, "dlm_callback_resume %d", count);
+	if (count == MAX_CB_QUEUE) {
+		count = 0;
+		cond_resched();
+		goto more;
+	}
 }
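For context, the change is an instance of the common batch-and-yield idiom:
dispatch at most MAX_CB_QUEUE items while holding the mutex, drop the mutex,
yield the CPU, and loop back until the delay list is drained. Below is a
minimal, self-contained userspace C sketch of the same idiom; the struct item
list, list_lock, and dispatch() are hypothetical stand-ins for the lkb delay
list and queue_work(), and sched_yield() plays the role of cond_resched().

#include <pthread.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_BATCH 25

struct item {
	struct item *next;
	int id;
};

static struct item *delay_list;        /* stand-in for ls->ls_cb_delay */
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for queue_work(): hand one item off for processing. */
static void dispatch(struct item *it)
{
	printf("dispatching item %d\n", it->id);
	free(it);
}

static void resume_in_batches(void)
{
	int count;

	for (;;) {
		count = 0;
		pthread_mutex_lock(&list_lock);
		/* Pop and dispatch at most MAX_BATCH items per pass. */
		while (delay_list && count < MAX_BATCH) {
			struct item *it = delay_list;
			delay_list = it->next;
			dispatch(it);
			count++;
		}
		pthread_mutex_unlock(&list_lock);

		if (count < MAX_BATCH)
			break;        /* list drained */
		sched_yield();        /* userspace analogue of cond_resched() */
	}
}

int main(void)
{
	/* Build a 100-item list so several batches are needed. */
	for (int i = 0; i < 100; i++) {
		struct item *it = malloc(sizeof(*it));
		it->id = i;
		it->next = delay_list;
		delay_list = it;
	}
	resume_in_batches();
	return 0;
}

Bounding the batch size keeps each critical section short, and yielding
between batches gives the scheduler a chance to run other tasks (in the
kernel case, RT processes like corosync) instead of monopolizing the CPU
while a long backlog is requeued.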