Commit 90ca4912 authored by Mike Marciniszyn, committed by Greg Kroah-Hartman

IB/hfi1: Avoid hardlockup with flushlist_lock

[ Upstream commit cf131a81 ]

Heavy contention of the sde flushlist_lock can cause hard lockups at
extreme scale when the flushing logic is under stress.

Mitigate by replacing the item at a time copy to the local list with
an O(1) list_splice_init() and using the high priority work queue to
do the flushes.

Fixes: 77241056 ("IB/hfi1: add driver files")
Cc: <stable@vger.kernel.org>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: default avatarSasha Levin <sashal@kernel.org>
parent fa717fc4
@@ -410,10 +410,7 @@ static void sdma_flush(struct sdma_engine *sde)
 	sdma_flush_descq(sde);
 	spin_lock_irqsave(&sde->flushlist_lock, flags);
 	/* copy flush list */
-	list_for_each_entry_safe(txp, txp_next, &sde->flushlist, list) {
-		list_del_init(&txp->list);
-		list_add_tail(&txp->list, &flushlist);
-	}
+	list_splice_init(&sde->flushlist, &flushlist);
 	spin_unlock_irqrestore(&sde->flushlist_lock, flags);
 	/* flush from flush list */
 	list_for_each_entry_safe(txp, txp_next, &flushlist, list)
@@ -2426,7 +2423,7 @@ int sdma_send_txreq(struct sdma_engine *sde,
 		wait->tx_count++;
 		wait->count += tx->num_desc;
 	}
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto unlock;
 nodesc:
@@ -2526,7 +2523,7 @@ int sdma_send_txlist(struct sdma_engine *sde, struct iowait *wait,
 		}
 	}
 	spin_unlock(&sde->flushlist_lock);
-	schedule_work(&sde->flush_worker);
+	queue_work_on(sde->cpu, system_highpri_wq, &sde->flush_worker);
 	ret = -ECOMM;
 	goto update_tail;
 nodesc:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment