Commit 2ff25c1c authored by Jean-Nicolas Graux, committed by Vinod Koul

dmaengine: pl08x: be fair when re-assigning physical channel

The current way we find a waiting virtual channel for the next transfer,
at the time one physical channel becomes free, is not really fair.

In more detail: when more than one channel is waiting at a time, just
going through the arrays of memcpy and slave channels and stopping as
soon as a channel's state matches the waiting state can penalize
channels with high indexes.

Whenever the DMA engine is substantially overloaded, so that several
channels are constantly waiting, channels with the highest indexes
might not be served for a substantial time, which in the worst case
might hang tasks that wait for a DMA transfer to complete.

This patch makes physical channel re-assignment fairer by storing the
time, in jiffies, at which a channel is put in the waiting state.
Whenever a physical channel has to be re-assigned, this time is used
to select the channel that has been waiting the longest.
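
For illustration only (this sketch is not part of the commit), the
selection policy can be modeled in plain user-space C. The names
fake_chan and pick_longest_waiting, and the standalone main(), are
hypothetical stand-ins for the driver's virt-dma channel lists and the
kernel's jiffies counter:

#include <stdio.h>
#include <stddef.h>

enum chan_state { CHAN_IDLE, CHAN_WAITING };

struct fake_chan {
	int index;                /* stand-in for the channel's position */
	enum chan_state state;
	unsigned long waiting_at; /* "jiffies" when waiting began */
};

/* Return the waiting channel with the oldest timestamp, or NULL. */
static struct fake_chan *pick_longest_waiting(struct fake_chan *chans,
					      size_t n, unsigned long now)
{
	struct fake_chan *next = NULL;
	unsigned long oldest = now;
	size_t i;

	for (i = 0; i < n; i++) {
		/*
		 * Unlike the old first-match scan, keep walking the whole
		 * set and remember whichever channel has been waiting the
		 * longest, as the patched loops do.
		 */
		if (chans[i].state == CHAN_WAITING &&
		    chans[i].waiting_at <= oldest) {
			next = &chans[i];
			oldest = chans[i].waiting_at;
		}
	}
	return next;
}

int main(void)
{
	/* Channel 2 entered the waiting state first, so it must win. */
	struct fake_chan chans[] = {
		{ 0, CHAN_WAITING, 150 },
		{ 1, CHAN_IDLE,      0 },
		{ 2, CHAN_WAITING, 100 },
	};
	struct fake_chan *next = pick_longest_waiting(chans, 3, 200);

	if (next)
		printf("next: channel %d (waiting since %lu)\n",
		       next->index, next->waiting_at);
	return 0;
}

Initializing the bound to the current time means any channel already
waiting qualifies on first sight, after which only older (or equally
old) timestamps replace it, mirroring the patch's
p->waiting_at <= waiting_at test.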
Signed-off-by: Jean-Nicolas Graux <jean-nicolas.graux@st.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Nicolas Guion <nicolas.guion@st.com>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 921234e0
@@ -254,6 +254,7 @@ enum pl08x_dma_chan_state {
  * @slave: whether this channel is a device (slave) or for memcpy
  * @signal: the physical DMA request signal which this channel is using
  * @mux_use: count of descriptors using this DMA request signal setting
+ * @waiting_at: time in jiffies when this channel moved to waiting state
  */
 struct pl08x_dma_chan {
 	struct virt_dma_chan vc;
@@ -267,6 +268,7 @@ struct pl08x_dma_chan {
 	bool slave;
 	int signal;
 	unsigned mux_use;
+	unsigned long waiting_at;
 };
 
 /**
@@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
 	if (!ch) {
 		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
 		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting_at = jiffies;
 		return;
 	}
@@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_dma_chan *p, *next;
+	unsigned long waiting_at;
 
 retry:
 	next = NULL;
+	waiting_at = jiffies;
 
-	/* Find a waiting virtual channel for the next transfer. */
+	/*
+	 * Find a waiting virtual channel for the next transfer.
+	 * To be fair, time when each channel reached waiting state is compared
+	 * to select channel that is waiting for the longest time.
+	 */
 	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
-		if (p->state == PL08X_CHAN_WAITING) {
+		if (p->state == PL08X_CHAN_WAITING &&
+		    p->waiting_at <= waiting_at) {
 			next = p;
-			break;
+			waiting_at = p->waiting_at;
 		}
 
 	if (!next && pl08x->has_slave) {
 		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
-			if (p->state == PL08X_CHAN_WAITING) {
+			if (p->state == PL08X_CHAN_WAITING &&
+			    p->waiting_at <= waiting_at) {
 				next = p;
-				break;
+				waiting_at = p->waiting_at;
 			}
 	}