Commit 52ad9a8e authored by Wolfram Sang, committed by Ulf Hansson

mmc: tmio: ensure end of DMA and SD access are in sync

The current code assumes that DMA is finished before SD access end is
flagged. Thus, it schedules the 'dma_complete' tasklet in the SD card
interrupt routine when DATAEND is set. The assumption is not safe,
though. Even by mounting an SD card, it can be seen that sometimes DMA
complete is first, sometimes DATAEND. It seems they are usually close
enough timewise to not cause problems. However, a customer reported that
with CMD53 sometimes things really break apart. As a result, the BSP has
a patch which introduces flags for both events and makes sure both flags
are set before scheduling the tasklet. The customer accepted the patch,
yet it doesn't seem a proper upstream solution to me.

This patch refactors the code to replace the tasklet with already
existing and more lightweight mechanisms. First of all, we set the
callback in a DMA descriptor to automatically get notified when DMA is
done. In the callback, we then use a completion to make sure the SD
access has already ended. Then, we proceed as before.
Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
parent 21b22284
...@@ -137,7 +137,7 @@ struct tmio_mmc_host { ...@@ -137,7 +137,7 @@ struct tmio_mmc_host {
bool force_pio; bool force_pio;
struct dma_chan *chan_rx; struct dma_chan *chan_rx;
struct dma_chan *chan_tx; struct dma_chan *chan_tx;
struct tasklet_struct dma_complete; struct completion dma_dataend;
struct tasklet_struct dma_issue; struct tasklet_struct dma_issue;
struct scatterlist bounce_sg; struct scatterlist bounce_sg;
u8 *bounce_buf; u8 *bounce_buf;
......
...@@ -43,6 +43,31 @@ void tmio_mmc_abort_dma(struct tmio_mmc_host *host) ...@@ -43,6 +43,31 @@ void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
tmio_mmc_enable_dma(host, true); tmio_mmc_enable_dma(host, true);
} }
/*
 * DMA-engine completion callback: finish the data phase of an MMC request.
 *
 * Invoked by the dmaengine framework once the submitted descriptor has
 * completed. It first waits on dma_dataend, which the SD interrupt handler
 * completes when DATAEND is seen, so both "DMA done" and "SD access ended"
 * are guaranteed to have occurred before the request is wrapped up —
 * either event may fire first (see commit message).
 *
 * NOTE(review): dmaengine callbacks are commonly invoked from tasklet
 * (softirq) context, where wait_for_completion() must not sleep. Confirm
 * the DMA controller driver used here issues callbacks from a sleepable
 * context, or rework this to avoid blocking in the callback.
 */
static void tmio_mmc_dma_callback(void *arg)
{
	struct tmio_mmc_host *host = arg;

	/* Make sure the SD-side DATAEND has already been signalled. */
	wait_for_completion(&host->dma_dataend);

	spin_lock_irq(&host->lock);

	/* The request may have been torn down already (e.g. abort/error). */
	if (!host->data)
		goto out;

	/* Unmap the scatterlist on whichever channel carried the transfer. */
	if (host->data->flags & MMC_DATA_READ)
		dma_unmap_sg(host->chan_rx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_FROM_DEVICE);
	else
		dma_unmap_sg(host->chan_tx->device->dev,
			     host->sg_ptr, host->sg_len,
			     DMA_TO_DEVICE);

	/* Complete the data stage of the MMC request. */
	tmio_mmc_do_data_irq(host);
out:
	spin_unlock_irq(&host->lock);
}
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{ {
struct scatterlist *sg = host->sg_ptr, *sg_tmp; struct scatterlist *sg = host->sg_ptr, *sg_tmp;
...@@ -88,6 +113,10 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) ...@@ -88,6 +113,10 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
DMA_DEV_TO_MEM, DMA_CTRL_ACK); DMA_DEV_TO_MEM, DMA_CTRL_ACK);
if (desc) { if (desc) {
reinit_completion(&host->dma_dataend);
desc->callback = tmio_mmc_dma_callback;
desc->callback_param = host;
cookie = dmaengine_submit(desc); cookie = dmaengine_submit(desc);
if (cookie < 0) { if (cookie < 0) {
desc = NULL; desc = NULL;
...@@ -162,6 +191,10 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) ...@@ -162,6 +191,10 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
DMA_MEM_TO_DEV, DMA_CTRL_ACK); DMA_MEM_TO_DEV, DMA_CTRL_ACK);
if (desc) { if (desc) {
reinit_completion(&host->dma_dataend);
desc->callback = tmio_mmc_dma_callback;
desc->callback_param = host;
cookie = dmaengine_submit(desc); cookie = dmaengine_submit(desc);
if (cookie < 0) { if (cookie < 0) {
desc = NULL; desc = NULL;
...@@ -221,29 +254,6 @@ static void tmio_mmc_issue_tasklet_fn(unsigned long priv) ...@@ -221,29 +254,6 @@ static void tmio_mmc_issue_tasklet_fn(unsigned long priv)
dma_async_issue_pending(chan); dma_async_issue_pending(chan);
} }
static void tmio_mmc_tasklet_fn(unsigned long arg)
{
struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
spin_lock_irq(&host->lock);
if (!host->data)
goto out;
if (host->data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
host->sg_ptr, host->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
host->sg_ptr, host->sg_len,
DMA_TO_DEVICE);
tmio_mmc_do_data_irq(host);
out:
spin_unlock_irq(&host->lock);
}
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata)
{ {
/* We can only either use DMA for both Tx and Rx or not use it at all */ /* We can only either use DMA for both Tx and Rx or not use it at all */
...@@ -306,7 +316,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat ...@@ -306,7 +316,7 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat
if (!host->bounce_buf) if (!host->bounce_buf)
goto ebouncebuf; goto ebouncebuf;
tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); init_completion(&host->dma_dataend);
tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host);
} }
......
...@@ -596,11 +596,11 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat) ...@@ -596,11 +596,11 @@ static void tmio_mmc_data_irq(struct tmio_mmc_host *host, unsigned int stat)
if (done) { if (done) {
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete); complete(&host->dma_dataend);
} }
} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { } else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) {
tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND);
tasklet_schedule(&host->dma_complete); complete(&host->dma_dataend);
} else { } else {
tmio_mmc_do_data_irq(host); tmio_mmc_do_data_irq(host);
tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment