Commit 6f4c739d authored by Thomas Petazzoni, committed by Greg Kroah-Hartman

dmaengine: mv_xor_v2: fix tx_submit() implementation

commit 44d5887a upstream.

mv_xor_v2_tx_submit() gets the next available HW descriptor by calling
mv_xor_v2_get_desq_write_ptr(), which reads a HW register that reports the
index of the next available HW descriptor. This worked fine as long as HW
descriptors were issued for processing directly in tx_submit().

However, as part of the review process of the driver, a change was requested
to move the actual kick-off of HW descriptor processing to ->issue_pending().
Due to this, reading the HW register to find the next available HW descriptor
no longer works.

So instead of using this HW register, we implemented a software index
pointing to the next available HW descriptor.
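
The diff below contains the actual driver change; as a standalone illustration
of the increment-and-wrap pattern it relies on, here is a minimal C sketch.
The names demo_queue, demo_submit, DEMO_DESC_NUM and DEMO_DESC_SIZE are
hypothetical and exist only for this example; only the index handling mirrors
what tx_submit() now does while holding the queue lock.

/*
 * Minimal sketch of a software ring index for a descriptor queue
 * (assumed names; not the driver's actual structures).
 */
#include <string.h>

#define DEMO_DESC_NUM  1024          /* hypothetical queue depth */
#define DEMO_DESC_SIZE 64            /* hypothetical descriptor size in bytes */

struct demo_queue {
	char hw_desq[DEMO_DESC_NUM][DEMO_DESC_SIZE]; /* descriptor ring */
	unsigned int hw_queue_idx;  /* software index of the next free slot */
	unsigned int npendings;     /* descriptors queued but not yet kicked off */
};

/* Copy a prepared descriptor into the next free slot and advance the index. */
static void demo_submit(struct demo_queue *q, const void *hw_desc)
{
	memcpy(q->hw_desq[q->hw_queue_idx], hw_desc, DEMO_DESC_SIZE);

	q->npendings++;
	q->hw_queue_idx++;
	if (q->hw_queue_idx >= DEMO_DESC_NUM)
		q->hw_queue_idx = 0;
}

Keeping the index in software means tx_submit() no longer depends on the
engine having already consumed earlier descriptors, which is exactly what
broke once the kick-off moved to ->issue_pending().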

Fixes: 19a340b1 ("dmaengine: mv_xor_v2: new driver")
Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent a4634dfb
@@ -161,6 +161,7 @@ struct mv_xor_v2_device {
 	struct mv_xor_v2_sw_desc *sw_desq;
 	int desc_size;
 	unsigned int npendings;
+	unsigned int hw_queue_idx;
 };
 
 /**
@@ -213,18 +214,6 @@ static void mv_xor_v2_set_data_buffers(struct mv_xor_v2_device *xor_dev,
 	}
 }
 
-/*
- * Return the next available index in the DESQ.
- */
-static int mv_xor_v2_get_desq_write_ptr(struct mv_xor_v2_device *xor_dev)
-{
-	/* read the index for the next available descriptor in the DESQ */
-	u32 reg = readl(xor_dev->dma_base + MV_XOR_V2_DMA_DESQ_ALLOC_OFF);
-
-	return ((reg >> MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_SHIFT)
-		& MV_XOR_V2_DMA_DESQ_ALLOC_WRPTR_MASK);
-}
-
 /*
  * notify the engine of new descriptors, and update the available index.
  */
@@ -306,7 +295,6 @@ static irqreturn_t mv_xor_v2_interrupt_handler(int irq, void *data)
 static dma_cookie_t
 mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 {
-	int desq_ptr;
 	void *dest_hw_desc;
 	dma_cookie_t cookie;
 	struct mv_xor_v2_sw_desc *sw_desc =
@@ -322,15 +310,15 @@ mv_xor_v2_tx_submit(struct dma_async_tx_descriptor *tx)
 	spin_lock_bh(&xor_dev->lock);
 
 	cookie = dma_cookie_assign(tx);
 
-	/* get the next available slot in the DESQ */
-	desq_ptr = mv_xor_v2_get_desq_write_ptr(xor_dev);
-
 	/* copy the HW descriptor from the SW descriptor to the DESQ */
-	dest_hw_desc = xor_dev->hw_desq_virt + desq_ptr;
+	dest_hw_desc = xor_dev->hw_desq_virt + xor_dev->hw_queue_idx;
 
 	memcpy(dest_hw_desc, &sw_desc->hw_desc, xor_dev->desc_size);
 
 	xor_dev->npendings++;
+	xor_dev->hw_queue_idx++;
+	if (xor_dev->hw_queue_idx >= MV_XOR_V2_DESC_NUM)
+		xor_dev->hw_queue_idx = 0;
 
 	spin_unlock_bh(&xor_dev->lock);