Commit 03734485 authored by Andy Shevchenko, committed by Vinod Koul

dmaengine: hsu: remove excessive lock

All hardware accesses are done under the virtual channel lock. That's why the
channel-specific lock is excessive and can be removed safely. This has been
tested on Intel Medfield and Merrifield.
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent b6c52c63
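
For context, every dmaengine callback in this driver that reaches the channel registers already runs under vchan.lock. A simplified sketch of that calling pattern, modeled on the driver's issue_pending path (illustrative, not a verbatim excerpt of this commit):

/*
 * Simplified sketch (not part of this commit): the caller already
 * holds vchan.lock, so the helpers below it need no lock of their own.
 */
static void hsu_dma_issue_pending(struct dma_chan *chan)
{
	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&hsuc->vchan.lock, flags);
	if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc)
		/* Runs under vchan.lock, all the way to the register writes. */
		hsu_dma_start_transfer(hsuc);
	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
}

With the outer lock already serializing the register accesses, the per-channel spinlock removed below protected nothing extra.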
--- a/drivers/dma/hsu/hsu.c
+++ b/drivers/dma/hsu/hsu.c
@@ -99,21 +99,13 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
 
 static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_chan_disable(hsuc);
 	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
 	hsu_dma_chan_start(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 }
 
 static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc)
@@ -139,9 +131,9 @@ static u32 hsu_dma_chan_get_sr(struct hsu_dma_chan *hsuc)
 	unsigned long flags;
 	u32 sr;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
+	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	sr = hsu_chan_readl(hsuc, HSU_CH_SR);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
+	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
 
 	return sr;
 }
@@ -273,14 +265,11 @@ static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc)
 	struct hsu_dma_desc *desc = hsuc->desc;
 	size_t bytes = hsu_dma_desc_size(desc);
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&hsuc->lock, flags);
 	i = desc->active % HSU_DMA_CHAN_NR_DESC;
 	do {
 		bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i));
 	} while (--i >= 0);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
 
 	return bytes;
 }
@@ -327,24 +316,6 @@ static int hsu_dma_slave_config(struct dma_chan *chan,
 	return 0;
 }
 
-static void hsu_dma_chan_deactivate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_disable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
-static void hsu_dma_chan_activate(struct hsu_dma_chan *hsuc)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&hsuc->lock, flags);
-	hsu_chan_enable(hsuc);
-	spin_unlock_irqrestore(&hsuc->lock, flags);
-}
-
 static int hsu_dma_pause(struct dma_chan *chan)
 {
 	struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan);
@@ -352,7 +323,7 @@ static int hsu_dma_pause(struct dma_chan *chan)
 
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) {
-		hsu_dma_chan_deactivate(hsuc);
+		hsu_chan_disable(hsuc);
 		hsuc->desc->status = DMA_PAUSED;
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -368,7 +339,7 @@ static int hsu_dma_resume(struct dma_chan *chan)
 
 	spin_lock_irqsave(&hsuc->vchan.lock, flags);
 	if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) {
 		hsuc->desc->status = DMA_IN_PROGRESS;
-		hsu_dma_chan_activate(hsuc);
+		hsu_chan_enable(hsuc);
 	}
 	spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
@@ -441,8 +412,6 @@ int hsu_dma_probe(struct hsu_dma_chip *chip)
 
 		hsuc->direction = (i & 0x1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 		hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH;
-
-		spin_lock_init(&hsuc->lock);
 	}
 
 	dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask);
--- a/drivers/dma/hsu/hsu.h
+++ b/drivers/dma/hsu/hsu.h
@@ -78,7 +78,6 @@ struct hsu_dma_chan {
 	struct virt_dma_chan vchan;
 
 	void __iomem *reg;
-	spinlock_t lock;
 
 	/* hardware configuration */
 	enum dma_transfer_direction direction;
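
Not part of this commit, but a hypothetical follow-up that would document the invariant the change relies on: a lockdep assertion in the now lock-free helpers.

/*
 * Hypothetical hardening, not in this commit: assert that callers
 * hold vchan.lock before the helper touches channel registers.
 */
static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc)
{
	lockdep_assert_held(&hsuc->vchan.lock);

	hsu_chan_disable(hsuc);
	hsu_chan_writel(hsuc, HSU_CH_DCR, 0);
}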