Commit 212c7f66 authored by Linus Torvalds

Merge tag 'dmaengine-fix-4.4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Late fixes for 4.4 are three fixes for drivers which include a revert
  of mic-x100 fix which is causing regression, xgene fix for double IRQ
  and async_tx fix to use GFP_NOWAIT"

* tag 'dmaengine-fix-4.4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: xgene-dma: Fix double IRQ issue by setting IRQ_DISABLE_UNLAZY flag
  async_tx: use GFP_NOWAIT rather than GFP_IO
  dmaengine: Revert "dmaengine: mic_x100: add missing spin_unlock"
parents 436950a6 b0b79024
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -53,7 +53,7 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 	struct dmaengine_unmap_data *unmap = NULL;
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, 2, GFP_NOWAIT);
 
 	if (unmap && is_dma_copy_aligned(device, src_offset, dest_offset, len)) {
 		unsigned long dma_prep_flags = 0;
diff --git a/crypto/async_tx/async_pq.c b/crypto/async_tx/async_pq.c
--- a/crypto/async_tx/async_pq.c
+++ b/crypto/async_tx/async_pq.c
@@ -188,7 +188,7 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	/* XORing P/Q is only implemented in software */
 	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
@@ -307,7 +307,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
 	BUG_ON(disks < 4);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);
 
 	if (unmap && disks <= dma_maxpq(device, 0) &&
 	    is_dma_pq_aligned(device, offset, 0, len)) {
diff --git a/crypto/async_tx/async_raid6_recov.c b/crypto/async_tx/async_raid6_recov.c
--- a/crypto/async_tx/async_raid6_recov.c
+++ b/crypto/async_tx/async_raid6_recov.c
@@ -41,7 +41,7 @@ async_sum_product(struct page *dest, struct page **srcs, unsigned char *coef,
 	u8 *a, *b, *c;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		struct device *dev = dma->dev;
@@ -105,7 +105,7 @@ async_mult(struct page *dest, struct page *src, u8 coef, size_t len,
 	u8 *d, *s;
 
 	if (dma)
-		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(dma->dev, 3, GFP_NOWAIT);
 
 	if (unmap) {
 		dma_addr_t dma_dest[2];
diff --git a/crypto/async_tx/async_xor.c b/crypto/async_tx/async_xor.c
--- a/crypto/async_tx/async_xor.c
+++ b/crypto/async_tx/async_xor.c
@@ -182,7 +182,7 @@ async_xor(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);
 
 	if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
 		struct dma_async_tx_descriptor *tx;
@@ -278,7 +278,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
 	BUG_ON(src_cnt <= 1);
 
 	if (device)
-		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOIO);
+		unmap = dmaengine_get_unmap_data(device->dev, src_cnt, GFP_NOWAIT);
 
 	if (unmap && src_cnt <= device->max_xor &&
 	    is_dma_xor_aligned(device, offset, 0, len)) {
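Why GFP_NOWAIT in all of the hunks above: per the upstream rationale, these async_tx helpers are called from raid5 inside an atomic section (between get_cpu() and put_cpu()), so they must not sleep; GFP_NOIO can still block on reclaim, while GFP_NOWAIT fails immediately. Every call site already tolerates unmap == NULL by dropping to the synchronous software path. A minimal userspace sketch (not kernel code) of that try-fast-then-fall-back shape, with hypothetical stand-ins for the dmaengine calls:

/*
 * try_alloc_nowait() models dmaengine_get_unmap_data(..., GFP_NOWAIT):
 * it may return NULL instead of blocking, and the caller then takes
 * the synchronous fallback, just as the async_tx sites do.
 */
#include <stdio.h>
#include <stdlib.h>

struct unmap_data {
        int to_cnt;
};

/* Fail fast: never block waiting for memory to become reclaimable. */
static struct unmap_data *try_alloc_nowait(void)
{
        return calloc(1, sizeof(struct unmap_data));
}

static void dma_offload_copy(struct unmap_data *unmap)
{
        printf("offloaded copy, to_cnt=%d\n", unmap->to_cnt);
}

static void sync_copy(void)
{
        printf("synchronous software copy\n");
}

int main(void)
{
        struct unmap_data *unmap = try_alloc_nowait();

        if (unmap) {
                dma_offload_copy(unmap);        /* hardware path */
                free(unmap);
        } else {
                sync_copy();    /* must not sleep, so fall back */
        }
        return 0;
}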
diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c
--- a/drivers/dma/mic_x100_dma.c
+++ b/drivers/dma/mic_x100_dma.c
@@ -317,7 +317,6 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	struct device *dev = mic_dma_ch_to_device(mic_ch);
 	int result;
-	struct dma_async_tx_descriptor *tx = NULL;
 
 	if (!len && !flags)
 		return NULL;
@@ -325,13 +324,10 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	spin_lock(&mic_ch->prep_lock);
 	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
 	if (result >= 0)
-		tx = allocate_tx(mic_ch);
-
-	if (!tx)
-		dev_err(dev, "Error enqueueing dma, error=%d\n", result);
-
+		return allocate_tx(mic_ch);
+	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
 	spin_unlock(&mic_ch->prep_lock);
-	return tx;
+	return NULL;
 }
 
 static struct dma_async_tx_descriptor *
@@ -339,14 +335,13 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
 {
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	int ret;
-	struct dma_async_tx_descriptor *tx = NULL;
 
 	spin_lock(&mic_ch->prep_lock);
 	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
 	if (!ret)
-		tx = allocate_tx(mic_ch);
+		return allocate_tx(mic_ch);
 	spin_unlock(&mic_ch->prep_lock);
-	return tx;
+	return NULL;
 }
 
 /* Return the status of the transaction */
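Why the revert: in this driver the prep routines intentionally return with prep_lock still held on the success path; the matching unlock is deferred to the descriptor's submit path (its tx_submit callback). The reverted patch added an unlock on the success path as well, breaking that pairing and causing the regression mentioned in the merge message. A compilable userspace analogue of the same lock-across-prepare/submit pattern; the names are hypothetical, not driver code:

/*
 * prepare() returns 0 with the lock HELD; the error path is the only
 * place prepare() itself unlocks. The unlock that matches the success
 * path lives in submit(), mirroring mic_x100's deferred release.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t prep_lock = PTHREAD_MUTEX_INITIALIZER;

static int prepare(int enqueue_ok)
{
        pthread_mutex_lock(&prep_lock);
        if (!enqueue_ok) {
                fprintf(stderr, "error enqueueing dma\n");
                pthread_mutex_unlock(&prep_lock);       /* error path */
                return -1;
        }
        return 0;       /* success: caller still owns prep_lock */
}

static void submit(void)
{
        printf("submitted\n");
        pthread_mutex_unlock(&prep_lock);       /* matches prepare() */
}

int main(void)
{
        if (prepare(1) == 0)
                submit();       /* releases the lock taken in prepare() */
        return 0;
}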
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -29,6 +29,7 @@
 #include <linux/dmapool.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/of_device.h>
@@ -1610,6 +1611,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
 	/* Register DMA channel rx irq */
 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
 		chan = &pdma->chan[i];
+		irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 		ret = devm_request_irq(chan->dev, chan->rx_irq,
 				       xgene_dma_chan_ring_isr,
 				       0, chan->name, chan);
@@ -1620,6 +1622,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma)
 			for (j = 0; j < i; j++) {
 				chan = &pdma->chan[i];
+				irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 				devm_free_irq(chan->dev, chan->rx_irq, chan);
 			}
@@ -1640,6 +1643,7 @@ static void xgene_dma_free_irqs(struct xgene_dma *pdma)
 	for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) {
 		chan = &pdma->chan[i];
+		irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY);
 		devm_free_irq(chan->dev, chan->rx_irq, chan);
 	}
 }
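Background on IRQ_DISABLE_UNLAZY: by default the genirq core disables interrupts lazily, i.e. disable_irq() only marks the line disabled and masks it at the controller when the next interrupt actually arrives; on hardware that re-raises the interrupt in that window, the same event can be delivered twice. Setting IRQ_DISABLE_UNLAZY makes disable_irq() mask the line immediately, and the driver clears the flag again before freeing the IRQ so the lazy default is restored for the next user. A short sketch of the same set-before-request / clear-before-free pairing in a hypothetical driver (not the xgene source):

/*
 * demo_* names are hypothetical; irq_set_status_flags(),
 * irq_clear_status_flags(), devm_request_irq() and devm_free_irq()
 * are the real genirq/devres APIs used in the diff above.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static irqreturn_t demo_isr(int irq, void *data)
{
        return IRQ_HANDLED;
}

static int demo_request_rx_irq(struct device *dev, unsigned int irq,
                               void *data)
{
        int ret;

        /* Mask eagerly on disable_irq() to avoid a stale extra IRQ. */
        irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
        ret = devm_request_irq(dev, irq, demo_isr, 0, "demo-rx", data);
        if (ret)
                irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
        return ret;
}

static void demo_free_rx_irq(struct device *dev, unsigned int irq,
                             void *data)
{
        /* Restore the lazy default before handing the line back. */
        irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
        devm_free_irq(dev, irq, data);
}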