Commit 94fb175c authored by Linus Torvalds

Merge tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine

Pull dmaengine fixes from Dan Williams:

1/ regression fix for Xen, which now trips over a broken assumption
   about the dma address size on 32-bit builds (a short sketch of the
   failure mode follows the merge metadata below)

2/ new quirk for netdma to ignore dma channels that cannot meet
   netdma's alignment requirements

3/ fixes for two long-standing issues: a ring size overflow in
   ioatdma and potential stack corruption in iop-adma

* tag 'dmaengine-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/dmaengine:
  netdma: adding alignment check for NETDMA ops
  ioatdma: DMA copy alignment needed to address IOAT DMA silicon errata
  ioat: ring size variables need to be 32bit to avoid overflow
  iop-adma: Corrected array overflow in RAID6 Xscale(R) test.
  ioat: fix size of 'completion' for Xen
parents a9e1e53b a2bd1140
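
The Xen regression in fix 1 comes down to a type truncation: on a 32-bit
build with 64-bit DMA addresses (e.g. i386 PAE under Xen), dma_addr_t is
wider than unsigned long, so the ioat driver's completion bookkeeping
silently dropped the upper 32 bits. A minimal userspace sketch of that
failure mode (the typedef is a stand-in for the kernel's type; this is
an illustration, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's dma_addr_t when
     * CONFIG_ARCH_DMA_ADDR_T_64BIT is set (i386 PAE, as under Xen). */
    typedef uint64_t dma_addr_t;

    int main(void)
    {
        dma_addr_t completion = 0x1cafef000ULL; /* address above 4GB */

        /* The old code parked this in an unsigned long, which is only
         * 32 bits on a 32-bit build: the high bits vanish, so the value
         * can never match chan->last_completion again. */
        uint32_t stored = (uint32_t)completion;

        printf("actual: 0x%llx  stored: 0x%lx\n",
               (unsigned long long)completion, (unsigned long)stored);
        return (dma_addr_t)stored != completion; /* nonzero: truncated */
    }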
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 }
 EXPORT_SYMBOL(dma_find_channel);
 
+/*
+ * net_dma_find_channel - find a channel for net_dma
+ * net_dma has alignment requirements
+ */
+struct dma_chan *net_dma_find_channel(void)
+{
+	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
+
+	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
+		return NULL;
+
+	return chan;
+}
+EXPORT_SYMBOL(net_dma_find_channel);
+
 /**
  * dma_issue_pending_all - flush all pending operations across all channels
  */
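
The helper above rejects any channel whose device cannot do byte-granular
copies: is_dma_copy_aligned(chan->device, 1, 1, 1) probes with a source
offset, destination offset, and length of 1, which only a device with
copy_align == 0 can satisfy. A simplified stand-alone model of that check
(the kernel derives the mask from the device's copy_align field; this is
an illustration, not the kernel's exact code):

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified model of the dmaengine alignment check: copy_align is
     * log2 of the device's required alignment; an operation passes only
     * if both offsets and the length are multiples of that alignment. */
    static bool is_copy_aligned(unsigned int copy_align,
                                size_t dst_off, size_t src_off, size_t len)
    {
        size_t mask = ((size_t)1 << copy_align) - 1;

        return ((dst_off | src_off | len) & mask) == 0;
    }

    int main(void)
    {
        /* copy_align == 0 (byte-granular) passes the (1, 1, 1) probe;
         * copy_align == 6 (the 64-byte ioat quirk below) fails it. */
        bool ok = is_copy_aligned(0, 1, 1, 1) &&
                  !is_copy_aligned(6, 1, 1, 1);

        return ok ? 0 : 1;
    }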
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 			PCI_DMA_TODEVICE, flags, 0);
 }
 
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
 {
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 	u64 completion;
 
 	completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
 }
 
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete)
+			   dma_addr_t *phys_complete)
 {
 	*phys_complete = ioat_get_current_completion(chan);
 	if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
 	return true;
 }
 
-static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct list_head *_desc, *n;
 	struct dma_async_tx_descriptor *tx;
 
-	dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n",
-		 __func__, phys_complete);
+	dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
+		 __func__, (unsigned long long) phys_complete);
 
 	list_for_each_safe(_desc, n, &ioat->used_desc) {
 		struct ioat_desc_sw *desc;
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
 static void ioat1_cleanup(struct ioat_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	prefetch(chan->completion);
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
 		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
 		spin_unlock_bh(&ioat->desc_lock);
 	} else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 
 		spin_lock_bh(&ioat->desc_lock);
 		/* if we haven't made progress and we have already
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
 struct ioat_chan_common {
 	struct dma_chan common;
 	void __iomem *reg_base;
-	unsigned long last_completion;
+	dma_addr_t last_completion;
 	spinlock_t cleanup_lock;
 	unsigned long state;
 	#define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
 void __devexit ioat_dma_remove(struct ioatdma_device *device);
 struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
 					      void __iomem *iobase);
-unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
+dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
 void ioat_init_channel(struct ioatdma_device *device,
 		       struct ioat_chan_common *chan, int idx);
 enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
 		    size_t len, struct ioat_dma_descriptor *hw);
 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
-			   unsigned long *phys_complete);
+			   dma_addr_t *phys_complete);
 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
 void ioat_kobject_del(struct ioatdma_device *device);
 extern const struct sysfs_ops ioat_sysfs_ops;
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
 	spin_unlock_bh(&ioat->prep_lock);
 }
 
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
 static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
 	 */
 	struct ioat_chan_common *chan = &ioat->base;
 	struct dma_chan *c = &chan->common;
-	const u16 curr_size = ioat2_ring_size(ioat);
+	const u32 curr_size = ioat2_ring_size(ioat);
 	const u16 active = ioat2_ring_active(ioat);
-	const u16 new_size = 1 << order;
+	const u32 new_size = 1 << order;
 	struct ioat_ring_ent **ring;
 	u16 i;
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
 	return container_of(chan, struct ioat2_dma_chan, base);
 }
 
-static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
 {
 	return 1 << ioat->alloc_order;
 }
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
 	return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
 }
 
-static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat)
+static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
 {
 	return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
 }
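
The u16-to-u32 widening above bites at the ring's maximum order: with
2^16 entries, the expression 1 << order no longer fits in a u16 and
wraps to 0, so the reshape logic sees an empty ring. A compact
demonstration of the wrap (assuming order 16 is the driver's maximum,
as the overflow fix in the shortlog implies):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int order = 16; /* assumed maximum ring order in this driver */

        uint16_t old_size = (uint16_t)(1 << order); /* wraps to 0 */
        uint32_t new_size = (uint32_t)(1 << order); /* 65536 */

        printf("as u16: %u  as u32: %u\n", old_size, new_size);
        return old_size == 0 ? 0 : 1;
    }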
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
  * The difference from the dma_v2.c __cleanup() is that this routine
  * handles extended descriptors and dma-unmapping raid operations.
  */
-static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
+static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
 {
 	struct ioat_chan_common *chan = &ioat->base;
 	struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
 static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	spin_lock_bh(&chan->cleanup_lock);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
 static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
 {
 	struct ioat_chan_common *chan = &ioat->base;
-	unsigned long phys_complete;
+	dma_addr_t phys_complete;
 
 	ioat2_quiesce(chan, 0);
 	if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
 	struct ioat_chan_common *chan = &ioat->base;
 
 	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
-		unsigned long phys_complete;
+		dma_addr_t phys_complete;
 		u64 status;
 
 		status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
 	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
 }
 
+static bool is_jf_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
+	case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static bool is_snb_ioat(struct pci_dev *pdev)
+{
+	switch (pdev->device) {
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
+	case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
+		return true;
+	default:
+		return false;
+	}
+}
+
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
 	struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
 	dma->device_free_chan_resources = ioat2_free_chan_resources;
 
+	if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
+		dma->copy_align = 6;
+
 	dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
 	dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
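
Setting copy_align = 6 declares a 2^6 = 64-byte minimum alignment for
memcpy operations on the affected JSF/SNB silicon, which is exactly what
makes net_dma_find_channel()'s byte-granularity probe skip these
channels. The arithmetic, spelled out (illustrative only):

    #include <stdio.h>

    int main(void)
    {
        unsigned int copy_align = 6;             /* as set by the quirk */
        unsigned long align = 1UL << copy_align; /* 64-byte requirement */

        /* net_dma_find_channel()'s (1, 1, 1) probe against this mask: */
        printf("alignment: %lu bytes, probe %s\n", align,
               ((1UL | 1UL | 1UL) & (align - 1)) ? "rejected" : "accepted");
        return 0;
    }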
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
 	struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
 	/* address conversion buffers (dma_map / page_address) */
 	void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
-	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST];
-	dma_addr_t pq_dest[2];
+	dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
+	dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
 	int i;
 	struct dma_async_tx_descriptor *tx;
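
The old declarations reserved IOP_ADMA_NUM_SRC_TEST slots for the source
mappings, but the PQ self-test also stores the P and Q destination
addresses contiguously, so two writes landed past the end of the
on-stack array. A sketch of the fixed layout, with a stand-in value for
IOP_ADMA_NUM_SRC_TEST (illustrative, not the driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_SRC_TEST 4 /* stand-in for IOP_ADMA_NUM_SRC_TEST */

    typedef uint64_t dma_addr_t; /* stand-in type */

    int main(void)
    {
        /* Fixed layout: one array sized for the sources plus P and Q,
         * with the destination pointer aliasing its last two slots. */
        dma_addr_t pq_src[NUM_SRC_TEST + 2];
        dma_addr_t *pq_dest = &pq_src[NUM_SRC_TEST];
        int i;

        for (i = 0; i < NUM_SRC_TEST; i++)
            pq_src[i] = 0x1000 + 0x100 * i; /* source mappings */

        /* Before the fix these two stores fell past the end of a
         * NUM_SRC_TEST-sized array and clobbered neighboring stack. */
        pq_dest[0] = 0x8000; /* P destination */
        pq_dest[1] = 0x9000; /* Q destination */

        for (i = 0; i < NUM_SRC_TEST + 2; i++)
            printf("slot %d: 0x%llx\n", i,
                   (unsigned long long)pq_src[i]);
        return 0;
    }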
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -974,6 +974,7 @@ int dma_async_device_register(struct dma_device *device);
 void dma_async_device_unregister(struct dma_device *device);
 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
+struct dma_chan *net_dma_find_channel(void);
 #define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
 
 /* --- Helper iov-locking functions --- */
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1452,7 +1452,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    dma_find_channel(DMA_MEMCPY)) {
+		    net_dma_find_channel()) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 					dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1667,7 +1667,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+				tp->ucopy.dma_chan = net_dma_find_channel();
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5225,7 +5225,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+		tp->ucopy.dma_chan = net_dma_find_channel();
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1730,7 +1730,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1645,7 +1645,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
+			tp->ucopy.dma_chan = net_dma_find_channel();
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else