Commit f67b4599 authored by Dan Williams

net_dma: convert to dma_find_channel

Use the general-purpose channel allocation provided by dmaengine.
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 2ba05622
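
The whole patch follows one pattern: every place that used to read the per-CPU softnet_data.net_dma pointer (kept up to date by net_dma_rebalance()) now asks dmaengine directly for a memcpy-capable channel. A minimal sketch of that pattern follows; it assumes the 2.6.29-era dmaengine interface visible in the diff, and the helper name tcp_copy_channel() is purely illustrative, not part of the patch, which open-codes the lookup at each call site.

#include <linux/dmaengine.h>

/* Illustrative helper only: the patch performs this lookup inline in
 * tcp_recvmsg(), tcp_dma_try_early_copy(), tcp_v4_rcv() and tcp_v6_rcv().
 */
static struct dma_chan *tcp_copy_channel(void)
{
	/*
	 * dma_find_channel() is a lockless lookup of any channel that
	 * advertises DMA_MEMCPY capability; it replaces the old
	 * get_softnet_dma() rcu_dereference of softnet_data.net_dma.
	 * It may return NULL, in which case callers fall back to an
	 * ordinary CPU copy.
	 */
	return dma_find_channel(DMA_MEMCPY);
}
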
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1113,9 +1113,6 @@ struct softnet_data
 	struct sk_buff		*completion_queue;
 
 	struct napi_struct	backlog;
-#ifdef CONFIG_NET_DMA
-	struct dma_chan		*net_dma;
-#endif
 };
 
 DECLARE_PER_CPU(struct softnet_data,softnet_data);
--- a/include/net/netdma.h
+++ b/include/net/netdma.h
@@ -24,17 +24,6 @@
 #include <linux/dmaengine.h>
 #include <linux/skbuff.h>
 
-static inline struct dma_chan *get_softnet_dma(void)
-{
-	struct dma_chan *chan;
-
-	rcu_read_lock();
-	chan = rcu_dereference(__get_cpu_var(softnet_data).net_dma);
-	rcu_read_unlock();
-
-	return chan;
-}
-
 int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
 		struct sk_buff *skb, int offset, struct iovec *to,
 		size_t len, struct dma_pinned_list *pinned_list);
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4827,44 +4827,6 @@ static int dev_cpu_callback(struct notifier_block *nfb,
 }
 
 #ifdef CONFIG_NET_DMA
-/**
- * net_dma_rebalance - try to maintain one DMA channel per CPU
- * @net_dma: DMA client and associated data (lock, channels, channel_mask)
- *
- * This is called when the number of channels allocated to the net_dma client
- * changes.  The net_dma client tries to have one DMA channel per CPU.
- */
-static void net_dma_rebalance(struct net_dma *net_dma)
-{
-	unsigned int cpu, i, n, chan_idx;
-	struct dma_chan *chan;
-
-	if (cpus_empty(net_dma->channel_mask)) {
-		for_each_online_cpu(cpu)
-			rcu_assign_pointer(per_cpu(softnet_data, cpu).net_dma, NULL);
-		return;
-	}
-
-	i = 0;
-	cpu = first_cpu(cpu_online_map);
-
-	for_each_cpu_mask_nr(chan_idx, net_dma->channel_mask) {
-		chan = net_dma->channels[chan_idx];
-
-		n = ((num_online_cpus() / cpus_weight(net_dma->channel_mask))
-		   + (i < (num_online_cpus() %
-			cpus_weight(net_dma->channel_mask)) ? 1 : 0));
-
-		while(n) {
-			per_cpu(softnet_data, cpu).net_dma = chan;
-			cpu = next_cpu(cpu, cpu_online_map);
-			n--;
-		}
-		i++;
-	}
-}
-
 /**
  * netdev_dma_event - event callback for the net_dma_client
  * @client: should always be net_dma_client
@@ -4894,7 +4856,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			net_dma->channels[pos] = chan;
 			cpu_set(pos, net_dma->channel_mask);
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	case DMA_RESOURCE_REMOVED:
@@ -4909,7 +4870,6 @@ netdev_dma_event(struct dma_client *client, struct dma_chan *chan,
 			ack = DMA_ACK;
 			cpu_clear(pos, net_dma->channel_mask);
 			net_dma->channels[i] = NULL;
-			net_dma_rebalance(net_dma);
 		}
 		break;
 	default:
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1317,7 +1317,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if ((available < target) &&
 		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
 		    !sysctl_tcp_low_latency &&
-		    __get_cpu_var(softnet_data).net_dma) {
+		    dma_find_channel(DMA_MEMCPY)) {
 			preempt_enable_no_resched();
 			tp->ucopy.pinned_list =
 				dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1527,7 +1527,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		if (!(flags & MSG_TRUNC)) {
 #ifdef CONFIG_NET_DMA
 			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-				tp->ucopy.dma_chan = get_softnet_dma();
+				tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 			if (tp->ucopy.dma_chan) {
 				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5005,7 +5005,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
 		return 0;
 
 	if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-		tp->ucopy.dma_chan = get_softnet_dma();
+		tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 
 	if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,7 +1594,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v4_do_rcv(sk, skb);
 		else
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1640,7 +1640,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 #ifdef CONFIG_NET_DMA
 		struct tcp_sock *tp = tcp_sk(sk);
 		if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
-			tp->ucopy.dma_chan = get_softnet_dma();
+			tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY);
 		if (tp->ucopy.dma_chan)
 			ret = tcp_v6_do_rcv(sk, skb);
 		else