Commit 7db6b048 authored by Sridhar Samudrala, committed by David S. Miller

net: Commonize busy polling code to focus on napi_id instead of socket

Move the core functionality in sk_busy_loop() to napi_busy_loop() and
make it independent of sk.

This enables reusing this function in the epoll busy loop implementation.
Signed-off-by: Sridhar Samudrala <sridhar.samudrala@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 37056719
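
In short: napi_busy_loop() polls the NAPI context identified by napi_id until an optional loop_end callback returns true; passing a NULL callback (as the nonblocking socket path now does) zeroes start_time and limits the loop to a single poll pass. A minimal sketch of a non-socket caller under these semantics, where my_loop_end, struct my_ctx, and its done flag are hypothetical names for illustration (napi_busy_loop(), busy_loop_timeout(), and MIN_NAPI_ID come from this patch and the existing busy_poll.h):

	/* Hypothetical stop condition, mirroring sk_busy_loop_end() below:
	 * return true (stop polling) once our context has work pending or
	 * the sysctl-controlled busy poll interval has elapsed.
	 */
	static bool my_loop_end(void *arg, unsigned long start_time)
	{
		struct my_ctx *ctx = arg;	/* hypothetical caller state */

		return ctx->done || busy_loop_timeout(start_time);
	}

	/* ... in the caller, once a valid napi_id is known ... */
	if (napi_id >= MIN_NAPI_ID)
		napi_busy_loop(napi_id, my_loop_end, ctx);
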
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -51,7 +51,11 @@ static inline bool sk_can_busy_loop(const struct sock *sk)
 	return sk->sk_ll_usec && !signal_pending(current);
 }
 
-void sk_busy_loop(struct sock *sk, int nonblock);
+bool sk_busy_loop_end(void *p, unsigned long start_time);
+
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg);
 
 #else /* CONFIG_NET_RX_BUSY_POLL */
 static inline unsigned long net_busy_loop_on(void)
@@ -64,10 +68,6 @@ static inline bool sk_can_busy_loop(struct sock *sk)
 	return false;
 }
 
-static inline void sk_busy_loop(struct sock *sk, int nonblock)
-{
-}
-
 #endif /* CONFIG_NET_RX_BUSY_POLL */
 
 static inline unsigned long busy_loop_current_time(void)
@@ -111,6 +111,16 @@ static inline bool sk_busy_loop_timeout(struct sock *sk,
 	return true;
 }
 
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
+{
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
+
+	if (napi_id >= MIN_NAPI_ID)
+		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
+#endif
+}
+
 /* used in the NIC receive handler to mark the skb */
 static inline void skb_mark_napi_id(struct sk_buff *skb,
 				    struct napi_struct *napi)
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5060,19 +5060,16 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock)
 	do_softirq();
 }
 
-void sk_busy_loop(struct sock *sk, int nonblock)
+void napi_busy_loop(unsigned int napi_id,
+		    bool (*loop_end)(void *, unsigned long),
+		    void *loop_end_arg)
 {
-	unsigned long start_time = nonblock ? 0 : busy_loop_current_time();
+	unsigned long start_time = loop_end ? busy_loop_current_time() : 0;
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
-	unsigned int napi_id;
 
 restart:
-	napi_id = READ_ONCE(sk->sk_napi_id);
-	if (napi_id < MIN_NAPI_ID)
-		return;
-
 	napi_poll = NULL;
 
 	rcu_read_lock();
@@ -5106,12 +5103,11 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 		trace_napi_poll(napi, work, BUSY_POLL_BUDGET);
 count:
 		if (work > 0)
-			__NET_ADD_STATS(sock_net(sk),
+			__NET_ADD_STATS(dev_net(napi->dev),
 					LINUX_MIB_BUSYPOLLRXPACKETS, work);
 		local_bh_enable();
 
-		if (nonblock || !skb_queue_empty(&sk->sk_receive_queue) ||
-		    sk_busy_loop_timeout(sk, start_time))
+		if (!loop_end || loop_end(loop_end_arg, start_time))
 			break;
 
 		if (unlikely(need_resched())) {
@@ -5120,8 +5116,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 			preempt_enable();
 			rcu_read_unlock();
 			cond_resched();
-			if (!skb_queue_empty(&sk->sk_receive_queue) ||
-			    sk_busy_loop_timeout(sk, start_time))
+			if (loop_end(loop_end_arg, start_time))
 				return;
 			goto restart;
 		}
@@ -5133,7 +5128,7 @@ void sk_busy_loop(struct sock *sk, int nonblock)
 out:
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(sk_busy_loop);
+EXPORT_SYMBOL(napi_busy_loop);
 
 #endif /* CONFIG_NET_RX_BUSY_POLL */
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3237,3 +3237,14 @@ static int __init proto_init(void)
 subsys_initcall(proto_init);
 
 #endif /* PROC_FS */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+bool sk_busy_loop_end(void *p, unsigned long start_time)
+{
+	struct sock *sk = p;
+
+	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	       sk_busy_loop_timeout(sk, start_time);
+}
+EXPORT_SYMBOL(sk_busy_loop_end);
+#endif /* CONFIG_NET_RX_BUSY_POLL */
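
For the epoll reuse called out in the commit message, the follow-up patch in this series supplies an eventpoll-specific loop_end of the same shape. Roughly, as a sketch rather than a quote of that later commit:

	static bool ep_busy_loop_end(void *p, unsigned long start_time)
	{
		struct eventpoll *ep = p;	/* epoll instance being polled */

		return ep_events_available(ep) || busy_loop_timeout(start_time);
	}

Any subsystem that records a napi_id can follow the same pattern: package its wakeup condition into a loop_end callback and let napi_busy_loop() drive the device polling.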