Commit 3413f041 authored by Xuan Zhuo, committed by Daniel Borkmann

xsk: Change the tx writeable condition

Change the tx writeable condition from "the tx queue is not full" to "the
number of entries present in the tx queue is less than half of the total
number of entries". The tx queue is non-full for only a very short time,
so the old condition generated a large number of EPOLLOUT events and, in
turn, a large number of process wakeups.

Fixes: 35fcde7f ("xsk: support for Tx")
Signed-off-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/508fef55188d4e1160747ead64c6dcda36735880.1606555939.git.xuanzhuo@linux.alibaba.com
parent f5da5418
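For context on the wakeup behavior the commit message describes: a user-space
transmit loop typically blocks in poll() until the kernel reports the socket
writeable. The sketch below is not part of this commit; it is a minimal
illustration assuming a hypothetical, already-bound AF_XDP socket fd (xsk_fd).
With the old condition, poll() returned EPOLLOUT almost as soon as a single
slot freed up, waking the process even though the tx ring was still nearly full.

/* Minimal user-space tx wait loop on an AF_XDP socket (illustration
 * only, not kernel code). Assumes xsk_fd is an already-bound AF_XDP
 * socket; filling the tx ring itself is elided.
 */
#include <poll.h>
#include <stdio.h>

static void wait_tx_writeable(int xsk_fd)
{
	struct pollfd pfd = {
		.fd     = xsk_fd,
		.events = POLLOUT,
	};

	/* Blocks until xsk_poll() reports the tx ring writeable. After
	 * this commit, that means the ring is at most half full, so each
	 * wakeup leaves room to produce a meaningful batch of descriptors.
	 */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLOUT))
		printf("tx ring writeable, produce descriptors\n");
}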
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -211,6 +211,14 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len,
 	return 0;
 }
 
+static bool xsk_tx_writeable(struct xdp_sock *xs)
+{
+	if (xskq_cons_present_entries(xs->tx) > xs->tx->nentries / 2)
+		return false;
+
+	return true;
+}
+
 static bool xsk_is_bound(struct xdp_sock *xs)
 {
 	if (READ_ONCE(xs->state) == XSK_BOUND) {
@@ -296,7 +304,8 @@ void xsk_tx_release(struct xsk_buff_pool *pool)
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
 		__xskq_cons_release(xs->tx);
-		xs->sk.sk_write_space(&xs->sk);
+		if (xsk_tx_writeable(xs))
+			xs->sk.sk_write_space(&xs->sk);
 	}
 	rcu_read_unlock();
 }
@@ -436,7 +445,8 @@ static int xsk_generic_xmit(struct sock *sk)
 
 out:
 	if (sent_frame)
-		sk->sk_write_space(sk);
+		if (xsk_tx_writeable(xs))
+			sk->sk_write_space(sk);
 
 	mutex_unlock(&xs->mutex);
 	return err;
@@ -493,7 +503,7 @@ static __poll_t xsk_poll(struct file *file, struct socket *sock,
 
 	if (xs->rx && !xskq_prod_is_empty(xs->rx))
 		mask |= EPOLLIN | EPOLLRDNORM;
-	if (xs->tx && !xskq_cons_is_full(xs->tx))
+	if (xs->tx && xsk_tx_writeable(xs))
 		mask |= EPOLLOUT | EPOLLWRNORM;
 
 	return mask;
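The new xsk_tx_writeable() gate means sk_write_space() (and hence EPOLLOUT)
fires only while at most half of the tx ring's entries are outstanding. A
standalone worked example of the threshold, mirroring the kernel condition in
plain C (illustration only, not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Writeable iff at most half of the ring's nentries are currently
 * held by outstanding tx descriptors.
 */
static bool tx_writeable(uint32_t present, uint32_t nentries)
{
	return present <= nentries / 2;
}

int main(void)
{
	assert(tx_writeable(128, 256));  /* exactly half: writeable */
	assert(!tx_writeable(129, 256)); /* over half: not writeable */
	assert(!tx_writeable(255, 256)); /* old "not full" test would still wake here */
	return 0;
}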
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -264,6 +264,12 @@ static inline bool xskq_cons_is_full(struct xsk_queue *q)
 		q->nentries;
 }
 
+static inline u32 xskq_cons_present_entries(struct xsk_queue *q)
+{
+	/* No barriers needed since data is not accessed */
+	return READ_ONCE(q->ring->producer) - READ_ONCE(q->ring->consumer);
+}
+
 /* Functions for producers */
 
 static inline bool xskq_prod_is_full(struct xsk_queue *q)
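xskq_cons_present_entries() relies on unsigned 32-bit arithmetic: producer and
consumer are free-running counters, and producer - consumer yields the number
of in-flight entries even after the counters wrap around. A standalone
illustration of that property (not kernel code):

#include <assert.h>
#include <stdint.h>

/* Free-running u32 counters: the difference stays correct across
 * wraparound because unsigned subtraction is modulo 2^32.
 */
static uint32_t present_entries(uint32_t producer, uint32_t consumer)
{
	return producer - consumer;
}

int main(void)
{
	assert(present_entries(10, 4) == 6);
	/* consumer just below the wrap point, producer already wrapped */
	assert(present_entries(3, UINT32_MAX - 1) == 5);
	return 0;
}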