Commit e0d1095a authored by Cong Wang, committed by David S. Miller

net: rename CONFIG_NET_LL_RX_POLL to CONFIG_NET_RX_BUSY_POLL

Eliezer renamed several *ll_poll identifiers to *busy_poll, but missed
CONFIG_NET_LL_RX_POLL; to avoid confusion, rename it too.

Cc: Eliezer Tamir <eliezer.tamir@linux.intel.com>
Cc: David S. Miller <davem@davemloft.net>
Signed-off-by: Cong Wang <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent dfcefb0b
...@@ -52,7 +52,7 @@ Default: 64 ...@@ -52,7 +52,7 @@ Default: 64
busy_read busy_read
---------------- ----------------
Low latency busy poll timeout for socket reads. (needs CONFIG_NET_LL_RX_POLL) Low latency busy poll timeout for socket reads. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for packets on the device queue. Approximate time in us to busy loop waiting for packets on the device queue.
This sets the default value of the SO_BUSY_POLL socket option. This sets the default value of the SO_BUSY_POLL socket option.
Can be set or overridden per socket by setting socket option SO_BUSY_POLL, Can be set or overridden per socket by setting socket option SO_BUSY_POLL,
...@@ -63,7 +63,7 @@ Default: 0 (off) ...@@ -63,7 +63,7 @@ Default: 0 (off)
busy_poll busy_poll
---------------- ----------------
Low latency busy poll timeout for poll and select. (needs CONFIG_NET_LL_RX_POLL) Low latency busy poll timeout for poll and select. (needs CONFIG_NET_RX_BUSY_POLL)
Approximate time in us to busy loop waiting for events. Approximate time in us to busy loop waiting for events.
Recommended value depends on the number of sockets you poll on. Recommended value depends on the number of sockets you poll on.
For several sockets 50, for several hundreds 100. For several sockets 50, for several hundreds 100.
......
...@@ -486,7 +486,7 @@ struct bnx2x_fastpath { ...@@ -486,7 +486,7 @@ struct bnx2x_fastpath {
struct napi_struct napi; struct napi_struct napi;
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state; unsigned int state;
#define BNX2X_FP_STATE_IDLE 0 #define BNX2X_FP_STATE_IDLE 0
#define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */ #define BNX2X_FP_STATE_NAPI (1 << 0) /* NAPI owns this FP */
...@@ -498,7 +498,7 @@ struct bnx2x_fastpath { ...@@ -498,7 +498,7 @@ struct bnx2x_fastpath {
#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD) #define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
/* protect state */ /* protect state */
spinlock_t lock; spinlock_t lock;
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
union host_hc_status_block status_blk; union host_hc_status_block status_blk;
/* chip independent shortcuts into sb structure */ /* chip independent shortcuts into sb structure */
...@@ -572,7 +572,7 @@ struct bnx2x_fastpath { ...@@ -572,7 +572,7 @@ struct bnx2x_fastpath {
#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) #define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index]))
#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) #define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats))
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp) static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
{ {
spin_lock_init(&fp->lock); spin_lock_init(&fp->lock);
...@@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp) ...@@ -680,7 +680,7 @@ static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
{ {
return false; return false;
} }
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
/* Use 2500 as a mini-jumbo MTU for FCoE */ /* Use 2500 as a mini-jumbo MTU for FCoE */
#define BNX2X_FCOE_MINI_JUMBO_MTU 2500 #define BNX2X_FCOE_MINI_JUMBO_MTU 2500
......
...@@ -3117,7 +3117,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget) ...@@ -3117,7 +3117,7 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
return work_done; return work_done;
} }
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */ /* must be called with local_bh_disable()d */
int bnx2x_low_latency_recv(struct napi_struct *napi) int bnx2x_low_latency_recv(struct napi_struct *napi)
{ {
......
...@@ -12026,7 +12026,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { ...@@ -12026,7 +12026,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn, .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
#endif #endif
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnx2x_low_latency_recv, .ndo_busy_poll = bnx2x_low_latency_recv,
#endif #endif
}; };
......
...@@ -54,7 +54,7 @@ ...@@ -54,7 +54,7 @@
#include <net/busy_poll.h> #include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
#define LL_EXTENDED_STATS #define LL_EXTENDED_STATS
#endif #endif
/* common prefix used by pr_<> macros */ /* common prefix used by pr_<> macros */
...@@ -366,7 +366,7 @@ struct ixgbe_q_vector { ...@@ -366,7 +366,7 @@ struct ixgbe_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9]; char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state; unsigned int state;
#define IXGBE_QV_STATE_IDLE 0 #define IXGBE_QV_STATE_IDLE 0
#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ #define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */
...@@ -377,12 +377,12 @@ struct ixgbe_q_vector { ...@@ -377,12 +377,12 @@ struct ixgbe_q_vector {
#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) #define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD)
#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) #define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD)
spinlock_t lock; spinlock_t lock;
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
/* for dynamic allocation of rings associated with this q_vector */ /* for dynamic allocation of rings associated with this q_vector */
struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
}; };
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{ {
...@@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) ...@@ -462,7 +462,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED)); WARN_ON(!(q_vector->state & IXGBE_QV_LOCKED));
return q_vector->state & IXGBE_QV_USER_PEND; return q_vector->state & IXGBE_QV_USER_PEND;
} }
#else /* CONFIG_NET_LL_RX_POLL */ #else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{ {
} }
...@@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector) ...@@ -491,7 +491,7 @@ static inline bool ixgbe_qv_ll_polling(struct ixgbe_q_vector *q_vector)
{ {
return false; return false;
} }
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON #ifdef CONFIG_IXGBE_HWMON
......
...@@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -1998,7 +1998,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
return total_rx_packets; return total_rx_packets;
} }
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */ /* must be called with local_bh_disable()d */
static int ixgbe_low_latency_recv(struct napi_struct *napi) static int ixgbe_low_latency_recv(struct napi_struct *napi)
{ {
...@@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) ...@@ -2030,7 +2030,7 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi)
return found; return found;
} }
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
/** /**
* ixgbe_configure_msix - Configure MSI-X hardware * ixgbe_configure_msix - Configure MSI-X hardware
...@@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { ...@@ -7227,7 +7227,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll, .ndo_poll_controller = ixgbe_netpoll,
#endif #endif
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = ixgbe_low_latency_recv, .ndo_busy_poll = ixgbe_low_latency_recv,
#endif #endif
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
......
...@@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset) ...@@ -223,7 +223,7 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS: case ETH_SS_STATS:
return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) + return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num * 2) + (priv->tx_ring_num * 2) +
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
(priv->rx_ring_num * 5); (priv->rx_ring_num * 5);
#else #else
(priv->rx_ring_num * 2); (priv->rx_ring_num * 2);
...@@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev, ...@@ -276,7 +276,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < priv->rx_ring_num; i++) { for (i = 0; i < priv->rx_ring_num; i++) {
data[index++] = priv->rx_ring[i].packets; data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes; data[index++] = priv->rx_ring[i].bytes;
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
data[index++] = priv->rx_ring[i].yields; data[index++] = priv->rx_ring[i].yields;
data[index++] = priv->rx_ring[i].misses; data[index++] = priv->rx_ring[i].misses;
data[index++] = priv->rx_ring[i].cleaned; data[index++] = priv->rx_ring[i].cleaned;
...@@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev, ...@@ -344,7 +344,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
"rx%d_packets", i); "rx%d_packets", i);
sprintf(data + (index++) * ETH_GSTRING_LEN, sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i); "rx%d_bytes", i);
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
sprintf(data + (index++) * ETH_GSTRING_LEN, sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_napi_yield", i); "rx%d_napi_yield", i);
sprintf(data + (index++) * ETH_GSTRING_LEN, sprintf(data + (index++) * ETH_GSTRING_LEN,
......
...@@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up) ...@@ -68,7 +68,7 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
return 0; return 0;
} }
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */ /* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi) static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{ {
...@@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi) ...@@ -94,7 +94,7 @@ static int mlx4_en_low_latency_recv(struct napi_struct *napi)
return done; return done;
} }
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
...@@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = { ...@@ -2140,7 +2140,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx4_en_filter_rfs, .ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif #endif
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = mlx4_en_low_latency_recv, .ndo_busy_poll = mlx4_en_low_latency_recv,
#endif #endif
}; };
......
...@@ -292,7 +292,7 @@ struct mlx4_en_rx_ring { ...@@ -292,7 +292,7 @@ struct mlx4_en_rx_ring {
void *rx_info; void *rx_info;
unsigned long bytes; unsigned long bytes;
unsigned long packets; unsigned long packets;
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned long yields; unsigned long yields;
unsigned long misses; unsigned long misses;
unsigned long cleaned; unsigned long cleaned;
...@@ -318,7 +318,7 @@ struct mlx4_en_cq { ...@@ -318,7 +318,7 @@ struct mlx4_en_cq {
struct mlx4_cqe *buf; struct mlx4_cqe *buf;
#define MLX4_EN_OPCODE_ERROR 0x1e #define MLX4_EN_OPCODE_ERROR 0x1e
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state; unsigned int state;
#define MLX4_EN_CQ_STATE_IDLE 0 #define MLX4_EN_CQ_STATE_IDLE 0
#define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */ #define MLX4_EN_CQ_STATE_NAPI 1 /* NAPI owns this CQ */
...@@ -329,7 +329,7 @@ struct mlx4_en_cq { ...@@ -329,7 +329,7 @@ struct mlx4_en_cq {
#define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD) #define CQ_YIELD (MLX4_EN_CQ_STATE_NAPI_YIELD | MLX4_EN_CQ_STATE_POLL_YIELD)
#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
spinlock_t poll_lock; /* protects from LLS/napi conflicts */ spinlock_t poll_lock; /* protects from LLS/napi conflicts */
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
}; };
struct mlx4_en_port_profile { struct mlx4_en_port_profile {
...@@ -580,7 +580,7 @@ struct mlx4_mac_entry { ...@@ -580,7 +580,7 @@ struct mlx4_mac_entry {
struct rcu_head rcu; struct rcu_head rcu;
}; };
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq) static inline void mlx4_en_cq_init_lock(struct mlx4_en_cq *cq)
{ {
spin_lock_init(&cq->poll_lock); spin_lock_init(&cq->poll_lock);
...@@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq) ...@@ -687,7 +687,7 @@ static inline bool mlx4_en_cq_ll_polling(struct mlx4_en_cq *cq)
{ {
return false; return false;
} }
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63) #define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
......
...@@ -973,7 +973,7 @@ struct net_device_ops { ...@@ -973,7 +973,7 @@ struct net_device_ops {
gfp_t gfp); gfp_t gfp);
void (*ndo_netpoll_cleanup)(struct net_device *dev); void (*ndo_netpoll_cleanup)(struct net_device *dev);
#endif #endif
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
int (*ndo_busy_poll)(struct napi_struct *dev); int (*ndo_busy_poll)(struct napi_struct *dev);
#endif #endif
int (*ndo_set_vf_mac)(struct net_device *dev, int (*ndo_set_vf_mac)(struct net_device *dev,
......
...@@ -501,7 +501,7 @@ struct sk_buff { ...@@ -501,7 +501,7 @@ struct sk_buff {
/* 7/9 bit hole (depending on ndisc_nodetype presence) */ /* 7/9 bit hole (depending on ndisc_nodetype presence) */
kmemcheck_bitfield_end(flags2); kmemcheck_bitfield_end(flags2);
#if defined CONFIG_NET_DMA || defined CONFIG_NET_LL_RX_POLL #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
union { union {
unsigned int napi_id; unsigned int napi_id;
dma_cookie_t dma_cookie; dma_cookie_t dma_cookie;
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
#include <linux/netdevice.h> #include <linux/netdevice.h>
#include <net/ip.h> #include <net/ip.h>
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
struct napi_struct; struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly; extern unsigned int sysctl_net_busy_read __read_mostly;
...@@ -146,7 +146,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb) ...@@ -146,7 +146,7 @@ static inline void sk_mark_napi_id(struct sock *sk, struct sk_buff *skb)
sk->sk_napi_id = skb->napi_id; sk->sk_napi_id = skb->napi_id;
} }
#else /* CONFIG_NET_LL_RX_POLL */ #else /* CONFIG_NET_RX_BUSY_POLL */
static inline unsigned long net_busy_loop_on(void) static inline unsigned long net_busy_loop_on(void)
{ {
return 0; return 0;
...@@ -186,5 +186,5 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) ...@@ -186,5 +186,5 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock)
return false; return false;
} }
#endif /* CONFIG_NET_LL_RX_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
#endif /* _LINUX_NET_BUSY_POLL_H */ #endif /* _LINUX_NET_BUSY_POLL_H */
...@@ -327,7 +327,7 @@ struct sock { ...@@ -327,7 +327,7 @@ struct sock {
#ifdef CONFIG_RPS #ifdef CONFIG_RPS
__u32 sk_rxhash; __u32 sk_rxhash;
#endif #endif
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sk_napi_id; unsigned int sk_napi_id;
unsigned int sk_ll_usec; unsigned int sk_ll_usec;
#endif #endif
......
...@@ -244,7 +244,7 @@ config NETPRIO_CGROUP ...@@ -244,7 +244,7 @@ config NETPRIO_CGROUP
Cgroup subsystem for use in assigning processes to network priorities on Cgroup subsystem for use in assigning processes to network priorities on
a per-interface basis a per-interface basis
config NET_LL_RX_POLL config NET_RX_BUSY_POLL
boolean boolean
default y default y
......
...@@ -740,7 +740,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old) ...@@ -740,7 +740,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
skb_copy_secmark(new, old); skb_copy_secmark(new, old);
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
new->napi_id = old->napi_id; new->napi_id = old->napi_id;
#endif #endif
} }
......
...@@ -900,7 +900,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname, ...@@ -900,7 +900,7 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool); sock_valbool_flag(sk, SOCK_SELECT_ERR_QUEUE, valbool);
break; break;
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL: case SO_BUSY_POLL:
/* allow unprivileged users to decrease the value */ /* allow unprivileged users to decrease the value */
if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN)) if ((val > sk->sk_ll_usec) && !capable(CAP_NET_ADMIN))
...@@ -1170,7 +1170,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, ...@@ -1170,7 +1170,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE); v.val = sock_flag(sk, SOCK_SELECT_ERR_QUEUE);
break; break;
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
case SO_BUSY_POLL: case SO_BUSY_POLL:
v.val = sk->sk_ll_usec; v.val = sk->sk_ll_usec;
break; break;
...@@ -2292,7 +2292,7 @@ void sock_init_data(struct socket *sock, struct sock *sk) ...@@ -2292,7 +2292,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_stamp = ktime_set(-1L, 0); sk->sk_stamp = ktime_set(-1L, 0);
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = 0; sk->sk_napi_id = 0;
sk->sk_ll_usec = sysctl_net_busy_read; sk->sk_ll_usec = sysctl_net_busy_read;
#endif #endif
......
...@@ -298,7 +298,7 @@ static struct ctl_table net_core_table[] = { ...@@ -298,7 +298,7 @@ static struct ctl_table net_core_table[] = {
.proc_handler = flow_limit_table_len_sysctl .proc_handler = flow_limit_table_len_sysctl
}, },
#endif /* CONFIG_NET_FLOW_LIMIT */ #endif /* CONFIG_NET_FLOW_LIMIT */
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
{ {
.procname = "busy_poll", .procname = "busy_poll",
.data = &sysctl_net_busy_poll, .data = &sysctl_net_busy_poll,
......
...@@ -106,7 +106,7 @@ ...@@ -106,7 +106,7 @@
#include <linux/atalk.h> #include <linux/atalk.h>
#include <net/busy_poll.h> #include <net/busy_poll.h>
#ifdef CONFIG_NET_LL_RX_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int sysctl_net_busy_read __read_mostly; unsigned int sysctl_net_busy_read __read_mostly;
unsigned int sysctl_net_busy_poll __read_mostly; unsigned int sysctl_net_busy_poll __read_mostly;
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment