Commit b0a03819 authored by David S. Miller

Merge branch 'socket-poll-related-cleanups-v2'

Christoph Hellwig says:

====================
socket poll related cleanups v2

A couple of cleanups I stumbled upon when studying the networking
poll code.

Changes since v1:
 - drop a disputed patch from this series (to be sent separately)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3f6bcc51 a331de3b
......@@ -1071,7 +1071,7 @@ __poll_t af_alg_poll(struct file *file, struct socket *sock,
struct af_alg_ctx *ctx = ask->private;
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
if (!ctx->more || ctx->used)
......
......@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#endif
}
/* Busy-poll the socket once on behalf of poll()/select(), but only when the
 * socket supports busy polling and the syscall explicitly requested it via
 * POLL_BUSY_LOOP in the requested event mask.
 */
static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
{
	struct sock *sk = sock->sk;

	if (!sk_can_busy_loop(sk))
		return;
	/* once, only if requested by syscall */
	if (!events || !(events & POLL_BUSY_LOOP))
		return;
	sk_busy_loop(sk, 1);
}
/* If this socket can busy-poll (poll_ll), report POLL_BUSY_LOOP back to the
 * system call so userspace knows busy polling is available; 0 otherwise.
 */
static inline __poll_t sock_poll_busy_flag(struct socket *sock)
{
	if (sk_can_busy_loop(sock->sk))
		return POLL_BUSY_LOOP;
	return 0;
}
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
......
......@@ -2057,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
* @wait_address: socket wait queue
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
*/
static inline void sock_poll_wait(struct file *filp,
wait_queue_head_t *wait_address, poll_table *p)
static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
if (!poll_does_not_wait(p) && wait_address) {
poll_wait(filp, wait_address, p);
struct socket *sock = filp->private_data;
if (!poll_does_not_wait(p)) {
poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
* socket flags modification.
*
......
......@@ -653,7 +653,7 @@ __poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
struct atm_vcc *vcc;
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
vcc = ATM_SD(sock);
......
......@@ -941,7 +941,7 @@ static __poll_t caif_poll(struct file *file,
__poll_t mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
......
......@@ -837,7 +837,7 @@ __poll_t datagram_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
......
......@@ -325,7 +325,7 @@ __poll_t dccp_poll(struct file *file, struct socket *sock,
__poll_t mask;
struct sock *sk = sock->sk;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
......
......@@ -507,7 +507,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
const struct tcp_sock *tp = tcp_sk(sk);
int state;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
......
......@@ -1494,7 +1494,7 @@ __poll_t iucv_sock_poll(struct file *file, struct socket *sock,
struct sock *sk = sock->sk;
__poll_t mask = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
......
......@@ -556,7 +556,7 @@ static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
pr_debug("%p\n", sk);
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
if (sk->sk_state == LLCP_LISTEN)
return llcp_accept_poll(sk);
......
......@@ -741,7 +741,7 @@ static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
struct rxrpc_sock *rx = rxrpc_sk(sk);
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
......
......@@ -1535,7 +1535,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
mask |= EPOLLERR;
} else {
if (sk->sk_state != SMC_CLOSED)
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
if (sk->sk_err)
mask |= EPOLLERR;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
......
......@@ -1130,12 +1130,21 @@ EXPORT_SYMBOL(sock_create_lite);
static __poll_t sock_poll(struct file *file, poll_table *wait)
{
struct socket *sock = file->private_data;
__poll_t events = poll_requested_events(wait);
__poll_t events = poll_requested_events(wait), flag = 0;
sock_poll_busy_loop(sock, events);
if (!sock->ops->poll)
return 0;
return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
if (sk_can_busy_loop(sock->sk)) {
/* poll once if requested by the syscall */
if (events & POLL_BUSY_LOOP)
sk_busy_loop(sock->sk, 1);
/* if this socket can poll_ll, tell the system call */
flag = POLL_BUSY_LOOP;
}
return sock->ops->poll(file, sock, wait) | flag;
}
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
......
......@@ -716,7 +716,7 @@ static __poll_t tipc_poll(struct file *file, struct socket *sock,
struct tipc_sock *tsk = tipc_sk(sk);
__poll_t revents = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
......
......@@ -2635,7 +2635,7 @@ static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wa
struct sock *sk = sock->sk;
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
......@@ -2672,7 +2672,7 @@ static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
unsigned int writable;
__poll_t mask;
sock_poll_wait(file, sk_sleep(sk), wait);
sock_poll_wait(file, wait);
mask = 0;
/* exceptional events? */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment