Commit 8dce2786 authored by Ursula Braun, committed by David S. Miller

net/smc: smc_poll improvements

Increase the socket refcount during poll wait.
Take the socket lock before checking socket state.
For a listening socket return a mask independent of state SMC_ACTIVE and
cover errors or closed state as well.
Get rid of the accept_q loop in smc_accept_poll().
Signed-off-by: Ursula Braun <ubraun@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent da05bf29
......@@ -1122,21 +1122,15 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
static unsigned int smc_accept_poll(struct sock *parent)
{
struct smc_sock *isk;
struct sock *sk;
lock_sock(parent);
list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
sk = (struct sock *)isk;
struct smc_sock *isk = smc_sk(parent);
int mask = 0;
if (sk->sk_state == SMC_ACTIVE) {
release_sock(parent);
return POLLIN | POLLRDNORM;
}
}
release_sock(parent);
spin_lock(&isk->accept_q_lock);
if (!list_empty(&isk->accept_q))
mask = POLLIN | POLLRDNORM;
spin_unlock(&isk->accept_q_lock);
return 0;
return mask;
}
static unsigned int smc_poll(struct file *file, struct socket *sock,
......@@ -1147,9 +1141,15 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
struct smc_sock *smc;
int rc;
if (!sk)
return POLLNVAL;
smc = smc_sk(sock->sk);
sock_hold(sk);
lock_sock(sk);
if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
/* delegate to CLC child sock */
release_sock(sk);
mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
/* if non-blocking connect finished ... */
lock_sock(sk);
......@@ -1161,21 +1161,27 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
rc = smc_connect_rdma(smc);
if (rc < 0)
mask |= POLLERR;
else
/* success cases including fallback */
mask |= POLLOUT | POLLWRNORM;
}
}
release_sock(sk);
} else {
if (sk->sk_state != SMC_CLOSED) {
release_sock(sk);
sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == SMC_LISTEN)
/* woken up by sk_data_ready in smc_listen_work() */
mask |= smc_accept_poll(sk);
lock_sock(sk);
}
if (sk->sk_err)
mask |= POLLERR;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED))
mask |= POLLHUP;
if (sk->sk_state == SMC_LISTEN) {
/* woken up by sk_data_ready in smc_listen_work() */
mask = smc_accept_poll(sk);
} else {
if (atomic_read(&smc->conn.sndbuf_space) ||
(sk->sk_shutdown & SEND_SHUTDOWN)) {
sk->sk_shutdown & SEND_SHUTDOWN) {
mask |= POLLOUT | POLLWRNORM;
} else {
sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
......@@ -1183,15 +1189,15 @@ static unsigned int smc_poll(struct file *file, struct socket *sock,
}
if (atomic_read(&smc->conn.bytes_to_rcv))
mask |= POLLIN | POLLRDNORM;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
(sk->sk_state == SMC_CLOSED))
mask |= POLLHUP;
if (sk->sk_shutdown & RCV_SHUTDOWN)
mask |= POLLIN | POLLRDNORM | POLLRDHUP;
if (sk->sk_state == SMC_APPCLOSEWAIT1)
mask |= POLLIN;
}
}
release_sock(sk);
sock_put(sk);
return mask;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment