Commit 02fc6c36 authored by Tom Tucker, committed by J. Bruce Fields

svc: Move sk_flags to the svc_xprt structure

This functionally trivial change moves the transport-independent sk_flags
field from the transport-specific svc_sock structure to the transport-independent
svc_xprt structure.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
parent e1b3157f
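
The change below is mechanical: every SK_* flag access on svsk->sk_flags becomes the corresponding XPT_* access on the embedded svsk->sk_xprt.xpt_flags. A minimal before/after sketch, illustrative only, using the names from this diff:

    /* before: flags live in the transport-specific svc_sock */
    set_bit(SK_DATA, &svsk->sk_flags);

    /* after: flags live in the transport-independent svc_xprt,
     * embedded in svc_sock as sk_xprt */
    set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);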
@@ -36,6 +36,18 @@ struct svc_xprt {
 	struct svc_xprt_class	*xpt_class;
 	struct svc_xprt_ops	*xpt_ops;
 	struct kref		xpt_ref;
+	unsigned long		xpt_flags;
+#define	XPT_BUSY	0		/* enqueued/receiving */
+#define	XPT_CONN	1		/* conn pending */
+#define	XPT_CLOSE	2		/* dead or dying */
+#define	XPT_DATA	3		/* data pending */
+#define	XPT_TEMP	4		/* connected transport */
+#define	XPT_DEAD	6		/* transport closed */
+#define	XPT_CHNGBUF	7		/* need to change snd/rcv buf sizes */
+#define	XPT_DEFERRED	8		/* deferred request pending */
+#define	XPT_OLD		9		/* used for xprt aging mark+sweep */
+#define	XPT_DETACHED	10		/* detached from tempsocks list */
+#define	XPT_LISTENER	11		/* listening endpoint */
 };

 int	svc_reg_xprt_class(struct svc_xprt_class *);
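
Note that the XPT_* values are bit numbers, not masks: xpt_flags is a single unsigned long manipulated with the atomic bitops, exactly as the old sk_flags was. A sketch under that assumption (example_fn is a hypothetical helper, not part of this patch):

    #include <linux/bitops.h>

    static void example_fn(struct svc_xprt *xprt)
    {
        set_bit(XPT_DATA, &xprt->xpt_flags);    /* atomic set of bit 3 */
        if (test_bit(XPT_DEAD, &xprt->xpt_flags))
            return;                             /* dead transports stay idle */
    }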
@@ -24,19 +24,6 @@ struct svc_sock {
 	struct svc_pool *	sk_pool;	/* current pool iff queued */
 	struct svc_serv *	sk_server;	/* service for this socket */
-	unsigned long		sk_flags;
-#define	SK_BUSY		0			/* enqueued/receiving */
-#define	SK_CONN		1			/* conn pending */
-#define	SK_CLOSE	2			/* dead or dying */
-#define	SK_DATA		3			/* data pending */
-#define	SK_TEMP		4			/* temp (TCP) socket */
-#define	SK_DEAD		6			/* socket closed */
-#define	SK_CHNGBUF	7			/* need to change snd/rcv buffer sizes */
-#define	SK_DEFERRED	8			/* request on sk_deferred */
-#define	SK_OLD		9			/* used for temp socket aging mark+sweep */
-#define	SK_DETACHED	10			/* detached from tempsocks list */
-#define	SK_LISTENER	11			/* listening endpoint */
 	atomic_t		sk_reserved;	/* space on outq that is reserved */
 	spinlock_t		sk_lock;	/* protects sk_deferred and
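
The renaming is one-to-one, so existing bit values are preserved (bit 5 is unassigned in both the old and the new set):

    SK_BUSY     -> XPT_BUSY        SK_CHNGBUF  -> XPT_CHNGBUF
    SK_CONN     -> XPT_CONN        SK_DEFERRED -> XPT_DEFERRED
    SK_CLOSE    -> XPT_CLOSE       SK_OLD      -> XPT_OLD
    SK_DATA     -> XPT_DATA        SK_DETACHED -> XPT_DETACHED
    SK_TEMP     -> XPT_TEMP        SK_LISTENER -> XPT_LISTENER
    SK_DEAD     -> XPT_DEAD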
@@ -56,22 +56,23 @@
  *	BKL protects svc_serv->sv_nrthread.
  *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
  *	and the ->sk_info_authunix cache.
- *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
+ *	svc_sock->sk_xprt.xpt_flags.XPT_BUSY prevents a svc_sock being
+ *	enqueued multiply.
  *
  *	Some flags can be set to certain values at any time
  *	providing that certain rules are followed:
  *
- *	SK_CONN, SK_DATA, can be set or cleared at any time.
+ *	XPT_CONN, XPT_DATA, can be set or cleared at any time.
  *		after a set, svc_sock_enqueue must be called.
  *		after a clear, the socket must be read/accepted
  *		 if this succeeds, it must be set again.
- *	SK_CLOSE can set at any time. It is never cleared.
- *	xpt_ref contains a bias of '1' until SK_DEAD is set.
+ *	XPT_CLOSE can set at any time. It is never cleared.
+ *	xpt_ref contains a bias of '1' until XPT_DEAD is set.
  *	 so when xprt_ref hits zero, we know the transport is dead
  *	 and no-one is using it.
- *	SK_DEAD can only be set while SK_BUSY is held which ensures
+ *	XPT_DEAD can only be set while XPT_BUSY is held which ensures
  *	no other thread will be using the socket or will try to
- *	set SK_DEAD.
+ *	set XPT_DEAD.
 *
 */
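
The XPT_BUSY rule in the comment above is a try-lock built from one bit. Condensed from svc_sock_enqueue below (error paths elided), the pattern is:

    if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
        /* somebody else owns the socket; they will notice
         * any flags we set and re-enqueue as needed */
        goto out_unlock;
    }
    /* we own the socket until svc_sock_received() clears XPT_BUSY */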
@@ -219,10 +220,10 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	struct svc_rqst	*rqstp;
 	int cpu;
-	if (!(svsk->sk_flags &
-	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
+	if (!(svsk->sk_xprt.xpt_flags &
+	      ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED))))
 		return;
-	if (test_bit(SK_DEAD, &svsk->sk_flags))
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
 		return;
 	cpu = get_cpu();

@@ -236,7 +237,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		printk(KERN_ERR
 			"svc_sock_enqueue: threads and sockets both waiting??\n");
-	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
 		/* Don't enqueue dead sockets */
 		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
 		goto out_unlock;

@@ -244,10 +245,10 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	/* Mark socket as busy. It will remain in this state until the
 	 * server has processed all pending data and put the socket back
-	 * on the idle list.  We update SK_BUSY atomically because
+	 * on the idle list.  We update XPT_BUSY atomically because
 	 * it also guards against trying to enqueue the svc_sock twice.
 	 */
-	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
+	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
 		/* Don't enqueue socket while already enqueued */
 		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
 		goto out_unlock;

@@ -256,11 +257,11 @@ svc_sock_enqueue(struct svc_sock *svsk)
 	svsk->sk_pool = pool;
 	/* Handle pending connection */
-	if (test_bit(SK_CONN, &svsk->sk_flags))
+	if (test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags))
 		goto process;
 	/* Handle close in-progress */
-	if (test_bit(SK_CLOSE, &svsk->sk_flags))
+	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags))
 		goto process;
 	/* Check if we have space to reply to a request */

@@ -268,7 +269,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
 		/* Don't enqueue while not enough space for reply */
 		dprintk("svc: no write space, socket %p not enqueued\n", svsk);
 		svsk->sk_pool = NULL;
-		clear_bit(SK_BUSY, &svsk->sk_flags);
+		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 		goto out_unlock;
 	}

@@ -324,14 +325,14 @@ svc_sock_dequeue(struct svc_pool *pool)
 /*
  * Having read something from a socket, check whether it
  * needs to be re-enqueued.
- * Note: SK_DATA only gets cleared when a read-attempt finds
+ * Note: XPT_DATA only gets cleared when a read-attempt finds
  * no (or insufficient) data.
  */
 static inline void
 svc_sock_received(struct svc_sock *svsk)
 {
 	svsk->sk_pool = NULL;
-	clear_bit(SK_BUSY, &svsk->sk_flags);
+	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	svc_sock_enqueue(svsk);
 }
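
The order in svc_sock_received matters: XPT_BUSY is cleared before svc_sock_enqueue is re-run, so any XPT_DATA, XPT_CONN, or XPT_DEFERRED bit set while the socket was busy re-queues it instead of being lost. The receive paths below follow the matching rule from the locking comment; roughly (do_read is a hypothetical stand-in for the transport's read step):

    clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);     /* before reading */
    len = do_read(svsk);
    if (len > 0)
        set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);   /* more may remain */
    svc_sock_received(svsk);    /* drop XPT_BUSY, maybe re-enqueue */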
@@ -680,8 +681,9 @@ svc_udp_data_ready(struct sock *sk, int count)
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
-			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
-		set_bit(SK_DATA, &svsk->sk_flags);
+			svsk, sk, count,
+			test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))

@@ -698,7 +700,7 @@ svc_write_space(struct sock *sk)
 	if (svsk) {
 		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
-			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
+			svsk, sk, test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags));
 		svc_sock_enqueue(svsk);
 	}

@@ -748,7 +750,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		.msg_flags = MSG_DONTWAIT,
 	};
-	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* udp sockets need large rcvbuf as all pending
 		 * requests are still in that buffer.  sndbuf must
 		 * also be large enough that there is enough space

@@ -766,7 +768,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		return svc_deferred_recv(rqstp);
 	}
-	clear_bit(SK_DATA, &svsk->sk_flags);
+	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	skb = NULL;
 	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
 			     0, 0, MSG_PEEK | MSG_DONTWAIT);

@@ -777,7 +779,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		if (err != -EAGAIN) {
 			/* possibly an icmp error */
 			dprintk("svc: recvfrom returned error %d\n", -err);
-			set_bit(SK_DATA, &svsk->sk_flags);
+			set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		}
 		svc_sock_received(svsk);
 		return -EAGAIN;

@@ -789,7 +791,7 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
 		   need that much accuracy */
 	}
 	svsk->sk_sk->sk_stamp = skb->tstamp;
-	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* there may be more data... */
 	/*
 	 * Maybe more packets - kick another thread ASAP.

@@ -936,8 +938,8 @@ svc_udp_init(struct svc_sock *svsk)
 			    3 * svsk->sk_server->sv_max_mesg,
 			    3 * svsk->sk_server->sv_max_mesg);
-	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
-	set_bit(SK_CHNGBUF, &svsk->sk_flags);
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); /* might have come in before data_ready set up */
+	set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
@@ -971,7 +973,7 @@ svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
 	 */
 	if (sk->sk_state == TCP_LISTEN) {
 		if (svsk) {
-			set_bit(SK_CONN, &svsk->sk_flags);
+			set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 			svc_sock_enqueue(svsk);
 		} else
 			printk("svc: socket %p: no user data\n", sk);

@@ -995,7 +997,7 @@ svc_tcp_state_change(struct sock *sk)
 	if (!svsk)
 		printk("svc: socket %p: no user data\n", sk);
 	else {
-		set_bit(SK_CLOSE, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))

@@ -1010,7 +1012,7 @@ svc_tcp_data_ready(struct sock *sk, int count)
 	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
 		sk, sk->sk_user_data);
 	if (svsk) {
-		set_bit(SK_DATA, &svsk->sk_flags);
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		svc_sock_enqueue(svsk);
 	}
 	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))

@@ -1050,7 +1052,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 	if (!sock)
 		return NULL;
-	clear_bit(SK_CONN, &svsk->sk_flags);
+	clear_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	err = kernel_accept(sock, &newsock, O_NONBLOCK);
 	if (err < 0) {
 		if (err == -ENOMEM)

@@ -1061,8 +1063,7 @@ static struct svc_xprt *svc_tcp_accept(struct svc_xprt *xprt)
 				serv->sv_name, -err);
 		return NULL;
 	}
-
-	set_bit(SK_CONN, &svsk->sk_flags);
+	set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	err = kernel_getpeername(newsock, sin, &slen);
 	if (err < 0) {

@@ -1127,16 +1128,16 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	int		pnum, vlen;
 	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
-		svsk, test_bit(SK_DATA, &svsk->sk_flags),
-		test_bit(SK_CONN, &svsk->sk_flags),
-		test_bit(SK_CLOSE, &svsk->sk_flags));
+		svsk, test_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags),
+		test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags));
 	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
 		svc_sock_received(svsk);
 		return svc_deferred_recv(rqstp);
 	}
-	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
+	if (test_and_clear_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags))
 		/* sndbuf needs to have room for one request
 		 * per thread, otherwise we can stall even when the
 		 * network isn't a bottleneck.
@@ -1153,7 +1154,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 			    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
 			    3 * serv->sv_max_mesg);
-	clear_bit(SK_DATA, &svsk->sk_flags);
+	clear_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	/* Receive data. If we haven't got the record length yet, get
 	 * the next four bytes. Otherwise try to gobble up as much as

@@ -1212,7 +1213,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 		return -EAGAIN; /* record not complete */
 	}
 	len = svsk->sk_reclen;
-	set_bit(SK_DATA, &svsk->sk_flags);
+	set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 	vec = rqstp->rq_vec;
 	vec[0] = rqstp->rq_arg.head[0];

@@ -1255,7 +1256,7 @@ svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	return len;
  err_delete:
-	set_bit(SK_CLOSE, &svsk->sk_flags);
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	return -EAGAIN;
  error:

@@ -1288,7 +1289,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
 	reclen = htonl(0x80000000|((xbufp->len ) - 4));
 	memcpy(xbufp->head[0].iov_base, &reclen, 4);
-	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
+	if (test_bit(XPT_DEAD, &rqstp->rq_sock->sk_xprt.xpt_flags))
 		return -ENOTCONN;
 	sent = svc_sendto(rqstp, &rqstp->rq_res);

@@ -1297,7 +1298,7 @@ svc_tcp_sendto(struct svc_rqst *rqstp)
 			rqstp->rq_sock->sk_server->sv_name,
 			(sent<0)?"got error":"sent only",
 			sent, xbufp->len);
-		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
+		set_bit(XPT_CLOSE, &rqstp->rq_sock->sk_xprt.xpt_flags);
 		svc_sock_enqueue(rqstp->rq_sock);
 		sent = -EAGAIN;
 	}
@@ -1387,9 +1388,9 @@ svc_tcp_init(struct svc_sock *svsk)
 	if (sk->sk_state == TCP_LISTEN) {
 		dprintk("setting up TCP socket for listening\n");
-		set_bit(SK_LISTENER, &svsk->sk_flags);
+		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
 		sk->sk_data_ready = svc_tcp_listen_data_ready;
-		set_bit(SK_CONN, &svsk->sk_flags);
+		set_bit(XPT_CONN, &svsk->sk_xprt.xpt_flags);
 	} else {
 		dprintk("setting up TCP socket for reading\n");
 		sk->sk_state_change = svc_tcp_state_change;

@@ -1409,10 +1410,10 @@ svc_tcp_init(struct svc_sock *svsk)
 			    3 * svsk->sk_server->sv_max_mesg,
 			    3 * svsk->sk_server->sv_max_mesg);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
-		set_bit(SK_DATA, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
 		if (sk->sk_state != TCP_ESTABLISHED)
-			set_bit(SK_CLOSE, &svsk->sk_flags);
+			set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 	}
 }

@@ -1429,12 +1430,12 @@ svc_sock_update_bufs(struct svc_serv *serv)
 	list_for_each(le, &serv->sv_permsocks) {
 		struct svc_sock *svsk =
 			list_entry(le, struct svc_sock, sk_list);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	}
 	list_for_each(le, &serv->sv_tempsocks) {
 		struct svc_sock *svsk =
 			list_entry(le, struct svc_sock, sk_list);
-		set_bit(SK_CHNGBUF, &svsk->sk_flags);
+		set_bit(XPT_CHNGBUF, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock_bh(&serv->sv_lock);
 }

@@ -1471,7 +1472,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 		svsk = list_entry(serv->sv_tempsocks.prev,
 				  struct svc_sock,
 				  sk_list);
-		set_bit(SK_CLOSE, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
 		svc_xprt_get(&svsk->sk_xprt);
 	}
 	spin_unlock_bh(&serv->sv_lock);
@@ -1575,10 +1576,10 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 	spin_unlock_bh(&pool->sp_lock);
 	len = 0;
-	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
-		dprintk("svc_recv: found SK_CLOSE\n");
+	if (test_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags)) {
+		dprintk("svc_recv: found XPT_CLOSE\n");
 		svc_delete_socket(svsk);
-	} else if (test_bit(SK_LISTENER, &svsk->sk_flags)) {
+	} else if (test_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags)) {
 		struct svc_xprt *newxpt;
 		newxpt = svsk->sk_xprt.xpt_ops->xpo_accept(&svsk->sk_xprt);
 		if (newxpt) {

@@ -1605,7 +1606,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
 		return -EAGAIN;
 	}
 	svsk->sk_lastrecv = get_seconds();
-	clear_bit(SK_OLD, &svsk->sk_flags);
+	clear_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags);
 	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
 	rqstp->rq_chandle.defer = svc_defer;

@@ -1652,7 +1653,7 @@ svc_send(struct svc_rqst *rqstp)
 	/* Grab svsk->sk_mutex to serialize outgoing data. */
 	mutex_lock(&svsk->sk_mutex);
-	if (test_bit(SK_DEAD, &svsk->sk_flags))
+	if (test_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags))
 		len = -ENOTCONN;
 	else
 		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);

@@ -1688,21 +1689,21 @@ svc_age_temp_sockets(unsigned long closure)
 	list_for_each_safe(le, next, &serv->sv_tempsocks) {
 		svsk = list_entry(le, struct svc_sock, sk_list);
-		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
+		if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
 			continue;
 		if (atomic_read(&svsk->sk_xprt.xpt_ref.refcount) > 1
-		    || test_bit(SK_BUSY, &svsk->sk_flags))
+		    || test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
 			continue;
 		svc_xprt_get(&svsk->sk_xprt);
 		list_move(le, &to_be_aged);
-		set_bit(SK_CLOSE, &svsk->sk_flags);
-		set_bit(SK_DETACHED, &svsk->sk_flags);
+		set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+		set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock_bh(&serv->sv_lock);
 	while (!list_empty(&to_be_aged)) {
 		le = to_be_aged.next;
-		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
+		/* fiddling the sk_list node is safe 'cos we're XPT_DETACHED */
 		list_del_init(le);
 		svsk = list_entry(le, struct svc_sock, sk_list);
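
svc_age_temp_sockets is a two-pass mark and sweep over XPT_OLD: each timer run marks every temp socket, and a socket found already marked (and neither referenced nor XPT_BUSY) has seen no traffic for a full period, so it is flagged for teardown. In outline:

    if (!test_and_set_bit(XPT_OLD, &svsk->sk_xprt.xpt_flags))
        continue;   /* freshly marked; give it one more period */
    if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
        continue;   /* currently in use, so not idle */
    set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);   /* sweep it */

svc_recv clears XPT_OLD (see the @@ -1605 hunk above) whenever real traffic arrives, which is what un-marks a live socket.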
@@ -1748,7 +1749,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 		return NULL;
 	}
-	set_bit(SK_BUSY, &svsk->sk_flags);
+	set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	inet->sk_user_data = svsk;
 	svsk->sk_sock = sock;
 	svsk->sk_sk = inet;

@@ -1770,7 +1771,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	spin_lock_bh(&serv->sv_lock);
 	if (is_temporary) {
-		set_bit(SK_TEMP, &svsk->sk_flags);
+		set_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
 		list_add(&svsk->sk_list, &serv->sv_tempsocks);
 		serv->sv_tmpcnt++;
 		if (serv->sv_temptimer.function == NULL) {

@@ -1781,7 +1782,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 				  jiffies + svc_conn_age_period * HZ);
 		}
 	} else {
-		clear_bit(SK_TEMP, &svsk->sk_flags);
+		clear_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags);
 		list_add(&svsk->sk_list, &serv->sv_permsocks);
 	}
 	spin_unlock_bh(&serv->sv_lock);

@@ -1931,7 +1932,7 @@ svc_delete_socket(struct svc_sock *svsk)
 	spin_lock_bh(&serv->sv_lock);
-	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
+	if (!test_and_set_bit(XPT_DETACHED, &svsk->sk_xprt.xpt_flags))
 		list_del_init(&svsk->sk_list);
 	/*
 	 * We used to delete the svc_sock from whichever list

@@ -1940,9 +1941,9 @@ svc_delete_socket(struct svc_sock *svsk)
 	 * while still attached to a queue, the queue itself
 	 * is about to be destroyed (in svc_destroy).
 	 */
-	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
+	if (!test_and_set_bit(XPT_DEAD, &svsk->sk_xprt.xpt_flags)) {
 		BUG_ON(atomic_read(&svsk->sk_xprt.xpt_ref.refcount) < 2);
-		if (test_bit(SK_TEMP, &svsk->sk_flags))
+		if (test_bit(XPT_TEMP, &svsk->sk_xprt.xpt_flags))
 			serv->sv_tmpcnt--;
 		svc_xprt_put(&svsk->sk_xprt);
 	}

@@ -1952,26 +1953,26 @@ svc_delete_socket(struct svc_sock *svsk)
 static void svc_close_socket(struct svc_sock *svsk)
 {
-	set_bit(SK_CLOSE, &svsk->sk_flags);
-	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+	if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
 		/* someone else will have to effect the close */
 		return;
 	svc_xprt_get(&svsk->sk_xprt);
 	svc_delete_socket(svsk);
-	clear_bit(SK_BUSY, &svsk->sk_flags);
+	clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	svc_xprt_put(&svsk->sk_xprt);
 }

 void svc_force_close_socket(struct svc_sock *svsk)
 {
-	set_bit(SK_CLOSE, &svsk->sk_flags);
-	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
+	set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
+	if (test_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags)) {
 		/* Waiting to be processed, but no threads left,
 		 * So just remove it from the waiting list
 		 */
 		list_del_init(&svsk->sk_ready);
-		clear_bit(SK_BUSY, &svsk->sk_flags);
+		clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);
 	}
 	svc_close_socket(svsk);
 }
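
Both close paths enforce the same invariant: only the holder of XPT_BUSY tears the socket down. Stripped of the reference counting shown above, svc_close_socket reduces to:

    set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
    if (test_and_set_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags))
        return;                 /* the current owner will see XPT_CLOSE */
    svc_delete_socket(svsk);    /* we won the bit; do the teardown */
    clear_bit(XPT_BUSY, &svsk->sk_xprt.xpt_flags);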
@@ -1996,7 +1997,7 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	spin_lock(&svsk->sk_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
 	spin_unlock(&svsk->sk_lock);
-	set_bit(SK_DEFERRED, &svsk->sk_flags);
+	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	svc_sock_enqueue(svsk);
 	svc_xprt_put(&svsk->sk_xprt);
 }

@@ -2059,16 +2060,16 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 {
 	struct svc_deferred_req *dr = NULL;
-	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
+	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
 		return NULL;
 	spin_lock(&svsk->sk_lock);
-	clear_bit(SK_DEFERRED, &svsk->sk_flags);
+	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
 				struct svc_deferred_req,
 				handle.recent);
 		list_del_init(&dr->handle.recent);
-		set_bit(SK_DEFERRED, &svsk->sk_flags);
+		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	}
 	spin_unlock(&svsk->sk_lock);
 	return dr;
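
svc_deferred_dequeue keeps XPT_DEFERRED consistent with the sk_deferred list under sk_lock: the bit is cleared while the list is inspected and set again only if entries remain, so a wakeup is never lost but the flag never advertises an empty list. Callers in the receive paths use it as in this sketch (mirroring the svc_tcp_recvfrom hunk above):

    rqstp->rq_deferred = svc_deferred_dequeue(svsk);
    if (rqstp->rq_deferred) {
        svc_sock_received(svsk);            /* release XPT_BUSY */
        return svc_deferred_recv(rqstp);    /* replay the saved request */
    }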