Commit e387819a authored by Trond Myklebust's avatar Trond Myklebust Committed by Linus Torvalds

[PATCH] Do RPC over TCP reply message delivery in sock->data_ready()

xprt.c:
  Speed up synchronous RPC over TCP calls by having the
  replies delivered by the IPV4 "bottom half", instead of
  switching to the rpciod process in order to call recvmsg().
   - Remove sock_recvmsg() interface.
   - Remove rpc_xprt_pending list and rpciod_tcp_dispatcher() interface.
   - Use the new tcp_read_sock() interface to deliver data directly
     from within tcp_data_ready().
sched.c:
   - Remove references to rpciod_tcp_dispatcher.
xprt.h:
   - New set of flags to reflect the TCP record read state.

Cheers,
  Trond
parent d755a07e
...@@ -120,6 +120,11 @@ struct rpc_rqst { ...@@ -120,6 +120,11 @@ struct rpc_rqst {
#define rq_rnr rq_rcv_buf.io_nr #define rq_rnr rq_rcv_buf.io_nr
#define rq_rlen rq_rcv_buf.io_len #define rq_rlen rq_rcv_buf.io_len
#define XPRT_LAST_FRAG (1 << 0)
#define XPRT_COPY_RECM (1 << 1)
#define XPRT_COPY_XID (1 << 2)
#define XPRT_COPY_DATA (1 << 3)
struct rpc_xprt { struct rpc_xprt {
struct socket * sock; /* BSD socket layer */ struct socket * sock; /* BSD socket layer */
struct sock * inet; /* INET layer */ struct sock * inet; /* INET layer */
...@@ -140,18 +145,17 @@ struct rpc_xprt { ...@@ -140,18 +145,17 @@ struct rpc_xprt {
unsigned long sockstate; /* Socket state */ unsigned long sockstate; /* Socket state */
unsigned char shutdown : 1, /* being shut down */ unsigned char shutdown : 1, /* being shut down */
nocong : 1, /* no congestion control */ nocong : 1, /* no congestion control */
stream : 1, /* TCP */ stream : 1; /* TCP */
tcp_more : 1; /* more record fragments */
/* /*
* State of TCP reply receive stuff * State of TCP reply receive stuff
*/ */
u32 tcp_recm; /* Fragment header */ u32 tcp_recm, /* Fragment header */
u32 tcp_xid; /* Current XID */ tcp_xid, /* Current XID */
unsigned int tcp_reclen, /* fragment length */ tcp_reclen, /* fragment length */
tcp_offset, /* fragment offset */ tcp_offset; /* fragment offset */
tcp_copied; /* copied to request */ unsigned long tcp_copied, /* copied to request */
struct list_head rx_pending; /* receive pending list */ tcp_flags;
/* /*
* Send stuff * Send stuff
...@@ -185,8 +189,6 @@ int xprt_adjust_timeout(struct rpc_timeout *); ...@@ -185,8 +189,6 @@ int xprt_adjust_timeout(struct rpc_timeout *);
void xprt_release(struct rpc_task *); void xprt_release(struct rpc_task *);
void xprt_reconnect(struct rpc_task *); void xprt_reconnect(struct rpc_task *);
int xprt_clear_backlog(struct rpc_xprt *); int xprt_clear_backlog(struct rpc_xprt *);
int xprt_tcp_pending(void);
void __rpciod_tcp_dispatcher(void);
#define XPRT_WSPACE 0 #define XPRT_WSPACE 0
#define XPRT_CONNECT 1 #define XPRT_CONNECT 1
...@@ -200,13 +202,6 @@ void __rpciod_tcp_dispatcher(void); ...@@ -200,13 +202,6 @@ void __rpciod_tcp_dispatcher(void);
#define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate)) #define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate)) #define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate))
static inline
void rpciod_tcp_dispatcher(void)
{
if (xprt_tcp_pending())
__rpciod_tcp_dispatcher();
}
#endif /* __KERNEL__*/ #endif /* __KERNEL__*/
#endif /* _LINUX_SUNRPC_XPRT_H */ #endif /* _LINUX_SUNRPC_XPRT_H */
...@@ -704,9 +704,6 @@ __rpc_schedule(void) ...@@ -704,9 +704,6 @@ __rpc_schedule(void)
dprintk("RPC: rpc_schedule enter\n"); dprintk("RPC: rpc_schedule enter\n");
while (1) { while (1) {
/* Ensure equal rights for tcp tasks... */
rpciod_tcp_dispatcher();
spin_lock_bh(&rpc_queue_lock); spin_lock_bh(&rpc_queue_lock);
task_for_first(task, &schedq.tasks) { task_for_first(task, &schedq.tasks) {
...@@ -1030,7 +1027,7 @@ static DECLARE_MUTEX_LOCKED(rpciod_running); ...@@ -1030,7 +1027,7 @@ static DECLARE_MUTEX_LOCKED(rpciod_running);
static inline int static inline int
rpciod_task_pending(void) rpciod_task_pending(void)
{ {
return !list_empty(&schedq.tasks) || xprt_tcp_pending(); return !list_empty(&schedq.tasks);
} }
......
...@@ -64,6 +64,7 @@ ...@@ -64,6 +64,7 @@
#include <net/sock.h> #include <net/sock.h>
#include <net/checksum.h> #include <net/checksum.h>
#include <net/udp.h> #include <net/udp.h>
#include <net/tcp.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
...@@ -88,7 +89,6 @@ static void xprt_disconnect(struct rpc_xprt *); ...@@ -88,7 +89,6 @@ static void xprt_disconnect(struct rpc_xprt *);
static void xprt_reconn_status(struct rpc_task *task); static void xprt_reconn_status(struct rpc_task *task);
static struct socket *xprt_create_socket(int, struct rpc_timeout *); static struct socket *xprt_create_socket(int, struct rpc_timeout *);
static int xprt_bind_socket(struct rpc_xprt *, struct socket *); static int xprt_bind_socket(struct rpc_xprt *, struct socket *);
static void xprt_remove_pending(struct rpc_xprt *);
#ifdef RPC_DEBUG_DATA #ifdef RPC_DEBUG_DATA
/* /*
...@@ -269,43 +269,6 @@ xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req) ...@@ -269,43 +269,6 @@ xprt_sendmsg(struct rpc_xprt *xprt, struct rpc_rqst *req)
return result; return result;
} }
/*
* Read data from socket
*/
static int
xprt_recvmsg(struct rpc_xprt *xprt, struct iovec *iov, int nr, unsigned len, unsigned shift)
{
struct socket *sock = xprt->sock;
struct msghdr msg;
mm_segment_t oldfs;
struct iovec niv[MAX_IOVEC];
int result;
if (!sock)
return -ENOTCONN;
msg.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL;
msg.msg_iov = iov;
msg.msg_iovlen = nr;
msg.msg_name = NULL;
msg.msg_namelen = 0;
msg.msg_control = NULL;
msg.msg_controllen = 0;
/* Adjust the iovec if we've already filled it */
if (shift)
xprt_move_iov(&msg, niv, shift);
oldfs = get_fs(); set_fs(get_ds());
result = sock_recvmsg(sock, &msg, len, MSG_DONTWAIT);
set_fs(oldfs);
dprintk("RPC: xprt_recvmsg(iov %p, len %d) = %d\n",
iov, len, result);
return result;
}
/* /*
* Adjust RPC congestion window * Adjust RPC congestion window
* We use a time-smoothed congestion estimator to avoid heavy oscillation. * We use a time-smoothed congestion estimator to avoid heavy oscillation.
...@@ -423,7 +386,6 @@ xprt_disconnect(struct rpc_xprt *xprt) ...@@ -423,7 +386,6 @@ xprt_disconnect(struct rpc_xprt *xprt)
{ {
dprintk("RPC: disconnected transport %p\n", xprt); dprintk("RPC: disconnected transport %p\n", xprt);
xprt_clear_connected(xprt); xprt_clear_connected(xprt);
xprt_remove_pending(xprt);
rpc_wake_up_status(&xprt->pending, -ENOTCONN); rpc_wake_up_status(&xprt->pending, -ENOTCONN);
} }
...@@ -473,7 +435,7 @@ xprt_reconnect(struct rpc_task *task) ...@@ -473,7 +435,7 @@ xprt_reconnect(struct rpc_task *task)
xprt->tcp_offset = 0; xprt->tcp_offset = 0;
xprt->tcp_reclen = 0; xprt->tcp_reclen = 0;
xprt->tcp_copied = 0; xprt->tcp_copied = 0;
xprt->tcp_more = 0; xprt->tcp_flags = XPRT_COPY_RECM | XPRT_COPY_XID;
/* Now connect it asynchronously. */ /* Now connect it asynchronously. */
dprintk("RPC: %4d connecting new socket\n", task->tk_pid); dprintk("RPC: %4d connecting new socket\n", task->tk_pid);
...@@ -716,309 +678,229 @@ udp_data_ready(struct sock *sk, int len) ...@@ -716,309 +678,229 @@ udp_data_ready(struct sock *sk, int len)
wake_up_interruptible(sk->sleep); wake_up_interruptible(sk->sleep);
} }
typedef struct {
struct sk_buff *skb;
unsigned offset;
size_t count;
} skb_reader_t;
/* /*
* TCP read fragment marker * Copy from an skb into memory and shrink the skb.
*/ */
static inline int static inline size_t
tcp_read_fraghdr(struct rpc_xprt *xprt) tcp_copy_data(skb_reader_t *desc, void *p, size_t len)
{ {
struct iovec riov; if (len > desc->count)
int want, result; len = desc->count;
skb_copy_bits(desc->skb, desc->offset, p, len);
if (xprt->tcp_offset >= sizeof(xprt->tcp_recm)) desc->offset += len;
goto done; desc->count -= len;
return len;
want = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
dprintk("RPC: reading header (%d bytes)\n", want);
do {
riov.iov_base = ((u8*) &xprt->tcp_recm) + xprt->tcp_offset;
riov.iov_len = want;
result = xprt_recvmsg(xprt, &riov, 1, want, 0);
if (result < 0)
return result;
xprt->tcp_offset += result;
want -= result;
} while (want);
/* Get the record length and mask out the last fragment bit */
xprt->tcp_reclen = ntohl(xprt->tcp_recm);
xprt->tcp_more = (xprt->tcp_reclen & 0x80000000) ? 0 : 1;
xprt->tcp_reclen &= 0x7fffffff;
dprintk("RPC: New record reclen %d morefrags %d\n",
xprt->tcp_reclen, xprt->tcp_more);
done:
return xprt->tcp_reclen + sizeof(xprt->tcp_recm) - xprt->tcp_offset;
} }
/* /*
* TCP read xid * TCP read fragment marker
*/ */
static inline int static inline void
tcp_read_xid(struct rpc_xprt *xprt, int avail) tcp_read_fraghdr(struct rpc_xprt *xprt, skb_reader_t *desc)
{ {
struct iovec riov; size_t len, used;
int want, result; char *p;
if (xprt->tcp_copied >= sizeof(xprt->tcp_xid) || !avail) p = ((char *) &xprt->tcp_recm) + xprt->tcp_offset;
goto done; len = sizeof(xprt->tcp_recm) - xprt->tcp_offset;
want = min_t(unsigned int, sizeof(xprt->tcp_xid) - xprt->tcp_copied, avail); used = tcp_copy_data(desc, p, len);
do { xprt->tcp_offset += used;
dprintk("RPC: reading xid (%d bytes)\n", want); if (used != len)
riov.iov_base = ((u8*) &xprt->tcp_xid) + xprt->tcp_copied; return;
riov.iov_len = want; xprt->tcp_reclen = ntohl(xprt->tcp_recm);
result = xprt_recvmsg(xprt, &riov, 1, want, 0); if (xprt->tcp_reclen & 0x80000000)
if (result < 0) xprt->tcp_flags |= XPRT_LAST_FRAG;
return result; else
xprt->tcp_copied += result; xprt->tcp_flags &= ~XPRT_LAST_FRAG;
xprt->tcp_offset += result; xprt->tcp_reclen &= 0x7fffffff;
want -= result; xprt->tcp_flags &= ~XPRT_COPY_RECM;
avail -= result; xprt->tcp_offset = 0;
} while (want); /* Sanity check of the record length */
done: if (xprt->tcp_reclen < 4) {
return avail; printk(KERN_ERR "RPC: Invalid TCP record fragment length\n");
xprt_disconnect(xprt);
}
dprintk("RPC: reading TCP record fragment of length %d\n",
xprt->tcp_reclen);
} }
/* static void
* TCP read and complete request tcp_check_recm(struct rpc_xprt *xprt)
*/
static inline int
tcp_read_request(struct rpc_xprt *xprt, struct rpc_rqst *req, int avail)
{ {
int want, result; if (xprt->tcp_offset == xprt->tcp_reclen) {
xprt->tcp_flags |= XPRT_COPY_RECM;
if (req->rq_rlen <= xprt->tcp_copied || !avail) xprt->tcp_offset = 0;
goto done; if (xprt->tcp_flags & XPRT_LAST_FRAG) {
want = min_t(unsigned int, req->rq_rlen - xprt->tcp_copied, avail); xprt->tcp_flags &= ~XPRT_COPY_DATA;
do { xprt->tcp_flags |= XPRT_COPY_XID;
dprintk("RPC: %4d TCP receiving %d bytes\n", xprt->tcp_copied = 0;
req->rq_task->tk_pid, want); }
}
result = xprt_recvmsg(xprt, req->rq_rvec, req->rq_rnr, want, xprt->tcp_copied);
if (result < 0)
return result;
xprt->tcp_copied += result;
xprt->tcp_offset += result;
avail -= result;
want -= result;
} while (want);
done:
if (req->rq_rlen > xprt->tcp_copied && xprt->tcp_more)
return avail;
dprintk("RPC: %4d received reply complete\n", req->rq_task->tk_pid);
xprt_complete_rqst(xprt, req, xprt->tcp_copied);
return avail;
} }
/* /*
* TCP discard extra bytes from a short read * TCP read xid
*/ */
static inline int static inline void
tcp_read_discard(struct rpc_xprt *xprt, int avail) tcp_read_xid(struct rpc_xprt *xprt, skb_reader_t *desc)
{ {
struct iovec riov; size_t len, used;
static u8 dummy[64]; char *p;
int want, result = 0;
len = sizeof(xprt->tcp_xid) - xprt->tcp_offset;
while (avail) { dprintk("RPC: reading XID (%Zu bytes)\n", len);
want = min_t(unsigned int, avail, sizeof(dummy)); p = ((char *) &xprt->tcp_xid) + xprt->tcp_offset;
riov.iov_base = dummy; used = tcp_copy_data(desc, p, len);
riov.iov_len = want; xprt->tcp_offset += used;
dprintk("RPC: TCP skipping %d bytes\n", want); if (used != len)
result = xprt_recvmsg(xprt, &riov, 1, want, 0); return;
if (result < 0) xprt->tcp_flags &= ~XPRT_COPY_XID;
return result; xprt->tcp_flags |= XPRT_COPY_DATA;
xprt->tcp_offset += result; xprt->tcp_copied = 4;
avail -= result; dprintk("RPC: reading reply for XID %08x\n", xprt->tcp_xid);
} tcp_check_recm(xprt);
return avail;
} }
/* /*
* TCP record receive routine * TCP read and complete request
* This is not the most efficient code since we call recvfrom thrice--
* first receiving the record marker, then the XID, then the data.
*
* The optimal solution would be a RPC support in the TCP layer, which
* would gather all data up to the next record marker and then pass us
* the list of all TCP segments ready to be copied.
*/ */
static int static inline void
tcp_input_record(struct rpc_xprt *xprt) tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
{ {
struct rpc_rqst *req = NULL; struct rpc_rqst *req;
struct rpc_task *task = NULL; struct iovec *iov;
int avail, result; char *p;
unsigned long skip;
dprintk("RPC: tcp_input_record\n"); size_t len, used;
int n;
if (xprt->shutdown)
return -EIO;
if (!xprt_connected(xprt))
return -ENOTCONN;
/* Read in a new fragment marker if necessary */
/* Can we ever really expect to get completely empty fragments? */
if ((result = tcp_read_fraghdr(xprt)) < 0)
return result;
avail = result;
/* Read in the xid if necessary */
if ((result = tcp_read_xid(xprt, avail)) < 0)
return result;
if (!(avail = result))
goto out_ok;
/* Find and lock the request corresponding to this xid */ /* Find and lock the request corresponding to this xid */
req = xprt_lookup_rqst(xprt, xprt->tcp_xid); req = xprt_lookup_rqst(xprt, xprt->tcp_xid);
if (req) { if (!req) {
task = req->rq_task; xprt->tcp_flags &= ~XPRT_COPY_DATA;
/* Read in the request data */ dprintk("RPC: XID %08x request not found!\n",
result = tcp_read_request(xprt, req, avail); xprt->tcp_xid);
rpc_unlock_task(task); return;
if (result < 0)
return result;
avail = result;
} }
skip = xprt->tcp_copied;
/* Skip over any trailing bytes on short reads */ iov = req->rq_rvec;
if ((result = tcp_read_discard(xprt, avail)) < 0) for (n = req->rq_rnr; n != 0; n--, iov++) {
return result; if (skip >= iov->iov_len) {
skip -= iov->iov_len;
out_ok: continue;
dprintk("RPC: tcp_input_record done (off %d reclen %d copied %d)\n", }
xprt->tcp_offset, xprt->tcp_reclen, xprt->tcp_copied); p = iov->iov_base;
result = xprt->tcp_reclen; len = iov->iov_len;
xprt->tcp_reclen = 0; if (skip) {
xprt->tcp_offset = 0; p += skip;
if (!xprt->tcp_more) len -= skip;
xprt->tcp_copied = 0; skip = 0;
return result; }
} if (xprt->tcp_offset + len > xprt->tcp_reclen)
len = xprt->tcp_reclen - xprt->tcp_offset;
/* used = tcp_copy_data(desc, p, len);
* TCP task queue stuff xprt->tcp_copied += used;
*/ xprt->tcp_offset += used;
LIST_HEAD(rpc_xprt_pending); /* List of xprts having pending tcp requests */ if (used != len)
break;
static inline if (xprt->tcp_copied == req->rq_rlen) {
void tcp_rpciod_queue(void) xprt->tcp_flags &= ~XPRT_COPY_DATA;
{ break;
rpciod_wake_up(); }
} if (xprt->tcp_offset == xprt->tcp_reclen) {
if (xprt->tcp_flags & XPRT_LAST_FRAG)
int xprt_tcp_pending(void) xprt->tcp_flags &= ~XPRT_COPY_DATA;
{ break;
int retval; }
spin_lock_bh(&rpc_queue_lock);
retval = !list_empty(&rpc_xprt_pending);
spin_unlock_bh(&rpc_queue_lock);
return retval;
}
static inline
void xprt_append_pending(struct rpc_xprt *xprt)
{
spin_lock_bh(&rpc_queue_lock);
if (list_empty(&xprt->rx_pending)) {
list_add(&xprt->rx_pending, rpc_xprt_pending.prev);
dprintk("RPC: xprt queue %p\n", xprt);
tcp_rpciod_queue();
} }
spin_unlock_bh(&rpc_queue_lock);
}
static if (!(xprt->tcp_flags & XPRT_COPY_DATA)) {
void xprt_remove_pending(struct rpc_xprt *xprt) dprintk("RPC: %4d received reply complete\n",
{ req->rq_task->tk_pid);
spin_lock_bh(&rpc_queue_lock); xprt_complete_rqst(xprt, req, xprt->tcp_copied);
if (!list_empty(&xprt->rx_pending)) {
list_del(&xprt->rx_pending);
INIT_LIST_HEAD(&xprt->rx_pending);
} }
spin_unlock_bh(&rpc_queue_lock); rpc_unlock_task(req->rq_task);
tcp_check_recm(xprt);
} }
static inline /*
struct rpc_xprt *xprt_remove_pending_next(void) * TCP discard extra bytes from a short read
*/
static inline void
tcp_read_discard(struct rpc_xprt *xprt, skb_reader_t *desc)
{ {
struct rpc_xprt *xprt = NULL; size_t len;
spin_lock_bh(&rpc_queue_lock); len = xprt->tcp_reclen - xprt->tcp_offset;
if (!list_empty(&rpc_xprt_pending)) { if (len > desc->count)
xprt = list_entry(rpc_xprt_pending.next, struct rpc_xprt, rx_pending); len = desc->count;
list_del(&xprt->rx_pending); desc->count -= len;
INIT_LIST_HEAD(&xprt->rx_pending); desc->offset += len;
} xprt->tcp_offset += len;
spin_unlock_bh(&rpc_queue_lock); tcp_check_recm(xprt);
return xprt;
} }
/* /*
* This is protected from tcp_data_ready and the stack as its run * TCP record receive routine
* inside of the RPC I/O daemon * We first have to grab the record marker, then the XID, then the data.
*/ */
void static int
__rpciod_tcp_dispatcher(void) tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
unsigned int offset, size_t len)
{ {
struct rpc_xprt *xprt; struct rpc_xprt *xprt = (struct rpc_xprt *)rd_desc->buf;
int safe_retry = 0, result; skb_reader_t desc = { skb, offset, len };
dprintk("rpciod_tcp_dispatcher: Queue Running\n");
/*
* Empty each pending socket
*/
while ((xprt = xprt_remove_pending_next()) != NULL) {
dprintk("rpciod_tcp_dispatcher: Processing %p\n", xprt);
do {
result = tcp_input_record(xprt);
} while (result >= 0);
if (safe_retry++ > 200) { dprintk("RPC: tcp_data_recv\n");
schedule(); do {
safe_retry = 0; /* Read in a new fragment marker if necessary */
/* Can we ever really expect to get completely empty fragments? */
if (xprt->tcp_flags & XPRT_COPY_RECM) {
tcp_read_fraghdr(xprt, &desc);
continue;
} }
} /* Read in the xid if necessary */
if (xprt->tcp_flags & XPRT_COPY_XID) {
tcp_read_xid(xprt, &desc);
continue;
}
/* Read in the request data */
if (xprt->tcp_flags & XPRT_COPY_DATA) {
tcp_read_request(xprt, &desc);
continue;
}
/* Skip over any trailing bytes on short reads */
tcp_read_discard(xprt, &desc);
} while (desc.count && xprt_connected(xprt));
dprintk("RPC: tcp_data_recv done\n");
return len - desc.count;
} }
/* static void tcp_data_ready(struct sock *sk, int bytes)
* data_ready callback for TCP. We can't just jump into the
* tcp recvmsg functions inside of the network receive bh or
* bad things occur. We queue it to pick up after networking
* is done.
*/
static void tcp_data_ready(struct sock *sk, int len)
{ {
struct rpc_xprt *xprt; struct rpc_xprt *xprt;
read_descriptor_t rd_desc;
dprintk("RPC: tcp_data_ready...\n"); dprintk("RPC: tcp_data_ready...\n");
if (!(xprt = xprt_from_sock(sk))) if (!(xprt = xprt_from_sock(sk))) {
{ printk("RPC: tcp_data_ready socket info not found!\n");
printk("Not a socket with xprt %p\n", sk); return;
goto out;
} }
if (xprt->shutdown) if (xprt->shutdown)
goto out; return;
xprt_append_pending(xprt);
dprintk("RPC: tcp_data_ready client %p\n", xprt); /* We use rd_desc to pass struct xprt to tcp_data_recv */
dprintk("RPC: state %x conn %d dead %d zapped %d\n", rd_desc.buf = (char *)xprt;
sk->state, xprt_connected(xprt), rd_desc.count = 65536;
sk->dead, sk->zapped); tcp_read_sock(sk, &rd_desc, tcp_data_recv);
out:
if (sk->sleep && waitqueue_active(sk->sleep))
wake_up_interruptible(sk->sleep);
} }
static void static void
tcp_state_change(struct sock *sk) tcp_state_change(struct sock *sk)
{ {
...@@ -1483,8 +1365,6 @@ xprt_setup(struct socket *sock, int proto, ...@@ -1483,8 +1365,6 @@ xprt_setup(struct socket *sock, int proto,
req->rq_next = NULL; req->rq_next = NULL;
xprt->free = xprt->slot; xprt->free = xprt->slot;
INIT_LIST_HEAD(&xprt->rx_pending);
dprintk("RPC: created transport %p\n", xprt); dprintk("RPC: created transport %p\n", xprt);
xprt_bind_socket(xprt, sock); xprt_bind_socket(xprt, sock);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment