Commit 3e9487c8
Authored Aug 21, 2003 by Trond Myklebust

Merge http://nfsclient.bkbits.net/linux-2.5
into fys.uio.no:/home/linux/bitkeeper/nfsclient-2.5

Parents: 7c2b7264, 957ca757

Showing 6 changed files, with 89 additions and 89 deletions:

    fs/nfs/dir.c                    +4   -2
    include/linux/sunrpc/timer.h    +0   -16
    include/linux/sunrpc/xprt.h     +5   -1
    net/sunrpc/clnt.c               +23  -12
    net/sunrpc/timer.c              +5   -7
    net/sunrpc/xprt.c               +52  -51
fs/nfs/dir.c

@@ -557,7 +557,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	/* Force a full look up iff the parent directory has changed */
 	if (nfs_check_verifier(dir, dentry)) {
 		if (nfs_lookup_verify_inode(inode, isopen))
-			goto out_bad;
+			goto out_zap_parent;
 		goto out_valid;
 	}
@@ -566,7 +566,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	if (memcmp(NFS_FH(inode), &fhandle, sizeof(struct nfs_fh)) != 0)
 		goto out_bad;
 	if (nfs_lookup_verify_inode(inode, isopen))
-		goto out_bad;
+		goto out_zap_parent;
 	goto out_valid_renew;
 }
@@ -587,6 +587,8 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
 	unlock_kernel();
 	dput(parent);
 	return 1;
+out_zap_parent:
+	nfs_zap_caches(dir);
 out_bad:
 	NFS_CACHEINV(dir);
 	if (inode && S_ISDIR(inode->i_mode)) {
include/linux/sunrpc/timer.h

@@ -15,7 +15,6 @@ struct rpc_rtt {
 	unsigned long timeo;	/* default timeout value */
 	unsigned long srtt[5];	/* smoothed round trip time << 3 */
 	unsigned long sdrtt[5];	/* smoothed medium deviation of RTT */
-	atomic_t ntimeouts;	/* Global count of the number of timeouts */
 };
@@ -23,19 +22,4 @@ extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
 extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
 extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);
-
-static inline void rpc_inc_timeo(struct rpc_rtt *rt)
-{
-	atomic_inc(&rt->ntimeouts);
-}
-
-static inline void rpc_clear_timeo(struct rpc_rtt *rt)
-{
-	atomic_set(&rt->ntimeouts, 0);
-}
-
-static inline int rpc_ntimeo(struct rpc_rtt *rt)
-{
-	return atomic_read(&rt->ntimeouts);
-}

 #endif /* _LINUX_SUNRPC_TIMER_H */
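
For reference, this is roughly what the header's public surface looks like after these removals. It is a sketch reconstructed only from the hunks above (lines the diff does not show are omitted): the global ntimeouts counter and its three inline accessors are gone, leaving the per-client estimator state plus three functions.

/* Sketch of include/linux/sunrpc/timer.h after this commit, reconstructed
 * from the hunks above; not a verbatim copy of the file. */
struct rpc_rtt {
	unsigned long timeo;	/* default timeout value */
	unsigned long srtt[5];	/* smoothed round trip time << 3 */
	unsigned long sdrtt[5];	/* smoothed medium deviation of RTT */
};

extern void rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo);
extern void rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m);
extern unsigned long rpc_calc_rto(struct rpc_rtt *rt, unsigned timer);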
include/linux/sunrpc/xprt.h

@@ -98,6 +98,10 @@ struct rpc_rqst {
 	struct list_head	rq_list;
+
+	struct xdr_buf		rq_private_buf;	/* The receive buffer
+						 * used in the softirq.
+						 */
 	/*
 	 * For authentication (e.g. auth_des)
 	 */
@@ -111,7 +115,7 @@ struct rpc_rqst {
 	unsigned long		rq_xtime;	/* when transmitted */
 	int			rq_ntimeo;
-	int			rq_nresend;
+	int			rq_ntrans;
 };

 #define rq_svec			rq_snd_buf.head
 #define rq_slen			rq_snd_buf.len
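
The new rq_private_buf field is filled in and consumed later in this commit (see the net/sunrpc/xprt.c and net/sunrpc/clnt.c hunks below): xprt_transmit() snapshots rq_rcv_buf into rq_private_buf under the socket lock before queueing the request on xprt->recv, the UDP/TCP receive paths then copy reply data through that snapshot, and call_decode() sanity-checks that the two descriptors still agree. A minimal user-space model of the idea is sketched here; it is not kernel code, and every name in it is invented for illustration.

#include <stdio.h>
#include <string.h>

struct xdr_buf_model { char *base; size_t len; };

struct rqst_model {
	struct xdr_buf_model rcv_buf;      /* owned and rebuilt by the task   */
	struct xdr_buf_model private_buf;  /* snapshot read at receive time   */
	int queued;                        /* already on the receive list?    */
};

/* transmit path: snapshot the descriptor before exposing the request */
static void transmit(struct rqst_model *req)
{
	if (!req->queued) {
		/* the real code does this under xprt->sock_lock */
		memcpy(&req->private_buf, &req->rcv_buf, sizeof(req->private_buf));
		req->queued = 1;
	}
}

/* receive path: the reply is copied via the snapshot, never via rcv_buf */
static void receive(struct rqst_model *req, const char *data, size_t n)
{
	if (n > req->private_buf.len)
		n = req->private_buf.len;
	memcpy(req->private_buf.base, data, n);
}

int main(void)
{
	char storage[16] = "";
	struct rqst_model req = {
		.rcv_buf = { storage, sizeof(storage) },
		.queued  = 0,
	};

	transmit(&req);
	receive(&req, "reply", 6);
	printf("%s\n", storage);  /* both descriptors reference the same storage */
	return 0;
}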
net/sunrpc/clnt.c

@@ -659,7 +659,7 @@ call_transmit(struct rpc_task *task)
 	if (task->tk_status < 0)
 		return;
 	task->tk_status = xprt_prepare_transmit(task);
-	if (task->tk_status < 0)
+	if (task->tk_status != 0)
 		return;
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
 	if (!task->tk_rqstp->rq_bytes_sent)
@@ -685,7 +685,7 @@ call_status(struct rpc_task *task)
 	struct rpc_rqst	*req = task->tk_rqstp;
 	int		status;

-	if (req->rq_received != 0)
+	if (req->rq_received > 0 && !req->rq_bytes_sent)
 		task->tk_status = req->rq_received;

 	dprintk("RPC: %4d call_status (status %d)\n",
@@ -744,14 +744,14 @@ call_timeout(struct rpc_task *task)
 	dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
 	if (clnt->cl_softrtry) {
-		if (clnt->cl_chatty && !task->tk_exit)
+		if (clnt->cl_chatty)
 			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
 				clnt->cl_protname, clnt->cl_server);
 		rpc_exit(task, -EIO);
 		return;
 	}

-	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN) &&
-	    rpc_ntimeo(&clnt->cl_rtt) > 7) {
+	if (clnt->cl_chatty && !(task->tk_flags & RPC_CALL_MAJORSEEN)) {
 		task->tk_flags |= RPC_CALL_MAJORSEEN;
 		printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
 			clnt->cl_protname, clnt->cl_server);
@@ -787,19 +787,26 @@ call_decode(struct rpc_task *task)
 	if (task->tk_status < 12) {
 		if (!clnt->cl_softrtry) {
-			task->tk_action = call_transmit;
+			task->tk_action = call_bind;
 			clnt->cl_stats->rpcretrans++;
-		} else {
-			printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
-				clnt->cl_protname, task->tk_status);
-			rpc_exit(task, -EIO);
+			goto out_retry;
 		}
+		printk(KERN_WARNING "%s: too small RPC reply size (%d bytes)\n",
+			clnt->cl_protname, task->tk_status);
+		rpc_exit(task, -EIO);
 		return;
 	}

+	/* Check that the softirq receive buffer is valid */
+	WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
+				sizeof(req->rq_rcv_buf)) != 0);
+
 	/* Verify the RPC header */
-	if (!(p = call_verify(task)))
-		return;
+	if (!(p = call_verify(task))) {
+		if (task->tk_action == NULL)
+			return;
+		goto out_retry;
+	}

 	/*
 	 * The following is an NFS-specific hack to cater for setuid
@@ -812,7 +819,7 @@ call_decode(struct rpc_task *task)
 		task->tk_flags ^= RPC_CALL_REALUID;
 		task->tk_action = call_bind;
 		task->tk_suid_retry--;
-		return;
+		goto out_retry;
 	}
 }
@@ -822,6 +829,10 @@ call_decode(struct rpc_task *task)
 	task->tk_status = decode(req, p, task->tk_msg.rpc_resp);
 	dprintk("RPC: %4d call_decode result %d\n", task->tk_pid,
					task->tk_status);
 	return;
+out_retry:
+	req->rq_received = 0;
+	task->tk_status = 0;
 }

 /*
net/sunrpc/timer.c

@@ -25,7 +25,7 @@
 #define RPC_RTO_MAX (60*HZ)
 #define RPC_RTO_INIT (HZ/5)
-#define RPC_RTO_MIN (2)
+#define RPC_RTO_MIN (HZ/10)

 void
 rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
@@ -41,8 +41,6 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 		rt->srtt[i] = init;
 		rt->sdrtt[i] = RPC_RTO_INIT;
 	}
-
-	atomic_set(&rt->ntimeouts, 0);
 }

 /*
@@ -52,7 +50,7 @@ rpc_init_rtt(struct rpc_rtt *rt, unsigned long timeo)
 void
 rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
 {
-	unsigned long *srtt, *sdrtt;
+	long *srtt, *sdrtt;

 	if (timer-- == 0)
 		return;
@@ -64,14 +62,14 @@ rpc_update_rtt(struct rpc_rtt *rt, unsigned timer, long m)
 	if (m == 0)
 		m = 1L;
-	srtt = &rt->srtt[timer];
+	srtt = (long *)&rt->srtt[timer];
 	m -= *srtt >> 3;
 	*srtt += m;
 	if (m < 0)
 		m = -m;
-	sdrtt = &rt->sdrtt[timer];
+	sdrtt = (long *)&rt->sdrtt[timer];
 	m -= *sdrtt >> 2;
 	*sdrtt += m;
@@ -101,7 +99,7 @@ rpc_calc_rto(struct rpc_rtt *rt, unsigned timer)
 	if (timer-- == 0)
 		return rt->timeo;

-	res = (rt->srtt[timer] >> 3) + rt->sdrtt[timer];
+	res = ((rt->srtt[timer] + 7) >> 3) + rt->sdrtt[timer];
 	if (res > RPC_RTO_MAX)
 		res = RPC_RTO_MAX;
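
The estimator above keeps srtt scaled by 8 and sdrtt scaled by 4, and this change makes rpc_calc_rto() round the srtt term up ((srtt + 7) >> 3) instead of truncating it. The following standalone user-space sketch reproduces the same arithmetic for a single timer slot; it is not kernel code, and the HZ value, the zero initial state (the kernel seeds these in rpc_init_rtt()), and the sample RTTs are made up for illustration.

#include <stdio.h>

#define HZ 1000				/* assumed tick rate, illustration only */
#define RPC_RTO_MAX (60*HZ)

static long srtt;			/* smoothed RTT, scaled by 8       */
static long sdrtt;			/* smoothed deviation, scaled by 4 */

/* same update rule as rpc_update_rtt() above, for one timer slot */
static void update_rtt(long m)		/* m = measured RTT in jiffies */
{
	if (m == 0)
		m = 1L;
	m -= srtt >> 3;
	srtt += m;			/* srtt/8 converges toward the samples */
	if (m < 0)
		m = -m;
	m -= sdrtt >> 2;
	sdrtt += m;			/* sdrtt/4 tracks the deviation */
}

/* same formula as the new rpc_calc_rto(): rounded srtt/8 plus sdrtt */
static long calc_rto(void)
{
	long res = ((srtt + 7) >> 3) + sdrtt;
	return res > RPC_RTO_MAX ? RPC_RTO_MAX : res;
}

int main(void)
{
	long samples[] = { 40, 55, 35, 200, 45 };	/* hypothetical RTTs */
	int i;

	for (i = 0; i < 5; i++) {
		update_rtt(samples[i]);
		printf("sample %3ld -> rto %ld jiffies\n", samples[i], calc_rto());
	}
	return 0;
}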
net/sunrpc/xprt.c

@@ -138,15 +138,22 @@ xprt_from_sock(struct sock *sk)
 static int
 __xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
 {
+	struct rpc_rqst *req = task->tk_rqstp;
+
 	if (!xprt->snd_task) {
-		if (xprt->nocong || __xprt_get_cong(xprt, task))
+		if (xprt->nocong || __xprt_get_cong(xprt, task)) {
 			xprt->snd_task = task;
+			if (req) {
+				req->rq_bytes_sent = 0;
+				req->rq_ntrans++;
+			}
+		}
 	}
 	if (xprt->snd_task != task) {
 		dprintk("RPC: %4d TCP write queue full\n", task->tk_pid);
 		task->tk_timeout = 0;
 		task->tk_status = -EAGAIN;
-		if (task->tk_rqstp && task->tk_rqstp->rq_nresend)
+		if (req && req->rq_ntrans)
 			rpc_sleep_on(&xprt->resend, task, NULL, NULL);
 		else
 			rpc_sleep_on(&xprt->sending, task, NULL, NULL);
@@ -181,8 +188,14 @@ __xprt_lock_write_next(struct rpc_xprt *xprt)
 		if (!task)
 			return;
 	}
-	if (xprt->nocong || __xprt_get_cong(xprt, task))
+	if (xprt->nocong || __xprt_get_cong(xprt, task)) {
+		struct rpc_rqst *req = task->tk_rqstp;
 		xprt->snd_task = task;
+		if (req) {
+			req->rq_bytes_sent = 0;
+			req->rq_ntrans++;
+		}
+	}
 }

 /*
@@ -422,6 +435,9 @@ xprt_connect(struct rpc_task *task)
 	if (xprt_connected(xprt))
 		goto out_write;

+	if (task->tk_rqstp)
+		task->tk_rqstp->rq_bytes_sent = 0;
+
 	/*
 	 * We're here because the xprt was marked disconnected.
 	 * Start by resetting any existing state.
@@ -566,14 +582,13 @@ xprt_complete_rqst(struct rpc_xprt *xprt, struct rpc_rqst *req, int copied)
 	if (!xprt->nocong) {
 		xprt_adjust_cwnd(xprt, copied);
 		__xprt_put_cong(xprt, req);
-		if (!req->rq_nresend) {
+		if (req->rq_ntrans == 1) {
 			unsigned timer = task->tk_msg.rpc_proc->p_timer;
 			if (timer)
 				rpc_update_rtt(&clnt->cl_rtt, timer,
						(long)jiffies - req->rq_xtime);
 		}
-		rpc_clear_timeo(&clnt->cl_rtt);
 	}

 #ifdef RPC_PROFILE
@@ -714,11 +729,11 @@ udp_data_ready(struct sock *sk, int len)
 	dprintk("RPC: %4d received reply\n", task->tk_pid);

-	if ((copied = rovr->rq_rlen) > repsize)
+	if ((copied = rovr->rq_private_buf.len) > repsize)
 		copied = repsize;

 	/* Suck it into the iovec, verify checksum if not done by hw. */
-	if (csum_partial_copy_to_xdr(&rovr->rq_rcv_buf, skb))
+	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb))
 		goto out_unlock;

 	/* Something worked... */
@@ -841,7 +856,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 		return;
 	}

-	rcvbuf = &req->rq_rcv_buf;
+	rcvbuf = &req->rq_private_buf;
 	len = desc->count;
 	if (len > xprt->tcp_reclen - xprt->tcp_offset) {
 		skb_reader_t my_desc;
@@ -859,7 +874,7 @@ tcp_read_request(struct rpc_xprt *xprt, skb_reader_t *desc)
 	xprt->tcp_copied += len;
 	xprt->tcp_offset += len;

-	if (xprt->tcp_copied == req->rq_rlen)
+	if (xprt->tcp_copied == req->rq_private_buf.len)
 		xprt->tcp_flags &= ~XPRT_COPY_DATA;
 	else if (xprt->tcp_offset == xprt->tcp_reclen) {
 		if (xprt->tcp_flags & XPRT_LAST_FRAG)
@@ -1039,21 +1054,6 @@ xprt_write_space(struct sock *sk)
 	read_unlock(&sk->sk_callback_lock);
 }

-/*
- * Exponential backoff for UDP retries
- */
-static inline int
-xprt_expbackoff(struct rpc_task *task, struct rpc_rqst *req)
-{
-	int backoff;
-
-	req->rq_ntimeo++;
-	backoff = min(rpc_ntimeo(&task->tk_client->cl_rtt), XPRT_MAX_BACKOFF);
-	if (req->rq_ntimeo < (1 << backoff))
-		return 1;
-	return 0;
-}
-
 /*
  * RPC receive timeout handler.
  */
@@ -1067,15 +1067,8 @@ xprt_timer(struct rpc_task *task)
 	if (req->rq_received)
 		goto out;

-	if (!xprt->nocong) {
-		if (xprt_expbackoff(task, req)) {
-			rpc_add_timer(task, xprt_timer);
-			goto out_unlock;
-		}
-		rpc_inc_timeo(&task->tk_client->cl_rtt);
-		xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
-	}
-	req->rq_nresend++;
+	xprt_adjust_cwnd(req->rq_xprt, -ETIMEDOUT);
+	__xprt_put_cong(xprt, req);

 	dprintk("RPC: %4d xprt_timer (%s request)\n",
 		task->tk_pid, req ? "pending" : "backlogged");
@@ -1084,7 +1077,6 @@ xprt_timer(struct rpc_task *task)
 out:
 	task->tk_timeout = 0;
 	rpc_wake_up_task(task);
-out_unlock:
 	spin_unlock(&xprt->sock_lock);
 }
@@ -1104,10 +1096,11 @@ xprt_prepare_transmit(struct rpc_task *task)
 	if (xprt->shutdown)
 		return -EIO;

-	if (task->tk_rpcwait)
-		rpc_remove_wait_queue(task);
-
 	spin_lock_bh(&xprt->sock_lock);
+	if (req->rq_received && !req->rq_bytes_sent) {
+		err = req->rq_received;
+		goto out_unlock;
+	}
 	if (!__xprt_lock_write(xprt, task)) {
 		err = -EAGAIN;
 		goto out_unlock;
@@ -1117,11 +1110,6 @@ xprt_prepare_transmit(struct rpc_task *task)
 		err = -ENOTCONN;
 		goto out_unlock;
 	}
-	if (list_empty(&req->rq_list)) {
-		list_add_tail(&req->rq_list, &xprt->recv);
-		req->rq_received = 0;
-	}
 out_unlock:
 	spin_unlock_bh(&xprt->sock_lock);
 	return err;
@@ -1146,6 +1134,20 @@ xprt_transmit(struct rpc_task *task)
 		*marker = htonl(0x80000000|(req->rq_slen-sizeof(*marker)));
 	}

+	smp_rmb();
+	if (!req->rq_received) {
+		if (list_empty(&req->rq_list)) {
+			spin_lock_bh(&xprt->sock_lock);
+			/* Update the softirq receive buffer */
+			memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
+					sizeof(req->rq_private_buf));
+			/* Add request to the receive list */
+			list_add_tail(&req->rq_list, &xprt->recv);
+			spin_unlock_bh(&xprt->sock_lock);
+		}
+	} else if (!req->rq_bytes_sent)
+		return;
+
 	/* Continue transmitting the packet/record. We must be careful
 	 * to cope with writespace callbacks arriving _after_ we have
 	 * called xprt_sendmsg().
@@ -1160,8 +1162,12 @@ xprt_transmit(struct rpc_task *task)
 	if (xprt->stream) {
 		req->rq_bytes_sent += status;

-		if (req->rq_bytes_sent >= req->rq_slen)
+		/* If we've sent the entire packet, immediately
+		 * reset the count of bytes sent. */
+		if (req->rq_bytes_sent >= req->rq_slen) {
+			req->rq_bytes_sent = 0;
 			goto out_receive;
+		}
 	} else {
 		if (status >= req->rq_slen)
 			goto out_receive;
@@ -1182,9 +1188,6 @@ xprt_transmit(struct rpc_task *task)
 	 * hence there is no danger of the waking up task being put on
 	 * schedq, and being picked up by a parallel run of rpciod().
 	 */
-	if (req->rq_received)
-		goto out_release;
-
 	task->tk_status = status;

 	switch (status) {
@@ -1214,22 +1217,21 @@ xprt_transmit(struct rpc_task *task)
 		if (xprt->stream)
 			xprt_disconnect(xprt);
 	}
 out_release:
 	xprt_release_write(xprt, task);
+	req->rq_bytes_sent = 0;
 	return;
 out_receive:
 	dprintk("RPC: %4d xmit complete\n", task->tk_pid);
 	/* Set the task's receive timeout value */
+	spin_lock_bh(&xprt->sock_lock);
 	if (!xprt->nocong) {
 		task->tk_timeout = rpc_calc_rto(&clnt->cl_rtt,
 				task->tk_msg.rpc_proc->p_timer);
-		req->rq_ntimeo = 0;
 		task->tk_timeout <<= clnt->cl_timeout.to_retries
 			- req->rq_timeout.to_retries;
 		if (task->tk_timeout > req->rq_timeout.to_maxval)
 			task->tk_timeout = req->rq_timeout.to_maxval;
 	} else
 		task->tk_timeout = req->rq_timeout.to_current;
-	spin_lock_bh(&xprt->sock_lock);
 	/* Don't race with disconnect */
 	if (!xprt_connected(xprt))
 		task->tk_status = -ENOTCONN;
@@ -1237,7 +1239,6 @@ xprt_transmit(struct rpc_task *task)
 	rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
 	__xprt_release_write(xprt, task);
 	spin_unlock_bh(&xprt->sock_lock);
-	req->rq_bytes_sent = 0;
 }

 /*