Commit 67435598 authored by Trond Myklebust

Merge bk://linux.bkbits.net/linux-2.5

into hostme.bitkeeper.com:/ua/repos/n/nfsclient/linux-2.5
parents d498eb57 ec186a65
@@ -367,7 +367,8 @@ void sync_inodes_sb(struct super_block *sb, int wait)
};
get_page_state(&ps);
wbc.nr_to_write = ps.nr_dirty + ps.nr_dirty / 4;
wbc.nr_to_write = ps.nr_dirty + ps.nr_unstable +
(ps.nr_dirty + ps.nr_unstable) / 4;
spin_lock(&inode_lock);
sync_sb_inodes(sb, &wbc);
spin_unlock(&inode_lock);
......
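For reference, the new wbc.nr_to_write value reads as "all dirty plus unstable (written to the server but not yet committed) pages, plus 25% headroom"; the wakeup_bdflush hunk further down applies the same dirty+unstable idea. A minimal userspace sketch of the arithmetic, with an illustrative helper name and sample numbers that are not from the patch:

/* Sketch only: how the writeback target in the hunk above is sized.
 * "Unstable" pages are NFS pages the server has acknowledged but not yet
 * committed to stable storage; they still need a COMMIT before they can be
 * reclaimed, so they count alongside dirty pages, plus 25% slack. */
#include <stdio.h>

static unsigned long writeback_target(unsigned long nr_dirty,
				      unsigned long nr_unstable)
{
	unsigned long pending = nr_dirty + nr_unstable;

	return pending + pending / 4;	/* same formula as the new wbc.nr_to_write */
}

int main(void)
{
	/* e.g. 1000 dirty + 200 unstable pages -> target of 1500 pages */
	printf("%lu\n", writeback_target(1000, 200));
	return 0;
}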
@@ -187,15 +187,7 @@ nlm_bind_host(struct nlm_host *host)
host->h_nextrebind - jiffies);
}
} else {
uid_t saved_fsuid = current->fsuid;
kernel_cap_t saved_cap = current->cap_effective;
/* Create RPC socket as root user so we get a priv port */
current->fsuid = 0;
cap_raise (current->cap_effective, CAP_NET_BIND_SERVICE);
xprt = xprt_create_proto(host->h_proto, &host->h_addr, NULL);
current->fsuid = saved_fsuid;
current->cap_effective = saved_cap;
if (xprt == NULL)
goto forgetit;
@@ -209,6 +201,7 @@ nlm_bind_host(struct nlm_host *host)
}
clnt->cl_autobind = 1; /* turn on pmap queries */
xprt->nocong = 1; /* No congestion control for NLM */
xprt->resvport = 1; /* NLM requires a reserved port */
host->h_rpcclnt = clnt;
}
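Note on the two nlm_bind_host hunks above: the old code temporarily set fsuid to 0 and raised CAP_NET_BIND_SERVICE around xprt_create_proto() so that the RPC socket would end up with a privileged (below 1024) source port; the new xprt->resvport flag moves that job into the transport layer instead. The userspace sketch below only illustrates what "reserve a privileged port" means in practice; the port range, helper name, and loop are illustrative and are not the kernel's reserved-port binding code.

/* Userspace illustration: binding a port below 1024 normally requires
 * CAP_NET_BIND_SERVICE, which is why the removed code raised that
 * capability around socket creation. */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <unistd.h>

static int bind_reserved_port(int sock)
{
	struct sockaddr_in sin;
	int port;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_addr.s_addr = htonl(INADDR_ANY);

	/* Walk down through the privileged range until a free port binds. */
	for (port = 1023; port >= 512; port--) {
		sin.sin_port = htons(port);
		if (bind(sock, (struct sockaddr *)&sin, sizeof(sin)) == 0)
			return port;
		if (errno != EADDRINUSE)
			break;	/* e.g. EACCES: no privilege, give up */
	}
	return -1;
}

int main(void)
{
	int sock = socket(AF_INET, SOCK_DGRAM, 0);
	int port;

	if (sock < 0)
		return 1;
	port = bind_reserved_port(sock);
	printf("bound reserved port: %d\n", port);	/* -1 unless run with privileges */
	close(sock);
	return 0;
}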
@@ -276,7 +269,7 @@ nlm_shutdown_hosts(void)
dprintk("lockd: nuking all hosts...\n");
for (i = 0; i < NLM_HOST_NRHASH; i++) {
for (host = nlm_hosts[i]; host; host = host->h_next)
host->h_expires = 0;
host->h_expires = jiffies - 1;
}
/* Then, perform a garbage collection pass */
@@ -323,6 +316,9 @@ nlm_gc_hosts(void)
while ((host = *q) != NULL) {
if (host->h_count || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
host->h_name, host->h_count,
host->h_inuse, host->h_expires);
q = &host->h_next;
continue;
}
......
@@ -123,6 +123,7 @@ nsm_create(void)
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
xprt->resvport = 1; /* NSM requires a reserved port */
out:
return clnt;
......
@@ -83,7 +83,7 @@ nfs_opendir(struct inode *inode, struct file *filp)
lock_kernel();
/* Do cto revalidation */
if (server->flags & NFS_MOUNT_NOCTO)
if (!(server->flags & NFS_MOUNT_NOCTO))
res = __nfs_revalidate_inode(server, inode);
/* Call generic open code in order to cache credentials */
if (!res)
......
@@ -83,7 +83,7 @@ nfs_file_open(struct inode *inode, struct file *filp)
if ((open = server->rpc_ops->file_open) != NULL)
res = open(inode, filp);
/* Do cto revalidation */
else if (server->flags & NFS_MOUNT_NOCTO)
else if (!(server->flags & NFS_MOUNT_NOCTO))
res = __nfs_revalidate_inode(server, inode);
/* Call generic open code in order to cache credentials */
if (!res)
......
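The nfs_opendir and nfs_file_open hunks above fix an inverted test: close-to-open (cto) attribute revalidation on open() is the default behaviour, and the nocto mount option is what switches it off; previously the code revalidated only when nocto was set. A minimal sketch of the corrected flag logic, where the bit value and helper name are illustrative:

/* Close-to-open consistency: revalidate attributes on every open()
 * unless the filesystem was mounted with -o nocto. */
#include <stdio.h>

#define NFS_MOUNT_NOCTO	0x0010	/* illustrative bit value */

static int should_revalidate_on_open(int mount_flags)
{
	return !(mount_flags & NFS_MOUNT_NOCTO);	/* note the negation */
}

int main(void)
{
	printf("default mount: %d\n", should_revalidate_on_open(0));		/* 1 */
	printf("nocto mount:   %d\n", should_revalidate_on_open(NFS_MOUNT_NOCTO));	/* 0 */
	return 0;
}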
@@ -280,8 +280,6 @@ nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
err = nfs_wb_all(inode);
} else
nfs_commit_file(inode, NULL, 0, 0, 0);
/* Avoid races. Tell upstream we've done all we were told to do */
wbc->nr_to_write = 0;
out:
return err;
}
@@ -490,7 +488,6 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst, struct file *file, u
int res;
res = nfs_scan_list(&nfsi->commit, dst, file, idx_start, npages);
nfsi->ncommit -= res;
sub_page_state(nr_unstable,res);
if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
return res;
@@ -1009,6 +1006,7 @@ nfs_commit_done(struct rpc_task *task)
{
struct nfs_write_data *data = (struct nfs_write_data *)task->tk_calldata;
struct nfs_page *req;
int res = 0;
dprintk("NFS: %4d nfs_commit_done (status %d)\n",
task->tk_pid, task->tk_status);
@@ -1043,7 +1041,9 @@ nfs_commit_done(struct rpc_task *task)
nfs_mark_request_dirty(req);
next:
nfs_unlock_request(req);
res++;
}
sub_page_state(nr_unstable,res);
}
#endif
......
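The write-path hunks above change where unstable pages are accounted: rather than decrementing the global nr_unstable count in nfs_scan_commit() when requests are merely gathered for a COMMIT (and rather than zeroing wbc->nr_to_write in nfs_writepages()), the counter is now decremented in nfs_commit_done(), by the number of requests actually released once the COMMIT reply has been processed. A toy sketch of that batched-decrement pattern, using stand-in names rather than the kernel's:

#include <stdio.h>

static long nr_unstable = 5;	/* stand-in for the global page state counter */

static void commit_done(int nreqs_in_reply)
{
	int res = 0;
	int i;

	for (i = 0; i < nreqs_in_reply; i++) {
		/* ...per-request completion work would go here... */
		res++;
	}
	nr_unstable -= res;	/* one batched update after the reply, as in nfs_commit_done() */
}

int main(void)
{
	commit_done(3);
	printf("nr_unstable now %ld\n", nr_unstable);	/* 2 */
	return 0;
}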
@@ -157,6 +157,11 @@ typedef size_t (*skb_read_actor_t)(skb_reader_t *desc, void *to, size_t len);
extern void xdr_partial_copy_from_skb(struct xdr_buf *, unsigned int,
skb_reader_t *, skb_read_actor_t);
struct socket;
struct sockaddr;
extern int xdr_sendpages(struct socket *, struct sockaddr *, int,
struct xdr_buf *, unsigned int, int);
/*
* Provide some simple tools for XDR buffer overflow-checking etc.
*/
......
@@ -198,7 +198,7 @@ void xprt_sock_setbufsize(struct rpc_xprt *);
#define XPRT_CONNECT 0
#define xprt_connected(xp) (!(xp)->stream || test_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_connected(xp) (test_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_set_connected(xp) (set_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_test_and_set_connected(xp) (test_and_set_bit(XPRT_CONNECT, &(xp)->sockstate))
#define xprt_clear_connected(xp) (clear_bit(XPRT_CONNECT, &(xp)->sockstate))
......
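The xprt_connected() change above drops the datagram special case: the old macro treated any non-stream (UDP) transport as permanently connected, while the new one requires the XPRT_CONNECT bit for every transport. A small stand-alone sketch contrasting the two definitions; the structure below is a stand-in, not the kernel's rpc_xprt:

#include <stdio.h>

#define XPRT_CONNECT	0

struct fake_xprt {
	int stream;			/* 1 = TCP, 0 = UDP */
	unsigned long sockstate;	/* bit XPRT_CONNECT = connected */
};

#define old_xprt_connected(xp)	(!(xp)->stream || ((xp)->sockstate & (1UL << XPRT_CONNECT)))
#define new_xprt_connected(xp)	((xp)->sockstate & (1UL << XPRT_CONNECT))

int main(void)
{
	struct fake_xprt udp = { .stream = 0, .sockstate = 0 };

	/* A UDP transport with no CONNECT bit: "connected" before, not after. */
	printf("old: %d  new: %d\n",
	       !!old_xprt_connected(&udp), !!new_xprt_connected(&udp));
	return 0;
}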
@@ -270,7 +270,7 @@ int wakeup_bdflush(long nr_pages)
struct page_state ps;
get_page_state(&ps);
nr_pages = ps.nr_dirty;
nr_pages = ps.nr_dirty + ps.nr_unstable;
}
return pdflush_operation(background_writeout, nr_pages);
}
......
@@ -57,8 +57,7 @@ static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
static void call_timeout(struct rpc_task *task);
static void call_connect(struct rpc_task *task);
static void child_connect(struct rpc_task *task);
static void child_connect_status(struct rpc_task *task);
static void call_connect_status(struct rpc_task *task);
static u32 * call_header(struct rpc_task *task);
static u32 * call_verify(struct rpc_task *task);
@@ -602,40 +601,48 @@ static void
call_connect(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
struct rpc_task *child;
dprintk("RPC: %4d call_connect status %d\n",
task->tk_pid, task->tk_status);
if (xprt_connected(clnt->cl_xprt)) {
task->tk_action = call_transmit;
if (task->tk_status < 0 || !clnt->cl_xprt->stream)
return;
/* Run as a child to ensure it runs as an rpciod task. Rpciod
* guarantees we have the correct capabilities for socket bind
* to succeed. */
child = rpc_new_child(clnt, task);
if (child) {
child->tk_action = child_connect;
rpc_run_child(task, child, NULL);
}
task->tk_action = call_connect_status;
if (task->tk_status < 0)
return;
xprt_connect(task);
}
/*
* 4b. Sort out connect result
*/
static void
child_connect(struct rpc_task *task)
call_connect_status(struct rpc_task *task)
{
struct rpc_clnt *clnt = task->tk_client;
int status = task->tk_status;
task->tk_status = 0;
task->tk_action = child_connect_status;
xprt_connect(task);
}
if (status >= 0) {
clnt->cl_stats->netreconn++;
task->tk_action = call_transmit;
return;
}
static void
child_connect_status(struct rpc_task *task)
{
if (task->tk_status == -EAGAIN)
task->tk_action = child_connect;
else
task->tk_action = NULL;
/* Something failed: we may have to rebind */
if (clnt->cl_autobind)
clnt->cl_port = 0;
switch (status) {
case -ENOTCONN:
case -ETIMEDOUT:
case -EAGAIN:
task->tk_action = (clnt->cl_port == 0) ? call_bind : call_connect;
break;
default:
rpc_exit(task, -EIO);
}
}
/*
@@ -696,6 +703,7 @@ call_status(struct rpc_task *task)
break;
case -ECONNREFUSED:
case -ENOTCONN:
req->rq_bytes_sent = 0;
if (clnt->cl_autobind)
clnt->cl_port = 0;
task->tk_action = call_bind;
......
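The clnt.c rewrite above replaces the rpc_new_child()/child_connect() helpers with a direct state transition: call_connect() now queues call_connect_status() and calls xprt_connect(); on success the task moves on to call_transmit(), on -ENOTCONN/-ETIMEDOUT/-EAGAIN it retries (going back through call_bind() first if autobind cleared the port), and any other error exits the task with -EIO. A compilable, userspace-sized sketch of that retry decision follows; the enum, struct, and function names are illustrative, not the kernel's:

#include <stdio.h>
#include <errno.h>

enum next_action { DO_BIND, DO_CONNECT, DO_TRANSMIT, DO_EXIT_EIO };

struct fake_clnt {
	int autobind;
	int port;	/* 0 means "needs rebinding via the portmapper" */
};

static enum next_action connect_status(struct fake_clnt *clnt, int status)
{
	if (status >= 0)
		return DO_TRANSMIT;		/* connected: go transmit the request */

	if (clnt->autobind)
		clnt->port = 0;			/* something failed: we may have to rebind */

	switch (status) {
	case -ENOTCONN:
	case -ETIMEDOUT:
	case -EAGAIN:
		return clnt->port == 0 ? DO_BIND : DO_CONNECT;
	default:
		return DO_EXIT_EIO;		/* hard failure */
	}
}

int main(void)
{
	struct fake_clnt clnt = { .autobind = 1, .port = 2049 };

	printf("%d\n", connect_status(&clnt, 0));	/* DO_TRANSMIT */
	printf("%d\n", connect_status(&clnt, -ETIMEDOUT));	/* DO_BIND: port was cleared */
	return 0;
}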
@@ -1110,9 +1110,10 @@ void rpc_show_tasks(void)
alltask_for_each(t, le, &all_tasks)
printk("%05d %04d %04x %06d %8p %6d %8p %08ld %8s %8p %8p\n",
t->tk_pid,
(t->tk_msg.rpc_proc->p_proc ? t->tk_msg.rpc_proc->p_proc : -1),
(t->tk_msg.rpc_proc ? t->tk_msg.rpc_proc->p_proc : -1),
t->tk_flags, t->tk_status,
t->tk_client, t->tk_client->cl_prog,
t->tk_client,
(t->tk_client ? t->tk_client->cl_prog : 0),
t->tk_rqstp, t->tk_timeout,
rpc_qname(t->tk_rpcwait),
t->tk_action, t->tk_exit);
......
@@ -13,6 +13,8 @@
#include <linux/pagemap.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/net.h>
#include <net/sock.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
@@ -314,8 +316,113 @@ xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base,
} while ((pglen -= len) != 0);
copy_tail:
len = xdr->tail[0].iov_len;
if (len)
copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len);
if (base < len)
copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base);
}
int
xdr_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
struct xdr_buf *xdr, unsigned int base, int msgflags)
{
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
int err, ret = 0;
ssize_t (*sendpage)(struct socket *, struct page *, int, size_t, int);
mm_segment_t oldfs;
len = xdr->head[0].iov_len;
if (base < len || (addr != NULL && base == 0)) {
struct iovec iov = {
.iov_base = xdr->head[0].iov_base + base,
.iov_len = len - base,
};
struct msghdr msg = {
.msg_name = addr,
.msg_namelen = addrlen,
.msg_flags = msgflags,
};
if (iov.iov_len != 0) {
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
}
if (xdr->len > len)
msg.msg_flags |= MSG_MORE;
oldfs = get_fs(); set_fs(get_ds());
err = sock_sendmsg(sock, &msg, iov.iov_len);
set_fs(oldfs);
if (ret == 0)
ret = err;
else if (err > 0)
ret += err;
if (err != iov.iov_len)
goto out;
base = 0;
} else
base -= len;
if (pglen == 0)
goto copy_tail;
if (base >= pglen) {
base -= pglen;
goto copy_tail;
}
if (base || xdr->page_base) {
pglen -= base;
base += xdr->page_base;
ppage += base >> PAGE_CACHE_SHIFT;
base &= ~PAGE_CACHE_MASK;
}
sendpage = sock->ops->sendpage ? : sock_no_sendpage;
do {
int flags = msgflags;
len = PAGE_CACHE_SIZE;
if (base)
len -= base;
if (pglen < len)
len = pglen;
if (pglen != len || xdr->tail[0].iov_len != 0)
flags |= MSG_MORE;
/* Hmm... We might be dealing with highmem pages */
if (PageHighMem(*ppage))
sendpage = sock_no_sendpage;
err = sendpage(sock, *ppage, base, len, flags);
if (ret == 0)
ret = err;
else if (err > 0)
ret += err;
if (err != len)
goto out;
base = 0;
ppage++;
} while ((pglen -= len) != 0);
copy_tail:
len = xdr->tail[0].iov_len;
if (base < len) {
struct iovec iov = {
.iov_base = xdr->tail[0].iov_base + base,
.iov_len = len - base,
};
struct msghdr msg = {
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_flags = msgflags,
};
oldfs = get_fs(); set_fs(get_ds());
err = sock_sendmsg(sock, &msg, iov.iov_len);
set_fs(oldfs);
if (ret == 0)
ret = err;
else if (err > 0)
ret += err;
}
out:
return ret;
}
......
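xdr_sendpages() above walks the three parts of an XDR buffer (head iovec, page array, tail iovec), honouring a byte offset "base" so a partially transmitted request can be resumed, setting MSG_MORE while more data follows, and falling back to sock_no_sendpage() for highmem pages. The userspace sketch below shows only the resume-from-offset walk over a segmented buffer; the structure and helper names are illustrative and not the kernel code:

#include <stdio.h>
#include <unistd.h>

struct seg_buf {
	const char *seg[3];	/* head, page data, tail */
	size_t len[3];
};

static ssize_t send_from(int fd, const struct seg_buf *buf, size_t base)
{
	ssize_t total = 0;
	int i;

	for (i = 0; i < 3; i++) {
		size_t len = buf->len[i];
		ssize_t n;

		if (base >= len) {	/* this segment was already fully sent */
			base -= len;
			continue;
		}
		n = write(fd, buf->seg[i] + base, len - base);
		if (n < 0)
			return total ? total : -1;
		total += n;
		if ((size_t)n != len - base)
			break;		/* short write: caller retries later with a larger base */
		base = 0;
	}
	return total;
}

int main(void)
{
	struct seg_buf buf = {
		.seg = { "HEAD ", "PAGEDATA ", "TAIL\n" },
		.len = { 5, 9, 5 },
	};

	/* Pretend 7 bytes were sent already: resumes 2 bytes into "PAGEDATA ". */
	send_from(STDOUT_FILENO, &buf, 7);
	return 0;
}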