Commit 57fd0b77 authored by Linus Torvalds

Merge tag 'afs-20170316' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull AFS fixes from David Howells:
 "Fixes to the AFS filesystem in the kernel.

  They fix a variety of bugs. Several of them bring the behaviour into
  line with other AFS implementations:

   - handle AFS mode bits better

   - use the client mtime rather than the server mtime in the protocol

   - handle the server returning more or less data than was requested in
     a FetchData call

   - distinguish mountpoints from symlinks based on the mode bits rather
     than preemptively reading every symlink to find out what it
     actually represents

  One other notable change for the user is that files are now flushed on
  close, in line with other network filesystems"
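
The flush-on-close behaviour amounts to a conditional fsync in the filesystem's ->flush handler, which the VFS calls when a file descriptor is closed; a minimal sketch of the idea (function name illustrative, the real hunk is in fs/afs/write.c further down):

    /* Push dirty data back to the server when a writable fd is closed. */
    int example_flush(struct file *file, fl_owner_t id)
    {
            if ((file->f_mode & FMODE_WRITE) == 0)
                    return 0;               /* read-only fd: nothing to write back */
            return vfs_fsync(file, 0);      /* flush data and metadata */
    }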

* tag 'afs-20170316' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs: (28 commits)
  afs: Don't wait for page writeback with the page lock held
  afs: ->writepage() shouldn't call clear_page_dirty_for_io()
  afs: Fix abort on signal while waiting for call completion
  afs: Fix an off-by-one error in afs_send_pages()
  afs: Fix afs_kill_pages()
  afs: Fix page leak in afs_write_begin()
  afs: Don't set PG_error on local EINTR or ENOMEM when filling a page
  afs: Populate and use client modification time
  afs: Better abort and net error handling
  afs: Invalid op ID should abort with RXGEN_OPCODE
  afs: Fix the maths in afs_fs_store_data()
  afs: Use a bvec rather than a kvec in afs_send_pages()
  afs: Make struct afs_read::remain 64-bit
  afs: Fix AFS read bug
  afs: Prevent callback expiry timer overflow
  afs: Migrate vlocation fields to 64-bit
  afs: security: Replace rcu_assign_pointer() with RCU_INIT_POINTER()
  afs: inode: Replace rcu_assign_pointer() with RCU_INIT_POINTER()
  afs: Distinguish mountpoints from symlinks by file mode alone
  afs: Flush outstanding writes when an fd is closed
  ...
parents c79d5ff0 c5051c7b
......@@ -362,7 +362,7 @@ static void afs_callback_updater(struct work_struct *work)
{
struct afs_server *server;
struct afs_vnode *vnode, *xvnode;
time_t now;
time64_t now;
long timeout;
int ret;
......@@ -370,7 +370,7 @@ static void afs_callback_updater(struct work_struct *work)
_enter("");
now = get_seconds();
now = ktime_get_real_seconds();
/* find the first vnode to update */
spin_lock(&server->cb_lock);
......@@ -424,7 +424,8 @@ static void afs_callback_updater(struct work_struct *work)
/* and then reschedule */
_debug("reschedule");
vnode->update_at = get_seconds() + afs_vnode_update_timeout;
vnode->update_at = ktime_get_real_seconds() +
afs_vnode_update_timeout;
spin_lock(&server->cb_lock);
......
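The callback.c hunks above all follow one pattern: 32-bit time_t arithmetic via get_seconds() is replaced with time64_t and ktime_get_real_seconds(), so callback-expiry calculations keep working past 2038 on 32-bit hosts. Schematically (variable names illustrative):

    time_t   expires   = get_seconds() + timeout;            /* old: wraps in 2038 on 32-bit */
    time64_t expires64 = ktime_get_real_seconds() + timeout; /* new: 64-bit seconds */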
......@@ -187,7 +187,6 @@ static int afs_deliver_cb_callback(struct afs_call *call)
struct afs_callback *cb;
struct afs_server *server;
__be32 *bp;
u32 tmp;
int ret, loop;
_enter("{%u}", call->unmarshall);
......@@ -249,9 +248,9 @@ static int afs_deliver_cb_callback(struct afs_call *call)
if (ret < 0)
return ret;
tmp = ntohl(call->tmp);
_debug("CB count: %u", tmp);
if (tmp != call->count && tmp != 0)
call->count2 = ntohl(call->tmp);
_debug("CB count: %u", call->count2);
if (call->count2 != call->count && call->count2 != 0)
return -EBADMSG;
call->offset = 0;
call->unmarshall++;
......@@ -259,14 +258,14 @@ static int afs_deliver_cb_callback(struct afs_call *call)
case 4:
_debug("extract CB array");
ret = afs_extract_data(call, call->buffer,
call->count * 3 * 4, false);
call->count2 * 3 * 4, false);
if (ret < 0)
return ret;
_debug("unmarshall CB array");
cb = call->request;
bp = call->buffer;
for (loop = call->count; loop > 0; loop--, cb++) {
for (loop = call->count2; loop > 0; loop--, cb++) {
cb->version = ntohl(*bp++);
cb->expiry = ntohl(*bp++);
cb->type = ntohl(*bp++);
......
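The point of switching from a local tmp to call->count2 above is that the delivery routine can be re-entered: afs_extract_data() returns early when a packet has only partially arrived, so any value needed in a later unmarshalling phase has to live in struct afs_call. The callback count is needed again in phase 4, and it may legitimately be zero (the check only rejects a non-zero count that differs from the FID count), so the array extraction must be sized by it rather than by call->count. Condensed:

    /* phase 3: remember the CB count across re-entries */
    call->count2 = ntohl(call->tmp);
    if (call->count2 != call->count && call->count2 != 0)
            return -EBADMSG;

    /* phase 4: size the CB array by the CB count, which may be 0 */
    ret = afs_extract_data(call, call->buffer, call->count2 * 3 * 4, false);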
......@@ -30,6 +30,7 @@ static int afs_readpages(struct file *filp, struct address_space *mapping,
const struct file_operations afs_file_operations = {
.open = afs_open,
.flush = afs_flush,
.release = afs_release,
.llseek = generic_file_llseek,
.read_iter = generic_file_read_iter,
......@@ -184,10 +185,13 @@ int afs_page_filler(void *data, struct page *page)
if (!req)
goto enomem;
/* We request a full page. If the page is a partial one at the
* end of the file, the server will return a short read and the
* unmarshalling code will clear the unfilled space.
*/
atomic_set(&req->usage, 1);
req->pos = (loff_t)page->index << PAGE_SHIFT;
req->len = min_t(size_t, i_size_read(inode) - req->pos,
PAGE_SIZE);
req->len = PAGE_SIZE;
req->nr_pages = 1;
req->pages[0] = page;
get_page(page);
......@@ -208,7 +212,13 @@ int afs_page_filler(void *data, struct page *page)
fscache_uncache_page(vnode->cache, page);
#endif
BUG_ON(PageFsCache(page));
if (ret == -EINTR ||
ret == -ENOMEM ||
ret == -ERESTARTSYS ||
ret == -EAGAIN)
goto error;
goto io_error;
}
SetPageUptodate(page);
......@@ -227,10 +237,12 @@ int afs_page_filler(void *data, struct page *page)
_leave(" = 0");
return 0;
io_error:
SetPageError(page);
goto error;
enomem:
ret = -ENOMEM;
error:
SetPageError(page);
unlock_page(page);
_leave(" = %d", ret);
return ret;
......
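Two things change in the read path above: afs_page_filler() now always asks the server for a whole page (req->len = PAGE_SIZE) and lets the unmarshaller zero whatever a short read leaves unfilled, and only genuine I/O errors mark the page with PG_error; transient, locally-generated failures leave the page clean so a later read can retry. The error split, in outline:

    if (ret == -EINTR || ret == -ENOMEM ||
        ret == -ERESTARTSYS || ret == -EAGAIN)
            goto error;        /* local/transient failure: no SetPageError() */
    goto io_error;             /* server or protocol failure: SetPageError() first */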
......@@ -16,6 +16,12 @@
#include "internal.h"
#include "afs_fs.h"
/*
* We need somewhere to discard into in case the server helpfully returns more
* than we asked for in FS.FetchData{,64}.
*/
static u8 afs_discard_buffer[64];
/*
* decode an AFSFid block
*/
......@@ -105,7 +111,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
vnode->vfs_inode.i_mode = mode;
}
vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
vnode->vfs_inode.i_ctime.tv_sec = status->mtime_client;
vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
vnode->vfs_inode.i_version = data_version;
......@@ -139,7 +145,7 @@ static void xdr_decode_AFSCallBack(const __be32 **_bp, struct afs_vnode *vnode)
vnode->cb_version = ntohl(*bp++);
vnode->cb_expiry = ntohl(*bp++);
vnode->cb_type = ntohl(*bp++);
vnode->cb_expires = vnode->cb_expiry + get_seconds();
vnode->cb_expires = vnode->cb_expiry + ktime_get_real_seconds();
*_bp = bp;
}
......@@ -315,7 +321,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
void *buffer;
int ret;
_enter("{%u,%zu/%u;%u/%llu}",
_enter("{%u,%zu/%u;%llu/%llu}",
call->unmarshall, call->offset, call->count,
req->remain, req->actual_len);
......@@ -353,12 +359,6 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
req->actual_len |= ntohl(call->tmp);
_debug("DATA length: %llu", req->actual_len);
/* Check that the server didn't want to send us extra. We
* might want to just discard instead, but that requires
* cooperation from AF_RXRPC.
*/
if (req->actual_len > req->len)
return -EBADMSG;
req->remain = req->actual_len;
call->offset = req->pos & (PAGE_SIZE - 1);
......@@ -368,6 +368,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->unmarshall++;
begin_page:
ASSERTCMP(req->index, <, req->nr_pages);
if (req->remain > PAGE_SIZE - call->offset)
size = PAGE_SIZE - call->offset;
else
......@@ -378,7 +379,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
/* extract the returned data */
case 3:
_debug("extract data %u/%llu %zu/%u",
_debug("extract data %llu/%llu %zu/%u",
req->remain, req->actual_len, call->offset, call->count);
buffer = kmap(req->pages[req->index]);
......@@ -389,19 +390,40 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
if (call->offset == PAGE_SIZE) {
if (req->page_done)
req->page_done(call, req);
if (req->remain > 0) {
req->index++;
if (req->remain > 0) {
call->offset = 0;
if (req->index >= req->nr_pages) {
call->unmarshall = 4;
goto begin_discard;
}
goto begin_page;
}
}
goto no_more_data;
/* Discard any excess data the server gave us */
begin_discard:
case 4:
size = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
call->count = size;
_debug("extract discard %llu/%llu %zu/%u",
req->remain, req->actual_len, call->offset, call->count);
call->offset = 0;
ret = afs_extract_data(call, afs_discard_buffer, call->count, true);
req->remain -= call->offset;
if (ret < 0)
return ret;
if (req->remain > 0)
goto begin_discard;
no_more_data:
call->offset = 0;
call->unmarshall++;
call->unmarshall = 5;
/* extract the metadata */
case 4:
case 5:
ret = afs_extract_data(call, call->buffer,
(21 + 3 + 6) * 4, false);
if (ret < 0)
......@@ -416,16 +438,17 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
call->offset = 0;
call->unmarshall++;
case 5:
case 6:
break;
}
if (call->count < PAGE_SIZE) {
buffer = kmap(req->pages[req->index]);
memset(buffer + call->count, 0, PAGE_SIZE - call->count);
kunmap(req->pages[req->index]);
for (; req->index < req->nr_pages; req->index++) {
if (call->count < PAGE_SIZE)
zero_user_segment(req->pages[req->index],
call->count, PAGE_SIZE);
if (req->page_done)
req->page_done(call, req);
call->count = 0;
}
_leave(" = 0 [done]");
......@@ -711,8 +734,8 @@ int afs_fs_create(struct afs_server *server,
memset(bp, 0, padsz);
bp = (void *) bp + padsz;
}
*bp++ = htonl(AFS_SET_MODE);
*bp++ = 0; /* mtime */
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(mode & S_IALLUGO); /* unix mode */
......@@ -980,8 +1003,8 @@ int afs_fs_symlink(struct afs_server *server,
memset(bp, 0, c_padsz);
bp = (void *) bp + c_padsz;
}
*bp++ = htonl(AFS_SET_MODE);
*bp++ = 0; /* mtime */
*bp++ = htonl(AFS_SET_MODE | AFS_SET_MTIME);
*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = htonl(S_IRWXUGO); /* unix mode */
......@@ -1180,8 +1203,8 @@ static int afs_fs_store_data64(struct afs_server *server,
*bp++ = htonl(vnode->fid.vnode);
*bp++ = htonl(vnode->fid.unique);
*bp++ = 0; /* mask */
*bp++ = 0; /* mtime */
*bp++ = htonl(AFS_SET_MTIME); /* mask */
*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
......@@ -1213,7 +1236,7 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
_enter(",%x,{%x:%u},,",
key_serial(wb->key), vnode->fid.vid, vnode->fid.vnode);
size = to - offset;
size = (loff_t)to - (loff_t)offset;
if (first != last)
size += (loff_t)(last - first) << PAGE_SHIFT;
pos = (loff_t)first << PAGE_SHIFT;
......@@ -1257,8 +1280,8 @@ int afs_fs_store_data(struct afs_server *server, struct afs_writeback *wb,
*bp++ = htonl(vnode->fid.vnode);
*bp++ = htonl(vnode->fid.unique);
*bp++ = 0; /* mask */
*bp++ = 0; /* mtime */
*bp++ = htonl(AFS_SET_MTIME); /* mask */
*bp++ = htonl(vnode->vfs_inode.i_mtime.tv_sec); /* mtime */
*bp++ = 0; /* owner */
*bp++ = 0; /* group */
*bp++ = 0; /* unix mode */
......
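The largest change above is in afs_deliver_fs_fetch_data(): instead of returning -EBADMSG when the server sends back more data than was asked for, the excess is drained, 64 bytes at a time, into a static scratch buffer in a new unmarshalling phase. Stripped of the phase bookkeeping, the discard loop is roughly:

    static u8 afs_discard_buffer[64];

    while (req->remain > 0) {
            size_t n = min_t(loff_t, sizeof(afs_discard_buffer), req->remain);
            ret = afs_extract_data(call, afs_discard_buffer, n, true);
            if (ret < 0)
                    return ret;     /* includes "need more data, call me again" */
            req->remain -= n;
    }

The same file also switches the StoreData, CreateFile and Symlink RPCs to send AFS_SET_MTIME with the client's i_mtime, matching what other AFS implementations do.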
......@@ -54,8 +54,21 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
inode->i_fop = &afs_dir_file_operations;
break;
case AFS_FTYPE_SYMLINK:
/* Symlinks with a mode of 0644 are actually mountpoints. */
if ((vnode->status.mode & 0777) == 0644) {
inode->i_flags |= S_AUTOMOUNT;
spin_lock(&vnode->lock);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
spin_unlock(&vnode->lock);
inode->i_mode = S_IFDIR | 0555;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
} else {
inode->i_mode = S_IFLNK | vnode->status.mode;
inode->i_op = &page_symlink_inode_operations;
}
inode_nohighmem(inode);
break;
default:
......@@ -70,27 +83,15 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
set_nlink(inode, vnode->status.nlink);
inode->i_uid = vnode->status.owner;
inode->i_gid = GLOBAL_ROOT_GID;
inode->i_gid = vnode->status.group;
inode->i_size = vnode->status.size;
inode->i_ctime.tv_sec = vnode->status.mtime_server;
inode->i_ctime.tv_sec = vnode->status.mtime_client;
inode->i_ctime.tv_nsec = 0;
inode->i_atime = inode->i_mtime = inode->i_ctime;
inode->i_blocks = 0;
inode->i_generation = vnode->fid.unique;
inode->i_version = vnode->status.data_version;
inode->i_mapping->a_ops = &afs_fs_aops;
/* check to see whether a symbolic link is really a mountpoint */
if (vnode->status.type == AFS_FTYPE_SYMLINK) {
afs_mntpt_check_symlink(vnode, key);
if (test_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags)) {
inode->i_mode = S_IFDIR | vnode->status.mode;
inode->i_op = &afs_mntpt_inode_operations;
inode->i_fop = &afs_mntpt_file_operations;
}
}
return 0;
}
......@@ -245,12 +246,13 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
vnode->cb_version = 0;
vnode->cb_expiry = 0;
vnode->cb_type = 0;
vnode->cb_expires = get_seconds();
vnode->cb_expires = ktime_get_real_seconds();
} else {
vnode->cb_version = cb->version;
vnode->cb_expiry = cb->expiry;
vnode->cb_type = cb->type;
vnode->cb_expires = vnode->cb_expiry + get_seconds();
vnode->cb_expires = vnode->cb_expiry +
ktime_get_real_seconds();
}
}
......@@ -323,7 +325,7 @@ int afs_validate(struct afs_vnode *vnode, struct key *key)
!test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags) &&
!test_bit(AFS_VNODE_MODIFIED, &vnode->flags) &&
!test_bit(AFS_VNODE_ZAP_DATA, &vnode->flags)) {
if (vnode->cb_expires < get_seconds() + 10) {
if (vnode->cb_expires < ktime_get_real_seconds() + 10) {
_debug("callback expired");
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
} else {
......@@ -444,7 +446,7 @@ void afs_evict_inode(struct inode *inode)
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
rcu_assign_pointer(vnode->permits, NULL);
RCU_INIT_POINTER(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
call_rcu(&permits->rcu, afs_zap_permits);
......
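The inode.c side of the mountpoint change is a pure mode-bit test: AFS stores mount points as symlinks with mode 0644, while genuine symlinks are created with mode 0777, so a vnode can be classified without fetching its body. As a standalone predicate (name illustrative):

    /* AFS convention: a "symlink" whose mode is 0644 is really a mount point. */
    static bool afs_symlink_is_mountpoint(unsigned int mode)
    {
            return (mode & 0777) == 0644;
    }

The same hunk also stops forcing i_gid to GLOBAL_ROOT_GID and uses the group returned in the file status instead.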
......@@ -11,6 +11,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/rxrpc.h>
......@@ -90,7 +91,10 @@ struct afs_call {
unsigned request_size; /* size of request data */
unsigned reply_max; /* maximum size of reply */
unsigned first_offset; /* offset into mapping[first] */
union {
unsigned last_to; /* amount of mapping[last] */
unsigned count2; /* count used in unmarshalling */
};
unsigned char unmarshall; /* unmarshalling phase */
bool incoming; /* T if incoming call */
bool send_pages; /* T if data from mapping should be sent */
......@@ -127,12 +131,11 @@ struct afs_call_type {
*/
struct afs_read {
loff_t pos; /* Where to start reading */
loff_t len; /* How much to read */
loff_t len; /* How much we're asking for */
loff_t actual_len; /* How much we're actually getting */
loff_t remain; /* Amount remaining */
atomic_t usage;
unsigned int remain; /* Amount remaining */
unsigned int index; /* Which page we're reading into */
unsigned int pg_offset; /* Offset in page we're at */
unsigned int nr_pages;
void (*page_done)(struct afs_call *, struct afs_read *);
struct page *pages[];
......@@ -247,7 +250,7 @@ struct afs_cache_vhash {
*/
struct afs_vlocation {
atomic_t usage;
time_t time_of_death; /* time at which put reduced usage to 0 */
time64_t time_of_death; /* time at which put reduced usage to 0 */
struct list_head link; /* link in cell volume location list */
struct list_head grave; /* link in master graveyard list */
struct list_head update; /* link in master update list */
......@@ -258,7 +261,7 @@ struct afs_vlocation {
struct afs_cache_vlocation vldb; /* volume information DB record */
struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
wait_queue_head_t waitq; /* status change waitqueue */
time_t update_at; /* time at which record should be updated */
time64_t update_at; /* time at which record should be updated */
spinlock_t lock; /* access lock */
afs_vlocation_state_t state; /* volume location state */
unsigned short upd_rej_cnt; /* ENOMEDIUM count during update */
......@@ -271,7 +274,7 @@ struct afs_vlocation {
*/
struct afs_server {
atomic_t usage;
time_t time_of_death; /* time at which put reduced usage to 0 */
time64_t time_of_death; /* time at which put reduced usage to 0 */
struct in_addr addr; /* server address */
struct afs_cell *cell; /* cell in which server resides */
struct list_head link; /* link in cell's server list */
......@@ -374,8 +377,8 @@ struct afs_vnode {
struct rb_node server_rb; /* link in server->fs_vnodes */
struct rb_node cb_promise; /* link in server->cb_promises */
struct work_struct cb_broken_work; /* work to be done on callback break */
time_t cb_expires; /* time at which callback expires */
time_t cb_expires_at; /* time used to order cb_promise */
time64_t cb_expires; /* time at which callback expires */
time64_t cb_expires_at; /* time used to order cb_promise */
unsigned cb_version; /* callback version */
unsigned cb_expiry; /* callback expiry time */
afs_callback_type_t cb_type; /* type of callback */
......@@ -557,7 +560,6 @@ extern const struct inode_operations afs_autocell_inode_operations;
extern const struct file_operations afs_mntpt_file_operations;
extern struct vfsmount *afs_d_automount(struct path *);
extern int afs_mntpt_check_symlink(struct afs_vnode *, struct key *);
extern void afs_mntpt_kill_timer(void);
/*
......@@ -718,6 +720,7 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
extern void afs_pages_written_back(struct afs_vnode *, struct afs_call *);
extern ssize_t afs_file_write(struct kiocb *, struct iov_iter *);
extern int afs_writeback_all(struct afs_vnode *);
extern int afs_flush(struct file *, fl_owner_t);
extern int afs_fsync(struct file *, loff_t, loff_t, int);
......
......@@ -84,6 +84,8 @@ int afs_abort_to_error(u32 abort_code)
case RXKADDATALEN: return -EKEYREJECTED;
case RXKADILLEGALLEVEL: return -EKEYREJECTED;
case RXGEN_OPCODE: return -ENOTSUPP;
default: return -EREMOTEIO;
}
}
......@@ -46,59 +46,6 @@ static DECLARE_DELAYED_WORK(afs_mntpt_expiry_timer, afs_mntpt_expiry_timed_out);
static unsigned long afs_mntpt_expiry_timeout = 10 * 60;
/*
* check a symbolic link to see whether it actually encodes a mountpoint
* - sets the AFS_VNODE_MOUNTPOINT flag on the vnode appropriately
*/
int afs_mntpt_check_symlink(struct afs_vnode *vnode, struct key *key)
{
struct page *page;
size_t size;
char *buf;
int ret;
_enter("{%x:%u,%u}",
vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
/* read the contents of the symlink into the pagecache */
page = read_cache_page(AFS_VNODE_TO_I(vnode)->i_mapping, 0,
afs_page_filler, key);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto out;
}
ret = -EIO;
if (PageError(page))
goto out_free;
buf = kmap(page);
/* examine the symlink's contents */
size = vnode->status.size;
_debug("symlink to %*.*s", (int) size, (int) size, buf);
if (size > 2 &&
(buf[0] == '%' || buf[0] == '#') &&
buf[size - 1] == '.'
) {
_debug("symlink is a mountpoint");
spin_lock(&vnode->lock);
set_bit(AFS_VNODE_MOUNTPOINT, &vnode->flags);
vnode->vfs_inode.i_flags |= S_AUTOMOUNT;
spin_unlock(&vnode->lock);
}
ret = 0;
kunmap(page);
out_free:
put_page(page);
out:
_leave(" = %d", ret);
return ret;
}
/*
* no valid lookup procedure on this sort of dir
*/
......
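For reference, the heuristic deleted above classified a symlink by its contents: an AFS mount point's body is typically "#cell:volume." or "%cell:volume.", i.e. it starts with '#' or '%' and ends with a '.'. As a standalone check (name illustrative):

    static bool afs_body_is_mountpoint(const char *buf, size_t size)
    {
            return size > 2 &&
                   (buf[0] == '%' || buf[0] == '#') &&
                   buf[size - 1] == '.';
    }

Reading every symlink through the page cache just to run this test is exactly what the mode-based check in inode.c now avoids.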
......@@ -259,67 +259,74 @@ void afs_flat_call_destructor(struct afs_call *call)
call->buffer = NULL;
}
#define AFS_BVEC_MAX 8
/*
* Load the given bvec with the next few pages.
*/
static void afs_load_bvec(struct afs_call *call, struct msghdr *msg,
struct bio_vec *bv, pgoff_t first, pgoff_t last,
unsigned offset)
{
struct page *pages[AFS_BVEC_MAX];
unsigned int nr, n, i, to, bytes = 0;
nr = min_t(pgoff_t, last - first + 1, AFS_BVEC_MAX);
n = find_get_pages_contig(call->mapping, first, nr, pages);
ASSERTCMP(n, ==, nr);
msg->msg_flags |= MSG_MORE;
for (i = 0; i < nr; i++) {
to = PAGE_SIZE;
if (first + i >= last) {
to = call->last_to;
msg->msg_flags &= ~MSG_MORE;
}
bv[i].bv_page = pages[i];
bv[i].bv_len = to - offset;
bv[i].bv_offset = offset;
bytes += to - offset;
offset = 0;
}
iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, bv, nr, bytes);
}
/*
* attach the data from a bunch of pages on an inode to a call
*/
static int afs_send_pages(struct afs_call *call, struct msghdr *msg)
{
struct page *pages[8];
unsigned count, n, loop, offset, to;
struct bio_vec bv[AFS_BVEC_MAX];
unsigned int bytes, nr, loop, offset;
pgoff_t first = call->first, last = call->last;
int ret;
_enter("");
offset = call->first_offset;
call->first_offset = 0;
do {
_debug("attach %lx-%lx", first, last);
count = last - first + 1;
if (count > ARRAY_SIZE(pages))
count = ARRAY_SIZE(pages);
n = find_get_pages_contig(call->mapping, first, count, pages);
ASSERTCMP(n, ==, count);
loop = 0;
do {
struct bio_vec bvec = {.bv_page = pages[loop],
.bv_offset = offset};
msg->msg_flags = 0;
to = PAGE_SIZE;
if (first + loop >= last)
to = call->last_to;
else
msg->msg_flags = MSG_MORE;
bvec.bv_len = to - offset;
afs_load_bvec(call, msg, bv, first, last, offset);
offset = 0;
bytes = msg->msg_iter.count;
nr = msg->msg_iter.nr_segs;
_debug("- range %u-%u%s",
offset, to, msg->msg_flags ? " [more]" : "");
iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC,
&bvec, 1, to - offset);
/* have to change the state *before* sending the last
/* Have to change the state *before* sending the last
* packet as RxRPC might give us the reply before it
* returns from sending the request */
if (first + loop >= last)
* returns from sending the request.
*/
if (first + nr - 1 >= last)
call->state = AFS_CALL_AWAIT_REPLY;
ret = rxrpc_kernel_send_data(afs_socket, call->rxcall,
msg, to - offset);
msg, bytes);
for (loop = 0; loop < nr; loop++)
put_page(bv[loop].bv_page);
if (ret < 0)
break;
} while (++loop < count);
first += count;
for (loop = 0; loop < count; loop++)
put_page(pages[loop]);
if (ret < 0)
break;
first += nr;
} while (first <= last);
_leave(" = %d", ret);
return ret;
}
......@@ -333,6 +340,8 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
struct rxrpc_call *rxcall;
struct msghdr msg;
struct kvec iov[1];
size_t offset;
u32 abort_code;
int ret;
_enter("%x,{%d},", addr->s_addr, ntohs(call->port));
......@@ -381,9 +390,11 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
msg.msg_controllen = 0;
msg.msg_flags = (call->send_pages ? MSG_MORE : 0);
/* have to change the state *before* sending the last packet as RxRPC
* might give us the reply before it returns from sending the
* request */
/* We have to change the state *before* sending the last packet as
* rxrpc might give us the reply before it returns from sending the
* request. Further, if the send fails, we may already have been given
* a notification and may have collected it.
*/
if (!call->send_pages)
call->state = AFS_CALL_AWAIT_REPLY;
ret = rxrpc_kernel_send_data(afs_socket, rxcall,
......@@ -405,7 +416,17 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp,
return afs_wait_for_call_to_complete(call);
error_do_abort:
rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT, -ret, "KSD");
call->state = AFS_CALL_COMPLETE;
if (ret != -ECONNABORTED) {
rxrpc_kernel_abort_call(afs_socket, rxcall, RX_USER_ABORT,
-ret, "KSD");
} else {
abort_code = 0;
offset = 0;
rxrpc_kernel_recv_data(afs_socket, rxcall, NULL, 0, &offset,
false, &abort_code);
ret = call->type->abort_to_error(abort_code);
}
error_kill_call:
afs_put_call(call);
_leave(" = %d", ret);
......@@ -452,16 +473,18 @@ static void afs_deliver_to_call(struct afs_call *call)
case -EINPROGRESS:
case -EAGAIN:
goto out;
case -ECONNABORTED:
goto call_complete;
case -ENOTCONN:
abort_code = RX_CALL_DEAD;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, -ret, "KNC");
goto do_abort;
goto save_error;
case -ENOTSUPP:
abort_code = RX_INVALID_OPERATION;
abort_code = RXGEN_OPCODE;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, -ret, "KIV");
goto do_abort;
goto save_error;
case -ENODATA:
case -EBADMSG:
case -EMSGSIZE:
......@@ -471,7 +494,7 @@ static void afs_deliver_to_call(struct afs_call *call)
abort_code = RXGEN_SS_UNMARSHAL;
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
abort_code, EBADMSG, "KUM");
goto do_abort;
goto save_error;
}
}
......@@ -482,8 +505,9 @@ static void afs_deliver_to_call(struct afs_call *call)
_leave("");
return;
do_abort:
save_error:
call->error = ret;
call_complete:
call->state = AFS_CALL_COMPLETE;
goto done;
}
......@@ -493,7 +517,6 @@ static void afs_deliver_to_call(struct afs_call *call)
*/
static int afs_wait_for_call_to_complete(struct afs_call *call)
{
const char *abort_why;
int ret;
DECLARE_WAITQUEUE(myself, current);
......@@ -512,13 +535,8 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
continue;
}
abort_why = "KWC";
ret = call->error;
if (call->state == AFS_CALL_COMPLETE)
break;
abort_why = "KWI";
ret = -EINTR;
if (signal_pending(current))
if (call->state == AFS_CALL_COMPLETE ||
signal_pending(current))
break;
schedule();
}
......@@ -526,13 +544,14 @@ static int afs_wait_for_call_to_complete(struct afs_call *call)
remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
/* kill the call */
/* Kill off the call if it's still live. */
if (call->state < AFS_CALL_COMPLETE) {
_debug("call incomplete");
_debug("call interrupted");
rxrpc_kernel_abort_call(afs_socket, call->rxcall,
RX_CALL_DEAD, -ret, abort_why);
RX_USER_ABORT, -EINTR, "KWI");
}
ret = call->error;
_debug("call complete");
afs_put_call(call);
_leave(" = %d", ret);
......
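Three separate fixes land in rxrpc.c above: the send path now batches up to AFS_BVEC_MAX (8) pages into one bio_vec array per rxrpc_kernel_send_data() call, the last-page test becomes first + nr - 1 >= last (the off-by-one fix in the shortlog), and a call interrupted while waiting for completion is now aborted with RX_USER_ABORT/-EINTR rather than RX_CALL_DEAD. The per-batch send, greatly condensed:

    afs_load_bvec(call, msg, bv, first, last, offset);   /* fills bv[], manages MSG_MORE */
    if (first + msg->msg_iter.nr_segs - 1 >= last)
            call->state = AFS_CALL_AWAIT_REPLY;          /* flip state before the final send */
    ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, msg, msg->msg_iter.count);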
......@@ -114,7 +114,7 @@ void afs_clear_permits(struct afs_vnode *vnode)
mutex_lock(&vnode->permits_lock);
permits = vnode->permits;
rcu_assign_pointer(vnode->permits, NULL);
RCU_INIT_POINTER(vnode->permits, NULL);
mutex_unlock(&vnode->permits_lock);
if (permits)
......@@ -340,17 +340,22 @@ int afs_permission(struct inode *inode, int mask)
} else {
if (!(access & AFS_ACE_LOOKUP))
goto permission_denied;
if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
goto permission_denied;
if (mask & (MAY_EXEC | MAY_READ)) {
if (!(access & AFS_ACE_READ))
goto permission_denied;
if (!(inode->i_mode & S_IRUSR))
goto permission_denied;
} else if (mask & MAY_WRITE) {
if (!(access & AFS_ACE_WRITE))
goto permission_denied;
if (!(inode->i_mode & S_IWUSR))
goto permission_denied;
}
}
key_put(key);
ret = generic_permission(inode, mask);
_leave(" = %d", ret);
return ret;
......
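The permission change above requires both layers to agree for non-directory access: the caller's AFS ACL access mask and the owner bits of the vnode's own mode must each grant the operation. Condensed:

    if (mask & (MAY_EXEC | MAY_READ)) {
            if (!(access & AFS_ACE_READ) || !(inode->i_mode & S_IRUSR))
                    goto permission_denied;
            if ((mask & MAY_EXEC) && !(inode->i_mode & S_IXUSR))
                    goto permission_denied;
    } else if (mask & MAY_WRITE) {
            if (!(access & AFS_ACE_WRITE) || !(inode->i_mode & S_IWUSR))
                    goto permission_denied;
    }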
......@@ -242,7 +242,7 @@ void afs_put_server(struct afs_server *server)
spin_lock(&afs_server_graveyard_lock);
if (atomic_read(&server->usage) == 0) {
list_move_tail(&server->grave, &afs_server_graveyard);
server->time_of_death = get_seconds();
server->time_of_death = ktime_get_real_seconds();
queue_delayed_work(afs_wq, &afs_server_reaper,
afs_server_timeout * HZ);
}
......@@ -277,9 +277,9 @@ static void afs_reap_server(struct work_struct *work)
LIST_HEAD(corpses);
struct afs_server *server;
unsigned long delay, expiry;
time_t now;
time64_t now;
now = get_seconds();
now = ktime_get_real_seconds();
spin_lock(&afs_server_graveyard_lock);
while (!list_empty(&afs_server_graveyard)) {
......
......@@ -340,7 +340,8 @@ static void afs_vlocation_queue_for_updates(struct afs_vlocation *vl)
struct afs_vlocation *xvl;
/* wait at least 10 minutes before updating... */
vl->update_at = get_seconds() + afs_vlocation_update_timeout;
vl->update_at = ktime_get_real_seconds() +
afs_vlocation_update_timeout;
spin_lock(&afs_vlocation_updates_lock);
......@@ -506,7 +507,7 @@ void afs_put_vlocation(struct afs_vlocation *vl)
if (atomic_read(&vl->usage) == 0) {
_debug("buried");
list_move_tail(&vl->grave, &afs_vlocation_graveyard);
vl->time_of_death = get_seconds();
vl->time_of_death = ktime_get_real_seconds();
queue_delayed_work(afs_wq, &afs_vlocation_reap,
afs_vlocation_timeout * HZ);
......@@ -543,11 +544,11 @@ static void afs_vlocation_reaper(struct work_struct *work)
LIST_HEAD(corpses);
struct afs_vlocation *vl;
unsigned long delay, expiry;
time_t now;
time64_t now;
_enter("");
now = get_seconds();
now = ktime_get_real_seconds();
spin_lock(&afs_vlocation_graveyard_lock);
while (!list_empty(&afs_vlocation_graveyard)) {
......@@ -622,13 +623,13 @@ static void afs_vlocation_updater(struct work_struct *work)
{
struct afs_cache_vlocation vldb;
struct afs_vlocation *vl, *xvl;
time_t now;
time64_t now;
long timeout;
int ret;
_enter("");
now = get_seconds();
now = ktime_get_real_seconds();
/* find a record to update */
spin_lock(&afs_vlocation_updates_lock);
......@@ -684,7 +685,8 @@ static void afs_vlocation_updater(struct work_struct *work)
/* and then reschedule */
_debug("reschedule");
vl->update_at = get_seconds() + afs_vlocation_update_timeout;
vl->update_at = ktime_get_real_seconds() +
afs_vlocation_update_timeout;
spin_lock(&afs_vlocation_updates_lock);
......
......@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
* partly or wholly fill a page that's under preparation for writing
*/
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
loff_t pos, struct page *page)
loff_t pos, unsigned int len, struct page *page)
{
struct afs_read *req;
loff_t i_size;
int ret;
_enter(",,%llu", (unsigned long long)pos);
......@@ -99,14 +98,10 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
atomic_set(&req->usage, 1);
req->pos = pos;
req->len = len;
req->nr_pages = 1;
req->pages[0] = page;
i_size = i_size_read(&vnode->vfs_inode);
if (pos + PAGE_SIZE > i_size)
req->len = i_size - pos;
else
req->len = PAGE_SIZE;
get_page(page);
ret = afs_vnode_fetch_data(vnode, key, req);
afs_put_read(req);
......@@ -159,12 +154,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
kfree(candidate);
return -ENOMEM;
}
*pagep = page;
/* page won't leak in error case: it eventually gets cleaned off LRU */
if (!PageUptodate(page) && len != PAGE_SIZE) {
ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
if (ret < 0) {
unlock_page(page);
put_page(page);
kfree(candidate);
_leave(" = %d [prep]", ret);
return ret;
......@@ -172,6 +167,9 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
SetPageUptodate(page);
}
/* page won't leak in error case: it eventually gets cleaned off LRU */
*pagep = page;
try_again:
spin_lock(&vnode->writeback_lock);
......@@ -233,7 +231,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
if (wb->state == AFS_WBACK_PENDING)
wb->state = AFS_WBACK_CONFLICTING;
spin_unlock(&vnode->writeback_lock);
if (PageDirty(page)) {
if (clear_page_dirty_for_io(page)) {
ret = afs_write_back_from_locked_page(wb, page);
if (ret < 0) {
afs_put_writeback(candidate);
......@@ -257,7 +255,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
struct page *page, void *fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
struct key *key = file->private_data;
loff_t i_size, maybe_i_size;
int ret;
_enter("{%x:%u},{%lx}",
vnode->fid.vid, vnode->fid.vnode, page->index);
......@@ -273,6 +273,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
spin_unlock(&vnode->writeback_lock);
}
if (!PageUptodate(page)) {
if (copied < len) {
/* Try and load any missing data from the server. The
* unmarshalling routine will take care of clearing any
* bits that are beyond the EOF.
*/
ret = afs_fill_page(vnode, key, pos + copied,
len - copied, page);
if (ret < 0)
return ret;
}
SetPageUptodate(page);
}
set_page_dirty(page);
if (PageDirty(page))
_debug("dirtied");
......@@ -307,10 +321,14 @@ static void afs_kill_pages(struct afs_vnode *vnode, bool error,
ASSERTCMP(pv.nr, ==, count);
for (loop = 0; loop < count; loop++) {
ClearPageUptodate(pv.pages[loop]);
struct page *page = pv.pages[loop];
ClearPageUptodate(page);
if (error)
SetPageError(pv.pages[loop]);
end_page_writeback(pv.pages[loop]);
SetPageError(page);
if (PageWriteback(page))
end_page_writeback(page);
if (page->index >= first)
first = page->index + 1;
}
__pagevec_release(&pv);
......@@ -335,8 +353,6 @@ static int afs_write_back_from_locked_page(struct afs_writeback *wb,
_enter(",%lx", primary_page->index);
count = 1;
if (!clear_page_dirty_for_io(primary_page))
BUG();
if (test_set_page_writeback(primary_page))
BUG();
......@@ -502,17 +518,17 @@ static int afs_writepages_region(struct address_space *mapping,
*/
lock_page(page);
if (page->mapping != mapping) {
if (page->mapping != mapping || !PageDirty(page)) {
unlock_page(page);
put_page(page);
continue;
}
if (PageWriteback(page)) {
unlock_page(page);
if (wbc->sync_mode != WB_SYNC_NONE)
wait_on_page_writeback(page);
if (PageWriteback(page) || !PageDirty(page)) {
unlock_page(page);
put_page(page);
continue;
}
......@@ -523,6 +539,8 @@ static int afs_writepages_region(struct address_space *mapping,
wb->state = AFS_WBACK_WRITING;
spin_unlock(&wb->vnode->writeback_lock);
if (!clear_page_dirty_for_io(page))
BUG();
ret = afs_write_back_from_locked_page(wb, page);
unlock_page(page);
put_page(page);
......@@ -745,6 +763,20 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
return ret;
}
/*
* Flush out all outstanding writes on a file opened for writing when it is
* closed.
*/
int afs_flush(struct file *file, fl_owner_t id)
{
_enter("");
if ((file->f_mode & FMODE_WRITE) == 0)
return 0;
return vfs_fsync(file, 0);
}
/*
* notification that a previously read-only page is about to become writable
* - if it returns an error, the caller will deliver a bus error signal
......
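Finally, write.c picks up the remaining pieces: afs_fill_page() takes an explicit length, afs_write_end() copes with a short copy from userspace by reading the missing span from the server before marking the page uptodate, afs_kill_pages() only ends writeback on pages actually under writeback, and the clear_page_dirty_for_io() call moves from afs_write_back_from_locked_page() out to its callers. The short-copy handling in ->write_end, condensed:

    if (!PageUptodate(page)) {
            if (copied < len) {
                    /* fetch what userspace didn't supply; the unmarshaller
                     * zeroes anything beyond EOF */
                    ret = afs_fill_page(vnode, key, pos + copied,
                                        len - copied, page);
                    if (ret < 0)
                            return ret;
            }
            SetPageUptodate(page);
    }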