Commit 633a8e89 authored by Linus Torvalds

Merge tag '5.17-rc3-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs fixes from Steve French:
 "SMB3 client fixes including:

   - multiple fscache related fixes, re-enabling the ability to read and
     write to cached files with cifs.ko (this had been temporarily
     disabled a few weeks ago due to the recent fscache changes)

   - also includes a new fscache helper function ("query_occupancy")
     used by the above

   - fix for multiuser mounts and NTLMSSP auth (workstation name),
     marked for stable

   - fix a lock ordering problem in the multichannel code

   - trivial fix for a malformed kernel-doc comment"

* tag '5.17-rc3-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
  cifs: fix workstation_name for multiuser mounts
  Invalidate fscache cookie only when inode attributes are changed.
  cifs: Fix the readahead conversion to manage the batch when reading from cache
  cifs: Implement cache I/O by accessing the cache directly
  netfs, cachefiles: Add a method to query presence of data in the cache
  cifs: Transition from ->readpages() to ->readahead()
  cifs: unlock chan_lock before calling cifs_put_tcp_session
  Fix a warning about a malformed kernel doc comment in cifs
parents dcb85f85 d3b331fb
@@ -462,6 +462,10 @@ operation table looks like the following::
 			      struct iov_iter *iter,
 			      netfs_io_terminated_t term_func,
 			      void *term_func_priv);
+
+		int (*query_occupancy)(struct netfs_cache_resources *cres,
+				       loff_t start, size_t len, size_t granularity,
+				       loff_t *_data_start, size_t *_data_len);
 	};

 With a termination handler function pointer::
@@ -536,6 +540,18 @@ The methods defined in the table are:
    indicating whether the termination is definitely happening in the caller's
    context.

+ * ``query_occupancy()``
+
+   [Required] Called to find out where the next piece of data is within a
+   particular region of the cache.  The start and length of the region to be
+   queried are passed in, along with the granularity to which the answer needs
+   to be aligned.  The function passes back the start and length of the data,
+   if any, available within that region.  Note that there may be a hole at the
+   front.
+
+   It returns 0 if some data was found, -ENODATA if there was no usable data
+   within the region or -ENOBUFS if there is no caching on this file.
+
 Note that these methods are passed a pointer to the cache resource structure,
 not the read request structure as they could be used in other situations where
 there isn't a read request structure as well, such as writing dirty data to the
......
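The query_occupancy() method lends itself to iterating over whatever the cache holds for a range. As an illustrative sketch (not part of this commit), a netfs caller that has already begun an operation on the cache resources might walk the cached extents like this; the helper name walk_cached_extents is hypothetical:

	static void walk_cached_extents(struct netfs_cache_resources *cres,
					loff_t start, size_t len)
	{
		loff_t data_start, data_end;
		size_t data_len;

		while (len > 0) {
			/* Ask for the next aligned chunk of cached data */
			if (cres->ops->query_occupancy(cres, start, len, PAGE_SIZE,
						       &data_start, &data_len) < 0)
				break;	/* -ENODATA or -ENOBUFS: nothing usable */

			/* [data_start, data_start + data_len) is in the cache;
			 * anything between start and data_start is a hole at
			 * the front of the region.
			 */
			data_end = data_start + data_len;
			if (data_end >= start + (loff_t)len)
				break;	/* Region exhausted */
			len = start + len - data_end;
			start = data_end;
		}
	}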
@@ -191,6 +191,64 @@ static int cachefiles_read(struct netfs_cache_resources *cres,
 	return ret;
 }

+/*
+ * Query the occupancy of the cache in a region, returning where the next chunk
+ * of data starts and how long it is.
+ */
+static int cachefiles_query_occupancy(struct netfs_cache_resources *cres,
+				      loff_t start, size_t len, size_t granularity,
+				      loff_t *_data_start, size_t *_data_len)
+{
+	struct cachefiles_object *object;
+	struct file *file;
+	loff_t off, off2;
+
+	*_data_start = -1;
+	*_data_len = 0;
+
+	if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ))
+		return -ENOBUFS;
+
+	object = cachefiles_cres_object(cres);
+	file = cachefiles_cres_file(cres);
+	granularity = max_t(size_t, object->volume->cache->bsize, granularity);
+
+	_enter("%pD,%li,%llx,%zx/%llx",
+	       file, file_inode(file)->i_ino, start, len,
+	       i_size_read(file_inode(file)));
+
+	off = cachefiles_inject_read_error();
+	if (off == 0)
+		off = vfs_llseek(file, start, SEEK_DATA);
+	if (off == -ENXIO)
+		return -ENODATA; /* Beyond EOF */
+	if (off < 0 && off >= (loff_t)-MAX_ERRNO)
+		return -ENOBUFS; /* Error. */
+	if (round_up(off, granularity) >= start + len)
+		return -ENODATA; /* No data in range */
+
+	off2 = cachefiles_inject_read_error();
+	if (off2 == 0)
+		off2 = vfs_llseek(file, off, SEEK_HOLE);
+	if (off2 == -ENXIO)
+		return -ENODATA; /* Beyond EOF */
+	if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO)
+		return -ENOBUFS; /* Error. */
+
+	/* Round away partial blocks */
+	off = round_up(off, granularity);
+	off2 = round_down(off2, granularity);
+	if (off2 <= off)
+		return -ENODATA;
+
+	*_data_start = off;
+	if (off2 > start + len)
+		*_data_len = len;
+	else
+		*_data_len = off2 - off;
+	return 0;
+}
+
 /*
  * Handle completion of a write to the cache.
  */
@@ -545,6 +603,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
 	.write = cachefiles_write,
 	.prepare_read = cachefiles_prepare_read,
 	.prepare_write = cachefiles_prepare_write,
+	.query_occupancy = cachefiles_query_occupancy,
 };

 /*
......
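cachefiles_query_occupancy() resolves the query with vfs_llseek(SEEK_DATA)/vfs_llseek(SEEK_HOLE) on the backing file, so the sparseness of the cache file is the source of truth. The same extent-probing technique can be sketched from userspace with plain lseek(2), assuming a filesystem that supports SEEK_DATA/SEEK_HOLE:

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(int argc, char *argv[])
	{
		off_t data, hole;
		int fd;

		if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
			return 1;

		/* Find the first byte of data, then the end of that extent,
		 * and repeat; lseek() fails with ENXIO once past EOF.
		 */
		data = lseek(fd, 0, SEEK_DATA);
		while (data >= 0) {
			hole = lseek(fd, data, SEEK_HOLE);
			printf("data: %lld..%lld\n",
			       (long long)data, (long long)hole);
			data = lseek(fd, hole, SEEK_DATA);
		}
		close(fd);
		return 0;
	}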
@@ -162,7 +162,7 @@ static void cifs_resolve_server(struct work_struct *work)
 	mutex_unlock(&server->srv_mutex);
 }

-/**
+/*
  * Mark all sessions and tcons for reconnect.
  *
  * @server needs to be previously set to CifsNeedReconnect.
@@ -1831,13 +1831,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
 		int i;

 		for (i = 1; i < chan_count; i++) {
-			/*
-			 * note: for now, we're okay accessing ses->chans
-			 * without chan_lock. But when chans can go away, we'll
-			 * need to introduce ref counting to make sure that chan
-			 * is not freed from under us.
-			 */
+			spin_unlock(&ses->chan_lock);
 			cifs_put_tcp_session(ses->chans[i].server, 0);
+			spin_lock(&ses->chan_lock);
 			ses->chans[i].server = NULL;
 		}
 	}
@@ -1981,6 +1977,19 @@ cifs_set_cifscreds(struct smb3_fs_context *ctx, struct cifs_ses *ses)
 		}
 	}

+	ctx->workstation_name = kstrdup(ses->workstation_name, GFP_KERNEL);
+	if (!ctx->workstation_name) {
+		cifs_dbg(FYI, "Unable to allocate memory for workstation_name\n");
+		rc = -ENOMEM;
+		kfree(ctx->username);
+		ctx->username = NULL;
+		kfree_sensitive(ctx->password);
+		ctx->password = NULL;
+		kfree(ctx->domainname);
+		ctx->domainname = NULL;
+		goto out_key_put;
+	}
+
 out_key_put:
 	up_read(&key->sem);
 	key_put(key);
......
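The cifs_put_smb_ses() hunk above fixes the lock ordering by dropping chan_lock around each cifs_put_tcp_session() call, which can take other locks ordered before it, and retaking it before touching ses->chans[] again. In context, the resulting pattern looks roughly like the following sketch; the surrounding lines are reconstructed for illustration and are not part of the hunk:

	spin_lock(&ses->chan_lock);
	if (chan_count > 1) {
		int i;

		for (i = 1; i < chan_count; i++) {
			/* Drop the spinlock around the call that nests
			 * other locks under it, then retake it before
			 * modifying the protected array.
			 */
			spin_unlock(&ses->chan_lock);
			cifs_put_tcp_session(ses->chans[i].server, 0);
			spin_lock(&ses->chan_lock);
			ses->chans[i].server = NULL;
		}
	}
	spin_unlock(&ses->chan_lock);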
@@ -4269,8 +4269,6 @@ cifs_readv_complete(struct work_struct *work)
 	for (i = 0; i < rdata->nr_pages; i++) {
 		struct page *page = rdata->pages[i];

-		lru_cache_add(page);
-
 		if (rdata->result == 0 ||
 		    (rdata->result == -EAGAIN && got_bytes)) {
 			flush_dcache_page(page);
@@ -4278,12 +4276,12 @@ cifs_readv_complete(struct work_struct *work)
 		} else
 			SetPageError(page);

-		unlock_page(page);
-
 		if (rdata->result == 0 ||
 		    (rdata->result == -EAGAIN && got_bytes))
 			cifs_readpage_to_fscache(rdata->mapping->host, page);

+		unlock_page(page);
+
 		got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);

 		put_page(page);
@@ -4340,7 +4338,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 			 * fill them until the writes are flushed.
 			 */
 			zero_user(page, 0, PAGE_SIZE);
-			lru_cache_add(page);
 			flush_dcache_page(page);
 			SetPageUptodate(page);
 			unlock_page(page);
@@ -4350,7 +4347,6 @@ readpages_fill_pages(struct TCP_Server_Info *server,
 			continue;
 		} else {
 			/* no need to hold page hostage */
-			lru_cache_add(page);
 			unlock_page(page);
 			put_page(page);
 			rdata->pages[i] = NULL;
@@ -4393,92 +4389,20 @@ cifs_readpages_copy_into_pages(struct TCP_Server_Info *server,
 	return readpages_fill_pages(server, rdata, iter, iter->count);
 }

-static int
-readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
-		    unsigned int rsize, struct list_head *tmplist,
-		    unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
-{
-	struct page *page, *tpage;
-	unsigned int expected_index;
-	int rc;
-	gfp_t gfp = readahead_gfp_mask(mapping);
-
-	INIT_LIST_HEAD(tmplist);
-
-	page = lru_to_page(page_list);
-
-	/*
-	 * Lock the page and put it in the cache. Since no one else
-	 * should have access to this page, we're safe to simply set
-	 * PG_locked without checking it first.
-	 */
-	__SetPageLocked(page);
-	rc = add_to_page_cache_locked(page, mapping,
-				      page->index, gfp);
-
-	/* give up if we can't stick it in the cache */
-	if (rc) {
-		__ClearPageLocked(page);
-		return rc;
-	}
-
-	/* move first page to the tmplist */
-	*offset = (loff_t)page->index << PAGE_SHIFT;
-	*bytes = PAGE_SIZE;
-	*nr_pages = 1;
-	list_move_tail(&page->lru, tmplist);
-
-	/* now try and add more pages onto the request */
-	expected_index = page->index + 1;
-	list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
-		/* discontinuity ? */
-		if (page->index != expected_index)
-			break;
-
-		/* would this page push the read over the rsize? */
-		if (*bytes + PAGE_SIZE > rsize)
-			break;
-
-		__SetPageLocked(page);
-		rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
-		if (rc) {
-			__ClearPageLocked(page);
-			break;
-		}
-		list_move_tail(&page->lru, tmplist);
-		(*bytes) += PAGE_SIZE;
-		expected_index++;
-		(*nr_pages)++;
-	}
-	return rc;
-}
-
-static int cifs_readpages(struct file *file, struct address_space *mapping,
-	struct list_head *page_list, unsigned num_pages)
+static void cifs_readahead(struct readahead_control *ractl)
 {
 	int rc;
-	int err = 0;
-	struct list_head tmplist;
-	struct cifsFileInfo *open_file = file->private_data;
-	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+	struct cifsFileInfo *open_file = ractl->file->private_data;
+	struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
 	struct TCP_Server_Info *server;
 	pid_t pid;
-	unsigned int xid;
+	unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
+	pgoff_t next_cached = ULONG_MAX;
+	bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
+		cifs_inode_cookie(ractl->mapping->host)->cache_priv;
+	bool check_cache = caching;

 	xid = get_xid();

-	/*
-	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
-	 * immediately if the cookie is negative
-	 *
-	 * After this point, every page in the list might have PG_fscache set,
-	 * so we will need to clean that up off of every page we don't use.
-	 */
-	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
-					 &num_pages);
-	if (rc == 0) {
-		free_xid(xid);
-		return rc;
-	}
-
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
@@ -4489,39 +4413,73 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 	server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);

 	cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
-		 __func__, file, mapping, num_pages);
+		 __func__, ractl->file, ractl->mapping, readahead_count(ractl));

 	/*
-	 * Start with the page at end of list and move it to private
-	 * list. Do the same with any following pages until we hit
-	 * the rsize limit, hit an index discontinuity, or run out of
-	 * pages. Issue the async read and then start the loop again
-	 * until the list is empty.
-	 *
-	 * Note that list order is important. The page_list is in
-	 * the order of declining indexes. When we put the pages in
-	 * the rdata->pages, then we want them in increasing order.
+	 * Chop the readahead request up into rsize-sized read requests.
 	 */
-	while (!list_empty(page_list) && !err) {
-		unsigned int i, nr_pages, bytes, rsize;
-		loff_t offset;
-		struct page *page, *tpage;
+	while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
+		unsigned int i, got, rsize;
+		struct page *page;
 		struct cifs_readdata *rdata;
 		struct cifs_credits credits_on_stack;
 		struct cifs_credits *credits = &credits_on_stack;
+		pgoff_t index = readahead_index(ractl) + last_batch_size;
+
+		/*
+		 * Find out if we have anything cached in the range of
+		 * interest, and if so, where the next chunk of cached data is.
+		 */
+		if (caching) {
+			if (check_cache) {
+				rc = cifs_fscache_query_occupancy(
+					ractl->mapping->host, index, nr_pages,
+					&next_cached, &cache_nr_pages);
+				if (rc < 0)
+					caching = false;
+				check_cache = false;
+			}
+
+			if (index == next_cached) {
+				/*
+				 * TODO: Send a whole batch of pages to be read
+				 * by the cache.
+				 */
+				page = readahead_page(ractl);
+				last_batch_size = 1 << thp_order(page);
+				if (cifs_readpage_from_fscache(ractl->mapping->host,
+							       page) < 0) {
+					/*
+					 * TODO: Deal with cache read failure
+					 * here, but for the moment, delegate
+					 * that to readpage.
+					 */
+					caching = false;
+				}
+				unlock_page(page);
+				next_cached++;
+				cache_nr_pages--;
+				if (cache_nr_pages == 0)
+					check_cache = true;
+				continue;
+			}
+		}

 		if (open_file->invalidHandle) {
 			rc = cifs_reopen_file(open_file, true);
-			if (rc == -EAGAIN)
-				continue;
-			else if (rc)
-				break;
+			if (rc) {
+				if (rc == -EAGAIN)
+					continue;
+				break;
+			}
 		}

 		rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
 						   &rsize, credits);
 		if (rc)
 			break;
+		nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
+		nr_pages = min_t(size_t, nr_pages, next_cached - index);

 		/*
 		 * Give up immediately if rsize is too small to read an entire
@@ -4529,16 +4487,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		 * reach this point however since we set ra_pages to 0 when the
 		 * rsize is smaller than a cache page.
 		 */
-		if (unlikely(rsize < PAGE_SIZE)) {
-			add_credits_and_wake_if(server, credits, 0);
-			free_xid(xid);
-			return 0;
-		}
-
-		nr_pages = 0;
-		err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
-					  &nr_pages, &offset, &bytes);
-		if (!nr_pages) {
+		if (unlikely(!nr_pages)) {
 			add_credits_and_wake_if(server, credits, 0);
 			break;
 		}
@@ -4546,22 +4495,23 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
 		if (!rdata) {
 			/* best to give up if we're out of mem */
-			list_for_each_entry_safe(page, tpage, &tmplist, lru) {
-				list_del(&page->lru);
-				lru_cache_add(page);
-				unlock_page(page);
-				put_page(page);
-			}
-			rc = -ENOMEM;
 			add_credits_and_wake_if(server, credits, 0);
 			break;
 		}

+		got = __readahead_batch(ractl, rdata->pages, nr_pages);
+		if (got != nr_pages) {
+			pr_warn("__readahead_batch() returned %u/%u\n",
+				got, nr_pages);
+			nr_pages = got;
+		}
+
+		rdata->nr_pages = nr_pages;
+		rdata->bytes = readahead_batch_length(ractl);
 		rdata->cfile = cifsFileInfo_get(open_file);
 		rdata->server = server;
-		rdata->mapping = mapping;
-		rdata->offset = offset;
-		rdata->bytes = bytes;
+		rdata->mapping = ractl->mapping;
+		rdata->offset = readahead_pos(ractl);
 		rdata->pid = pid;
 		rdata->pagesz = PAGE_SIZE;
 		rdata->tailsz = PAGE_SIZE;
@@ -4569,13 +4519,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		rdata->copy_into_pages = cifs_readpages_copy_into_pages;
 		rdata->credits = credits_on_stack;

-		list_for_each_entry_safe(page, tpage, &tmplist, lru) {
-			list_del(&page->lru);
-			rdata->pages[rdata->nr_pages++] = page;
-		}
-
 		rc = adjust_credits(server, &rdata->credits, rdata->bytes);
-
 		if (!rc) {
 			if (rdata->cfile->invalidHandle)
 				rc = -EAGAIN;
@@ -4587,7 +4531,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 			add_credits_and_wake_if(server, &rdata->credits, 0);
 			for (i = 0; i < rdata->nr_pages; i++) {
 				page = rdata->pages[i];
-				lru_cache_add(page);
 				unlock_page(page);
 				put_page(page);
 			}
@@ -4597,10 +4540,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		}

 		kref_put(&rdata->refcount, cifs_readdata_release);
+		last_batch_size = nr_pages;
 	}

 	free_xid(xid);
-	return rc;
 }

 /*
@@ -5006,7 +4949,7 @@ static int cifs_set_page_dirty(struct page *page)

 const struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
-	.readpages = cifs_readpages,
+	.readahead = cifs_readahead,
 	.writepage = cifs_writepage,
 	.writepages = cifs_writepages,
 	.write_begin = cifs_write_begin,
......
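The conversion above moves cifs from the list-based ->readpages() to the readahead_control-based ->readahead() API, which hands the method a batch of pages that are already locked and inserted into the page cache. A minimal sketch of the shape of such an implementation, using the same linux/pagemap.h helpers (illustrative only, with the actual I/O submission elided):

	static void example_readahead(struct readahead_control *ractl)
	{
		struct page *page;

		pr_debug("readahead %zu pages at index %lu (pos %llu)\n",
			 (size_t)readahead_count(ractl), readahead_index(ractl),
			 (unsigned long long)readahead_pos(ractl));

		/* Each page comes locked with a reference held; consume them
		 * in order.  A real implementation would batch them into I/O
		 * requests (as cifs_readahead() does with __readahead_batch())
		 * and, on success, SetPageUptodate() before unlocking.
		 */
		while ((page = readahead_page(ractl))) {
			/* submit or queue the read for this page here */
			unlock_page(page);
			put_page(page);
		}
	}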
@@ -134,37 +134,127 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
 	}
 }

+static inline void fscache_end_operation(struct netfs_cache_resources *cres)
+{
+	const struct netfs_cache_ops *ops = fscache_operation_valid(cres);
+
+	if (ops)
+		ops->end_operation(cres);
+}
+
 /*
- * Retrieve a page from FS-Cache
+ * Fallback page reading interface.
  */
-int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static int fscache_fallback_read_page(struct inode *inode, struct page *page)
 {
-	cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
-		 __func__, CIFS_I(inode)->fscache, page, inode);
-	return -ENOBUFS; // Needs conversion to using netfslib
+	struct netfs_cache_resources cres;
+	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+	struct iov_iter iter;
+	struct bio_vec bvec[1];
+	int ret;
+
+	memset(&cres, 0, sizeof(cres));
+	bvec[0].bv_page = page;
+	bvec[0].bv_offset = 0;
+	bvec[0].bv_len = PAGE_SIZE;
+	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+	ret = fscache_begin_read_operation(&cres, cookie);
+	if (ret < 0)
+		return ret;
+
+	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
+			   NULL, NULL);
+	fscache_end_operation(&cres);
+	return ret;
 }

 /*
- * Retrieve a set of pages from FS-Cache
+ * Fallback page writing interface.
  */
-int __cifs_readpages_from_fscache(struct inode *inode,
-				  struct address_space *mapping,
-				  struct list_head *pages,
-				  unsigned *nr_pages)
+static int fscache_fallback_write_page(struct inode *inode, struct page *page,
+				       bool no_space_allocated_yet)
 {
-	cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n",
-		 __func__, CIFS_I(inode)->fscache, *nr_pages, inode);
-	return -ENOBUFS; // Needs conversion to using netfslib
+	struct netfs_cache_resources cres;
+	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+	struct iov_iter iter;
+	struct bio_vec bvec[1];
+	loff_t start = page_offset(page);
+	size_t len = PAGE_SIZE;
+	int ret;
+
+	memset(&cres, 0, sizeof(cres));
+	bvec[0].bv_page = page;
+	bvec[0].bv_offset = 0;
+	bvec[0].bv_len = PAGE_SIZE;
+	iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);
+
+	ret = fscache_begin_write_operation(&cres, cookie);
+	if (ret < 0)
+		return ret;
+
+	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
+				      no_space_allocated_yet);
+	if (ret == 0)
+		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
+	fscache_end_operation(&cres);
+	return ret;
 }

-void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+/*
+ * Retrieve a page from FS-Cache
+ */
+int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
 {
-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+	int ret;
+
+	cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n",
+		 __func__, cifs_inode_cookie(inode), page, inode);
+
+	ret = fscache_fallback_read_page(inode, page);
+	if (ret < 0)
+		return ret;

-	WARN_ON(!cifsi->fscache);
+	/* Read completed synchronously */
+	SetPageUptodate(page);
+	return 0;
+}

+void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
+{
 	cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n",
-		 __func__, cifsi->fscache, page, inode);
+		 __func__, cifs_inode_cookie(inode), page, inode);
+
+	fscache_fallback_write_page(inode, page, true);
+}
+
+/*
+ * Query the cache occupancy.
+ */
+int __cifs_fscache_query_occupancy(struct inode *inode,
+				   pgoff_t first, unsigned int nr_pages,
+				   pgoff_t *_data_first,
+				   unsigned int *_data_nr_pages)
+{
+	struct netfs_cache_resources cres;
+	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
+	loff_t start, data_start;
+	size_t len, data_len;
+	int ret;

-	// Needs conversion to using netfslib
+	ret = fscache_begin_read_operation(&cres, cookie);
+	if (ret < 0)
+		return ret;
+
+	start = first * PAGE_SIZE;
+	len = nr_pages * PAGE_SIZE;
+	ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
+					&data_start, &data_len);
+	if (ret == 0) {
+		*_data_first = data_start / PAGE_SIZE;
+		*_data_nr_pages = len / PAGE_SIZE;
+	}
+
+	fscache_end_operation(&cres);
+	return ret;
 }
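The two fallback helpers above drive a single-page transfer by wrapping the page in a one-element bio_vec and an iov_iter. The same pattern scales to several pages at once; a hypothetical multi-page variant (the name fallback_read_pages is illustrative, and it reuses the fscache_end_operation() helper defined above) might look like:

	static int fallback_read_pages(struct fscache_cookie *cookie,
				       struct page **pages, unsigned int nr,
				       loff_t pos)
	{
		struct netfs_cache_resources cres;
		struct iov_iter iter;
		struct bio_vec *bvec;
		unsigned int i;
		int ret;

		bvec = kcalloc(nr, sizeof(*bvec), GFP_KERNEL);
		if (!bvec)
			return -ENOMEM;

		/* Describe the whole run of pages to read into */
		memset(&cres, 0, sizeof(cres));
		for (i = 0; i < nr; i++) {
			bvec[i].bv_page = pages[i];
			bvec[i].bv_offset = 0;
			bvec[i].bv_len = PAGE_SIZE;
		}
		iov_iter_bvec(&iter, READ, bvec, nr, (size_t)nr * PAGE_SIZE);

		ret = fscache_begin_read_operation(&cres, cookie);
		if (ret == 0) {
			ret = fscache_read(&cres, pos, &iter,
					   NETFS_READ_HOLE_FAIL, NULL, NULL);
			fscache_end_operation(&cres);
		}
		kfree(bvec);
		return ret;
	}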
@@ -9,6 +9,7 @@
 #ifndef _CIFS_FSCACHE_H
 #define _CIFS_FSCACHE_H

+#include <linux/swap.h>
 #include <linux/fscache.h>

 #include "cifsglob.h"
@@ -58,14 +59,6 @@ void cifs_fscache_fill_coherency(struct inode *inode,
 }

-extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
-extern int __cifs_readpage_from_fscache(struct inode *, struct page *);
-extern int __cifs_readpages_from_fscache(struct inode *,
-					 struct address_space *,
-					 struct list_head *,
-					 unsigned *);
-extern void __cifs_readpage_to_fscache(struct inode *, struct page *);
-
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode)
 {
 	return CIFS_I(inode)->fscache;
@@ -80,33 +73,52 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags
 			   i_size_read(inode), flags);
 }

-static inline int cifs_readpage_from_fscache(struct inode *inode,
-					     struct page *page)
-{
-	if (CIFS_I(inode)->fscache)
-		return __cifs_readpage_from_fscache(inode, page);
+extern int __cifs_fscache_query_occupancy(struct inode *inode,
+					  pgoff_t first, unsigned int nr_pages,
+					  pgoff_t *_data_first,
+					  unsigned int *_data_nr_pages);

+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+					       pgoff_t first, unsigned int nr_pages,
+					       pgoff_t *_data_first,
+					       unsigned int *_data_nr_pages)
+{
+	if (!cifs_inode_cookie(inode))
 		return -ENOBUFS;
+	return __cifs_fscache_query_occupancy(inode, first, nr_pages,
+					      _data_first, _data_nr_pages);
 }

-static inline int cifs_readpages_from_fscache(struct inode *inode,
-					      struct address_space *mapping,
-					      struct list_head *pages,
-					      unsigned *nr_pages)
+extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
+extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage);
+
+static inline int cifs_readpage_from_fscache(struct inode *inode,
+					     struct page *page)
 {
-	if (CIFS_I(inode)->fscache)
-		return __cifs_readpages_from_fscache(inode, mapping, pages,
-						     nr_pages);
+	if (cifs_inode_cookie(inode))
+		return __cifs_readpage_from_fscache(inode, page);
 	return -ENOBUFS;
 }

 static inline void cifs_readpage_to_fscache(struct inode *inode,
 					    struct page *page)
 {
-	if (PageFsCache(page))
+	if (cifs_inode_cookie(inode))
 		__cifs_readpage_to_fscache(inode, page);
 }

+static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+	if (PageFsCache(page)) {
+		if (current_is_kswapd() || !(gfp & __GFP_FS))
+			return false;
+		wait_on_page_fscache(page);
+		fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
+	}
+	return true;
+}
+
 #else /* CONFIG_CIFS_FSCACHE */
 static inline
 void cifs_fscache_fill_coherency(struct inode *inode,
@@ -123,22 +135,29 @@ static inline void cifs_fscache_unuse_inode_cookie(struct inode *inode, bool upd
 static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; }
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}

-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
+static inline int cifs_fscache_query_occupancy(struct inode *inode,
+					       pgoff_t first, unsigned int nr_pages,
+					       pgoff_t *_data_first,
+					       unsigned int *_data_nr_pages)
 {
+	*_data_first = ULONG_MAX;
+	*_data_nr_pages = 0;
 	return -ENOBUFS;
 }

-static inline int cifs_readpages_from_fscache(struct inode *inode,
-					      struct address_space *mapping,
-					      struct list_head *pages,
-					      unsigned *nr_pages)
+static inline int
+cifs_readpage_from_fscache(struct inode *inode, struct page *page)
 {
 	return -ENOBUFS;
 }

-static inline void cifs_readpage_to_fscache(struct inode *inode,
-					    struct page *page) {}
+static inline
+void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {}
+
+static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+	return true; /* May release page */
+}

 #endif /* CONFIG_CIFS_FSCACHE */
......
@@ -83,6 +83,7 @@ static void cifs_set_ops(struct inode *inode)
 static void
 cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
 {
+	struct cifs_fscache_inode_coherency_data cd;
 	struct cifsInodeInfo *cifs_i = CIFS_I(inode);

 	cifs_dbg(FYI, "%s: revalidating inode %llu\n",
@@ -113,6 +114,9 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
 	cifs_dbg(FYI, "%s: invalidating inode %llu mapping\n",
 		 __func__, cifs_i->uniqueid);
 	set_bit(CIFS_INO_INVALID_MAPPING, &cifs_i->flags);
+	/* Invalidate fscache cookie */
+	cifs_fscache_fill_coherency(&cifs_i->vfs_inode, &cd);
+	fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
 }

 /*
@@ -2261,8 +2265,6 @@ cifs_dentry_needs_reval(struct dentry *dentry)
 int
 cifs_invalidate_mapping(struct inode *inode)
 {
-	struct cifs_fscache_inode_coherency_data cd;
-	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 	int rc = 0;

 	if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
@@ -2272,8 +2274,6 @@ cifs_invalidate_mapping(struct inode *inode)
 			 __func__, inode);
 	}

-	cifs_fscache_fill_coherency(&cifsi->vfs_inode, &cd);
-	fscache_invalidate(cifs_inode_cookie(inode), &cd, i_size_read(inode), 0);
 	return rc;
 }
......
@@ -713,7 +713,11 @@ static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
 	else
 		sz += sizeof(__le16);

-	sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
+	if (ses->workstation_name)
+		sz += sizeof(__le16) * strnlen(ses->workstation_name,
+					       CIFS_MAX_WORKSTATION_LEN);
+	else
+		sz += sizeof(__le16);

 	return sz;
 }
......
@@ -244,6 +244,13 @@ struct netfs_cache_ops {
 	int (*prepare_write)(struct netfs_cache_resources *cres,
 			     loff_t *_start, size_t *_len, loff_t i_size,
 			     bool no_space_allocated_yet);
+
+	/* Query the occupancy of the cache in a region, returning where the
+	 * next chunk of data starts and how long it is.
+	 */
+	int (*query_occupancy)(struct netfs_cache_resources *cres,
+			       loff_t start, size_t len, size_t granularity,
+			       loff_t *_data_start, size_t *_data_len);
 };

 struct readahead_control;
......