Commit 83ab4b46 authored by Linus Torvalds

Merge tag 'vfs-6.10-rc8.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "cachefiles:

   - Export an existing and add a new cachefile helper to be used in
     filesystems to fix reference count bugs

   - Use the newly added fscache_try_get_volume() helper to get a
     reference count on an fscache_volume to handle volumes that are
     about to be removed cleanly

   - After withdrawing a fscache_cache via FSCACHE_CACHE_IS_WITHDRAWN
     wait for all ongoing cookie lookups to complete and for the object
     count to reach zero

   - Propagate errors from vfs_getxattr() to avoid an infinite loop in
     cachefiles_check_volume_xattr() because it keeps seeing ESTALE

   - Don't send new requests when an object is dropped by raising
     CACHEFILES_ONDEMAND_OBJSTATE_DROPPING

   - Cancel all requests for an object that is about to be dropped

   - Wait for the ondemand_object_worker to finish before dropping a
     cachefiles object to prevent use-after-free

   - Use cyclic allocation for message ids to better handle id recycling

   - Add missing lock protection when iterating through the xarray when
     polling

  netfs:

   - Use standard logging helpers for debug logging

  VFS:

   - Fix potential use-after-free in file locks during
     trace_posix_lock_inode(). The tracepoint could fire while another
     task raced it and freed the lock that was requested to be traced

   - Only increment the nr_dentry_negative counter for dentries that are
     present on the superblock LRU. Currently, the DCACHE_LRU_LIST flag
     is used to detect this case. However, the flag is also raised in
     combination with DCACHE_SHRINK_LIST to indicate that dentry->d_lru
     is used. So checking only DCACHE_LRU_LIST will lead to a wrong
     nr_dentry_negative count. Fix the check to not count dentries that
     are on a shrink-related list

  Misc:

   - hfsplus: fix an uninitialized value issue in copy_name

   - minix: fix minixfs_rename with HIGHMEM. It still uses kunmap() even
     though we switched it to kmap_local_page() a while ago"

* tag 'vfs-6.10-rc8.fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  minixfs: Fix minixfs_rename with HIGHMEM
  hfsplus: fix uninit-value in copy_name
  vfs: don't mod negative dentry count when on shrinker list
  filelock: fix potential use-after-free in posix_lock_inode
  cachefiles: add missing lock protection when polling
  cachefiles: cyclic allocation of msg_id to avoid reuse
  cachefiles: wait for ondemand_object_worker to finish when dropping object
  cachefiles: cancel all requests for the object that is being dropped
  cachefiles: stop sending new request when dropping object
  cachefiles: propagate errors from vfs_getxattr() to avoid infinite loop
  cachefiles: fix slab-use-after-free in cachefiles_withdraw_cookie()
  cachefiles: fix slab-use-after-free in fscache_withdraw_volume()
  netfs, fscache: export fscache_put_volume() and add fscache_try_get_volume()
  netfs: Switch debug logging to pr_debug()
parents 9d9a2f29 3d1bec29
@@ -8,6 +8,7 @@
 #include <linux/slab.h>
 #include <linux/statfs.h>
 #include <linux/namei.h>
+#include <trace/events/fscache.h>
 #include "internal.h"
 
 /*
@@ -312,19 +313,59 @@ static void cachefiles_withdraw_objects(struct cachefiles_cache *cache)
 }
 
 /*
- * Withdraw volumes.
+ * Withdraw fscache volumes.
+ */
+static void cachefiles_withdraw_fscache_volumes(struct cachefiles_cache *cache)
+{
+	struct list_head *cur;
+	struct cachefiles_volume *volume;
+	struct fscache_volume *vcookie;
+
+	_enter("");
+retry:
+	spin_lock(&cache->object_list_lock);
+	list_for_each(cur, &cache->volumes) {
+		volume = list_entry(cur, struct cachefiles_volume, cache_link);
+		if (atomic_read(&volume->vcookie->n_accesses) == 0)
+			continue;
+
+		vcookie = fscache_try_get_volume(volume->vcookie,
+						 fscache_volume_get_withdraw);
+		if (vcookie) {
+			spin_unlock(&cache->object_list_lock);
+			fscache_withdraw_volume(vcookie);
+			fscache_put_volume(vcookie, fscache_volume_put_withdraw);
+			goto retry;
+		}
+	}
+	spin_unlock(&cache->object_list_lock);
+	_leave("");
+}
+
+/*
+ * Withdraw cachefiles volumes.
  */
 static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
 {
 	_enter("");
 
 	for (;;) {
+		struct fscache_volume *vcookie = NULL;
 		struct cachefiles_volume *volume = NULL;
 
 		spin_lock(&cache->object_list_lock);
 		if (!list_empty(&cache->volumes)) {
 			volume = list_first_entry(&cache->volumes,
 						  struct cachefiles_volume, cache_link);
+
+			vcookie = fscache_try_get_volume(volume->vcookie,
+							 fscache_volume_get_withdraw);
+			if (!vcookie) {
+				spin_unlock(&cache->object_list_lock);
+				cpu_relax();
+				continue;
+			}
 			list_del_init(&volume->cache_link);
 		}
 		spin_unlock(&cache->object_list_lock);
@@ -332,6 +373,7 @@ static void cachefiles_withdraw_volumes(struct cachefiles_cache *cache)
 			break;
 
 		cachefiles_withdraw_volume(volume);
+		fscache_put_volume(vcookie, fscache_volume_put_withdraw);
 	}
 
 	_leave("");
@@ -371,6 +413,7 @@ void cachefiles_withdraw_cache(struct cachefiles_cache *cache)
 	pr_info("File cache on %s unregistering\n", fscache->name);
 
 	fscache_withdraw_cache(fscache);
+	cachefiles_withdraw_fscache_volumes(cache);
 
 	/* we now have to destroy all the active objects pertaining to this
 	 * cache - which we do by passing them off to thread pool to be
...
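The fix above depends on fscache_try_get_volume() taking a reference only if the count has not already dropped to zero, so the withdrawal path can safely skip volumes whose final put is in flight. A minimal userspace sketch of that inc-not-zero pattern, using C11 atomics rather than the kernel's refcount_t (the names here are illustrative, not kernel API):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct volume {
		atomic_int ref;		/* 0 means the object is already being freed */
	};

	/* Take a reference only if the count is still non-zero. */
	static bool volume_try_get(struct volume *v)
	{
		int old = atomic_load(&v->ref);

		while (old != 0) {
			/* On failure, old is reloaded and the zero check reruns. */
			if (atomic_compare_exchange_weak(&v->ref, &old, old + 1))
				return true;	/* got a reference */
		}
		return false;	/* raced with the final put; caller must skip */
	}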
@@ -366,14 +366,14 @@ static __poll_t cachefiles_daemon_poll(struct file *file,
 
 	if (cachefiles_in_ondemand_mode(cache)) {
 		if (!xa_empty(&cache->reqs)) {
-			rcu_read_lock();
+			xas_lock(&xas);
 			xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) {
 				if (!cachefiles_ondemand_is_reopening_read(req)) {
 					mask |= EPOLLIN;
 					break;
 				}
 			}
-			rcu_read_unlock();
+			xas_unlock(&xas);
 		}
 	} else {
 		if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
...
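The rule this hunk applies: once the request-cancellation path can __xa_erase() entries concurrently, the poll-side walk must take the same xa_lock the writers use instead of relying on RCU alone. A userspace analogue of the same discipline, with a plain array standing in for the xarray (illustrative only, not the kernel API):

	#include <pthread.h>
	#include <stddef.h>

	#define NREQS 64

	static pthread_mutex_t reqs_lock = PTHREAD_MUTEX_INITIALIZER;
	static void *reqs[NREQS];		/* NULL slots are free */

	static int any_request_pending(void)
	{
		int pending = 0;

		/* Same lock the erase path takes, so no slot can be torn
		 * down while the reader is looking at it. */
		pthread_mutex_lock(&reqs_lock);
		for (size_t i = 0; i < NREQS; i++) {
			if (reqs[i]) {
				pending = 1;
				break;
			}
		}
		pthread_mutex_unlock(&reqs_lock);
		return pending;
	}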
@@ -48,6 +48,7 @@ enum cachefiles_object_state {
 	CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */
 	CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */
 	CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */
+	CACHEFILES_ONDEMAND_OBJSTATE_DROPPING, /* Object is being dropped. */
 };
 
 struct cachefiles_ondemand_info {
@@ -128,6 +129,7 @@ struct cachefiles_cache {
 	unsigned long			req_id_next;
 	struct xarray			ondemand_ids;	/* xarray for ondemand_id allocation */
 	u32				ondemand_id_next;
+	u32				msg_id_next;
 };
 
 static inline bool cachefiles_in_ondemand_mode(struct cachefiles_cache *cache)
@@ -335,6 +337,7 @@ cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \
 CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN);
 CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE);
 CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING);
+CACHEFILES_OBJECT_STATE_FUNCS(dropping, DROPPING);
 
 static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req)
 {
...
@@ -517,7 +517,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	 */
 	xas_lock(&xas);
 
-	if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+	if (test_bit(CACHEFILES_DEAD, &cache->flags) ||
+	    cachefiles_ondemand_object_is_dropping(object)) {
 		xas_unlock(&xas);
 		ret = -EIO;
 		goto out;
@@ -527,20 +528,32 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	smp_mb();
 
 	if (opcode == CACHEFILES_OP_CLOSE &&
 	    !cachefiles_ondemand_object_is_open(object)) {
 		WARN_ON_ONCE(object->ondemand->ondemand_id == 0);
 		xas_unlock(&xas);
 		ret = -EIO;
 		goto out;
 	}
 
-	xas.xa_index = 0;
+	/*
+	 * Cyclically find a free xas to avoid msg_id reuse that would
+	 * cause the daemon to successfully copen a stale msg_id.
+	 */
+	xas.xa_index = cache->msg_id_next;
 	xas_find_marked(&xas, UINT_MAX, XA_FREE_MARK);
+	if (xas.xa_node == XAS_RESTART) {
+		xas.xa_index = 0;
+		xas_find_marked(&xas, cache->msg_id_next - 1, XA_FREE_MARK);
+	}
 	if (xas.xa_node == XAS_RESTART)
 		xas_set_err(&xas, -EBUSY);
+
 	xas_store(&xas, req);
-	xas_clear_mark(&xas, XA_FREE_MARK);
-	xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+	if (xas_valid(&xas)) {
+		cache->msg_id_next = xas.xa_index + 1;
+		xas_clear_mark(&xas, XA_FREE_MARK);
+		xas_set_mark(&xas, CACHEFILES_REQ_NEW);
+	}
 	xas_unlock(&xas);
 } while (xas_nomem(&xas, GFP_KERNEL));
@@ -568,7 +581,8 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object,
 	 * If error occurs after creating the anonymous fd,
 	 * cachefiles_ondemand_fd_release() will set object to close.
 	 */
-	if (opcode == CACHEFILES_OP_OPEN)
+	if (opcode == CACHEFILES_OP_OPEN &&
+	    !cachefiles_ondemand_object_is_dropping(object))
 		cachefiles_ondemand_set_object_close(object);
 	kfree(req);
 	return ret;
@@ -667,8 +681,34 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
 
 void cachefiles_ondemand_clean_object(struct cachefiles_object *object)
 {
+	unsigned long index;
+	struct cachefiles_req *req;
+	struct cachefiles_cache *cache;
+
+	if (!object->ondemand)
+		return;
+
 	cachefiles_ondemand_send_req(object, CACHEFILES_OP_CLOSE, 0,
 			cachefiles_ondemand_init_close_req, NULL);
+
+	if (!object->ondemand->ondemand_id)
+		return;
+
+	/* Cancel all requests for the object that is being dropped. */
+	cache = object->volume->cache;
+	xa_lock(&cache->reqs);
+	cachefiles_ondemand_set_object_dropping(object);
+	xa_for_each(&cache->reqs, index, req) {
+		if (req->object == object) {
+			req->error = -EIO;
+			complete(&req->done);
+			__xa_erase(&cache->reqs, index);
+		}
+	}
+	xa_unlock(&cache->reqs);
+
+	/* Wait for ondemand_object_worker() to finish to avoid UAF. */
+	cancel_work_sync(&object->ondemand->ondemand_work);
 }
 
 int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object,
...
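The cyclic msg_id search above scans forward from a hint and wraps to 0 only if the tail of the space is full, so a just-freed low id is not immediately handed out again. A standalone sketch of the same two-pass search over a plain bitmap instead of an xarray (names and sizes are illustrative):

	#include <stdbool.h>

	#define NIDS 1024

	static bool used[NIDS];
	static unsigned int id_next;	/* hint: one past the last allocated id */

	static int alloc_id_cyclic(void)
	{
		/* First pass: from the hint to the end of the space. */
		for (unsigned int i = id_next; i < NIDS; i++) {
			if (!used[i]) {
				used[i] = true;
				id_next = i + 1;
				return i;
			}
		}
		/* Second pass: wrap around and retry from 0 up to the hint. */
		for (unsigned int i = 0; i < id_next && i < NIDS; i++) {
			if (!used[i]) {
				used[i] = true;
				id_next = i + 1;
				return i;
			}
		}
		return -1;	/* the kernel version reports -EBUSY here */
	}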
@@ -133,7 +133,6 @@ void cachefiles_free_volume(struct fscache_volume *vcookie)
 
 void cachefiles_withdraw_volume(struct cachefiles_volume *volume)
 {
-	fscache_withdraw_volume(volume->vcookie);
 	cachefiles_set_volume_xattr(volume);
 	__cachefiles_free_volume(volume);
 }
...
@@ -110,9 +110,11 @@ int cachefiles_check_auxdata(struct cachefiles_object *object, struct file *file
 	if (xlen == 0)
 		xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, tlen);
 	if (xlen != tlen) {
-		if (xlen < 0)
+		if (xlen < 0) {
+			ret = xlen;
 			trace_cachefiles_vfs_error(object, file_inode(file), xlen,
 						   cachefiles_trace_getxattr_error);
+		}
 		if (xlen == -EIO)
 			cachefiles_io_error_obj(
 				object,
@@ -252,6 +254,7 @@ int cachefiles_check_volume_xattr(struct cachefiles_volume *volume)
 	xlen = vfs_getxattr(&nop_mnt_idmap, dentry, cachefiles_xattr_cache, buf, len);
 	if (xlen != len) {
 		if (xlen < 0) {
+			ret = xlen;
 			trace_cachefiles_vfs_error(NULL, d_inode(dentry), xlen,
 						   cachefiles_trace_getxattr_error);
 			if (xlen == -EIO)
...
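The shape of the bug fixed here: the caller treats -ESTALE as "rebuild the cache and retry", so if a real vfs_getxattr() error is never propagated the retry loop spins forever on an xattr it can never read. A hedged userspace sketch of the propagation rule (the callback signature is invented for illustration):

	#include <errno.h>

	int check_volume(int (*getxattr)(char *buf, int len), char *buf, int len)
	{
		int ret = -ESTALE;	/* default: content mismatch, rebuild cache */
		int xlen = getxattr(buf, len);

		if (xlen != len) {
			if (xlen < 0)
				ret = xlen;	/* propagate -EIO etc., break the loop */
			return ret;
		}
		return 0;		/* xattr matches, volume is usable */
	}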
@@ -355,7 +355,11 @@ static inline void __d_clear_type_and_inode(struct dentry *dentry)
 	flags &= ~DCACHE_ENTRY_TYPE;
 	WRITE_ONCE(dentry->d_flags, flags);
 	dentry->d_inode = NULL;
-	if (flags & DCACHE_LRU_LIST)
+	/*
+	 * The negative counter only tracks dentries on the LRU. Don't inc if
+	 * d_lru is on another list.
+	 */
+	if ((flags & (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 		this_cpu_inc(nr_dentry_negative);
 }
 
@@ -1844,9 +1848,11 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
 	spin_lock(&dentry->d_lock);
 	/*
-	 * Decrement negative dentry count if it was in the LRU list.
+	 * The negative counter only tracks dentries on the LRU. Don't dec if
+	 * d_lru is on another list.
 	 */
-	if (dentry->d_flags & DCACHE_LRU_LIST)
+	if ((dentry->d_flags &
+	     (DCACHE_LRU_LIST|DCACHE_SHRINK_LIST)) == DCACHE_LRU_LIST)
 		this_cpu_dec(nr_dentry_negative);
 	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
 	raw_write_seqcount_begin(&dentry->d_seq);
...
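The mask idiom used above counts a dentry only when DCACHE_LRU_LIST is set and DCACHE_SHRINK_LIST is clear; a plain `flags & DCACHE_LRU_LIST` test also matches dentries on a shrink list, since that flag stays raised there. A quick standalone check of the difference (the constants are illustrative values, not the kernel's):

	#include <assert.h>

	#define LRU_LIST	0x1
	#define SHRINK_LIST	0x2

	int main(void)
	{
		unsigned int on_lru = LRU_LIST;
		unsigned int on_shrink = LRU_LIST | SHRINK_LIST;

		/* The old check matched both cases, skewing the counter: */
		assert(on_lru & LRU_LIST);
		assert(on_shrink & LRU_LIST);

		/* The fixed check matches only the dentry actually on the LRU: */
		assert((on_lru & (LRU_LIST | SHRINK_LIST)) == LRU_LIST);
		assert((on_shrink & (LRU_LIST | SHRINK_LIST)) != LRU_LIST);
		return 0;
	}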
@@ -696,7 +696,7 @@ ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
 		return err;
 	}
 
-	strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
+	strbuf = kzalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
 			XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
 	if (!strbuf) {
 		res = -ENOMEM;
...
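Same class of bug in userspace terms: kmalloc(), like malloc(), returns uninitialized memory, so any code that reads or copies out the buffer before fully writing it leaks garbage bytes; kzalloc() is the calloc() analogue. An illustrative sketch, not the hfsplus code:

	#include <stdlib.h>
	#include <string.h>

	char *make_name(size_t len)
	{
		/* calloc zeroes the buffer, so a short copy below leaves the
		 * remainder as NUL bytes instead of uninitialized garbage. */
		char *strbuf = calloc(1, len + 1);

		if (!strbuf)
			return NULL;
		strncpy(strbuf, "prefix", len);	/* may write fewer than len bytes */
		return strbuf;
	}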
@@ -1367,9 +1367,9 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		locks_wake_up_blocks(&left->c);
 	}
 out:
+	trace_posix_lock_inode(inode, request, error);
 	spin_unlock(&ctx->flc_lock);
 	percpu_up_read(&file_rwsem);
-	trace_posix_lock_inode(inode, request, error);
 	/*
 	 * Free any unused locks.
 	 */
...
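The ordering rule behind this fix, in miniature: once the lock is dropped, another task may free the lock that `request` refers to, so every use of it, the tracepoint included, has to happen before the unlock. A hedged userspace sketch of the discipline (pthread mutex standing in for flc_lock):

	#include <pthread.h>
	#include <stdio.h>

	struct request { int error; };

	static pthread_mutex_t flc_lock = PTHREAD_MUTEX_INITIALIZER;

	void finish_request(struct request *request)
	{
		pthread_mutex_lock(&flc_lock);
		/* ... process the request ... */
		printf("trace: error=%d\n", request->error);	/* safe: still locked */
		pthread_mutex_unlock(&flc_lock);
		/* request must not be touched here: it may already be freed */
	}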
@@ -213,8 +213,7 @@ static int minix_rename(struct mnt_idmap *idmap,
 		if (!new_de)
 			goto out_dir;
 		err = minix_set_link(new_de, new_page, old_inode);
-		kunmap(new_page);
-		put_page(new_page);
+		unmap_and_put_page(new_page, new_de);
 		if (err)
 			goto out_dir;
 		inode_set_ctime_current(new_inode);
...
@@ -117,7 +117,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 		if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
 			if (folio->index == rreq->no_unlock_folio &&
 			    test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
-				_debug("no unlock");
+				kdebug("no unlock");
 			else
 				folio_unlock(folio);
 		}
@@ -204,7 +204,7 @@ void netfs_readahead(struct readahead_control *ractl)
 	struct netfs_inode *ctx = netfs_inode(ractl->mapping->host);
 	int ret;
 
-	_enter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
+	kenter("%lx,%x", readahead_index(ractl), readahead_count(ractl));
 
 	if (readahead_count(ractl) == 0)
 		return;
@@ -268,7 +268,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
 	struct folio *sink = NULL;
 	int ret;
 
-	_enter("%lx", folio->index);
+	kenter("%lx", folio->index);
 
 	rreq = netfs_alloc_request(mapping, file,
 				   folio_file_pos(folio), folio_size(folio),
@@ -508,7 +508,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
 have_folio:
 	*_folio = folio;
-	_leave(" = 0");
+	kleave(" = 0");
 	return 0;
 
 error_put:
@@ -518,7 +518,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
 		folio_unlock(folio);
 		folio_put(folio);
 	}
-	_leave(" = %d", ret);
+	kleave(" = %d", ret);
 	return ret;
 }
 EXPORT_SYMBOL(netfs_write_begin);
@@ -536,7 +536,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 	size_t flen = folio_size(folio);
 	int ret;
 
-	_enter("%zx @%llx", flen, start);
+	kenter("%zx @%llx", flen, start);
 
 	ret = -ENOMEM;
@@ -567,7 +567,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
 error_put:
 	netfs_put_request(rreq, false, netfs_rreq_trace_put_discard);
 error:
-	_leave(" = %d", ret);
+	kleave(" = %d", ret);
 	return ret;
 }
...
@@ -56,7 +56,7 @@ static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
 	struct netfs_group *group = netfs_folio_group(folio);
 	loff_t pos = folio_file_pos(folio);
 
-	_enter("");
+	kenter("");
 
 	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
 		return NETFS_FLUSH_CONTENT;
@@ -272,12 +272,12 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		 */
 		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
 					    flen, offset, part, maybe_trouble);
-		_debug("howto %u", howto);
+		kdebug("howto %u", howto);
 		switch (howto) {
 		case NETFS_JUST_PREFETCH:
 			ret = netfs_prefetch_for_write(file, folio, offset, part);
 			if (ret < 0) {
-				_debug("prefetch = %zd", ret);
+				kdebug("prefetch = %zd", ret);
 				goto error_folio_unlock;
 			}
 			break;
@@ -418,7 +418,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	}
 
 	iocb->ki_pos += written;
-	_leave(" = %zd [%zd]", written, ret);
+	kleave(" = %zd [%zd]", written, ret);
 	return written ? written : ret;
 
 error_folio_unlock:
@@ -491,7 +491,7 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct netfs_inode *ictx = netfs_inode(inode);
 	ssize_t ret;
 
-	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+	kenter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
 
 	if (!iov_iter_count(from))
 		return 0;
@@ -529,7 +529,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	vm_fault_t ret = VM_FAULT_RETRY;
 	int err;
 
-	_enter("%lx", folio->index);
+	kenter("%lx", folio->index);
 
 	sb_start_pagefault(inode->i_sb);
...
@@ -33,7 +33,7 @@ ssize_t netfs_unbuffered_read_iter_locked(struct kiocb *iocb, struct iov_iter *i
 	size_t orig_count = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);
 
-	_enter("");
+	kenter("");
 
 	if (!orig_count)
 		return 0; /* Don't update atime */
...
@@ -37,7 +37,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	size_t len = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);
 
-	_enter("");
+	kenter("");
 
 	/* We're going to need a bounce buffer if what we transmit is going to
 	 * be different in some way to the source buffer, e.g. because it gets
@@ -45,7 +45,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 	 */
 	// TODO
 
-	_debug("uw %llx-%llx", start, end);
+	kdebug("uw %llx-%llx", start, end);
 
 	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
 				      iocb->ki_flags & IOCB_DIRECT ?
@@ -96,7 +96,7 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
 		wreq->cleanup = netfs_cleanup_dio_write;
 		ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
 		if (ret < 0) {
-			_debug("begin = %zd", ret);
+			kdebug("begin = %zd", ret);
 			goto out;
 		}
@@ -143,7 +143,7 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	loff_t pos = iocb->ki_pos;
 	unsigned long long end = pos + iov_iter_count(from) - 1;
 
-	_enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
+	kenter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
 
 	if (!iov_iter_count(from))
 		return 0;
...
@@ -237,7 +237,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
 	int n_accesses;
 
-	_enter("{%s,%s}", ops->name, cache->name);
+	kenter("{%s,%s}", ops->name, cache->name);
 
 	BUG_ON(fscache_cache_state(cache) != FSCACHE_CACHE_IS_PREPARING);
@@ -257,7 +257,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 	up_write(&fscache_addremove_sem);
 	pr_notice("Cache \"%s\" added (type %s)\n", cache->name, ops->name);
-	_leave(" = 0 [%s]", cache->name);
+	kleave(" = 0 [%s]", cache->name);
 	return 0;
 }
 EXPORT_SYMBOL(fscache_add_cache);
...
@@ -456,7 +456,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 {
 	struct fscache_cookie *cookie;
 
-	_enter("V=%x", volume->debug_id);
+	kenter("V=%x", volume->debug_id);
 
 	if (!index_key || !index_key_len || index_key_len > 255 || aux_data_len > 255)
 		return NULL;
@@ -484,7 +484,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
 	trace_fscache_acquire(cookie);
 	fscache_stat(&fscache_n_acquires_ok);
-	_leave(" = c=%08x", cookie->debug_id);
+	kleave(" = c=%08x", cookie->debug_id);
 	return cookie;
 }
 EXPORT_SYMBOL(__fscache_acquire_cookie);
@@ -505,7 +505,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
 	enum fscache_access_trace trace = fscache_access_lookup_cookie_end_failed;
 	bool need_withdraw = false;
 
-	_enter("");
+	kenter("");
 
 	if (!cookie->volume->cache_priv) {
 		fscache_create_volume(cookie->volume, true);
@@ -519,7 +519,7 @@ static void fscache_perform_lookup(struct fscache_cookie *cookie)
 		if (cookie->state != FSCACHE_COOKIE_STATE_FAILED)
 			fscache_set_cookie_state(cookie, FSCACHE_COOKIE_STATE_QUIESCENT);
 		need_withdraw = true;
-		_leave(" [fail]");
+		kleave(" [fail]");
 		goto out;
 	}
@@ -572,7 +572,7 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
 	bool queue = false;
 	int n_active;
 
-	_enter("c=%08x", cookie->debug_id);
+	kenter("c=%08x", cookie->debug_id);
 
 	if (WARN(test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
 		 "Trying to use relinquished cookie\n"))
@@ -636,7 +636,7 @@ void __fscache_use_cookie(struct fscache_cookie *cookie, bool will_modify)
 	spin_unlock(&cookie->lock);
 	if (queue)
 		fscache_queue_cookie(cookie, fscache_cookie_get_use_work);
-	_leave("");
+	kleave("");
 }
 EXPORT_SYMBOL(__fscache_use_cookie);
@@ -702,7 +702,7 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
 	enum fscache_cookie_state state;
 	bool wake = false;
 
-	_enter("c=%x", cookie->debug_id);
+	kenter("c=%x", cookie->debug_id);
 
 again:
 	spin_lock(&cookie->lock);
@@ -820,7 +820,7 @@ static void fscache_cookie_state_machine(struct fscache_cookie *cookie)
 	spin_unlock(&cookie->lock);
 	if (wake)
 		wake_up_cookie_state(cookie);
-	_leave("");
+	kleave("");
 }
 
 static void fscache_cookie_worker(struct work_struct *work)
@@ -867,7 +867,7 @@ static void fscache_cookie_lru_do_one(struct fscache_cookie *cookie)
 		set_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags);
 		spin_unlock(&cookie->lock);
 		fscache_stat(&fscache_n_cookies_lru_expired);
-		_debug("lru c=%x", cookie->debug_id);
+		kdebug("lru c=%x", cookie->debug_id);
 		__fscache_withdraw_cookie(cookie);
 	}
@@ -971,7 +971,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, bool retire)
 	if (retire)
 		fscache_stat(&fscache_n_relinquishes_retire);
 
-	_enter("c=%08x{%d},%d",
+	kenter("c=%08x{%d},%d",
 	       cookie->debug_id, atomic_read(&cookie->n_active), retire);
 
 	if (WARN(test_and_set_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags),
@@ -1050,7 +1050,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 {
 	bool is_caching;
 
-	_enter("c=%x", cookie->debug_id);
+	kenter("c=%x", cookie->debug_id);
 
 	fscache_stat(&fscache_n_invalidates);
@@ -1072,7 +1072,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 	case FSCACHE_COOKIE_STATE_INVALIDATING: /* is_still_valid will catch it */
 	default:
 		spin_unlock(&cookie->lock);
-		_leave(" [no %u]", cookie->state);
+		kleave(" [no %u]", cookie->state);
 		return;
 
 	case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -1081,7 +1081,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 		fallthrough;
 	case FSCACHE_COOKIE_STATE_CREATING:
 		spin_unlock(&cookie->lock);
-		_leave(" [look %x]", cookie->inval_counter);
+		kleave(" [look %x]", cookie->inval_counter);
 		return;
 
 	case FSCACHE_COOKIE_STATE_ACTIVE:
@@ -1094,7 +1094,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie,
 		if (is_caching)
 			fscache_queue_cookie(cookie, fscache_cookie_get_inval_work);
-		_leave(" [inv]");
+		kleave(" [inv]");
 		return;
 	}
 }
...
@@ -28,12 +28,12 @@ bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
 
 again:
 	if (!fscache_cache_is_live(cookie->volume->cache)) {
-		_leave(" [broken]");
+		kleave(" [broken]");
 		return false;
 	}
 
 	state = fscache_cookie_state(cookie);
-	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+	kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
 
 	switch (state) {
 	case FSCACHE_COOKIE_STATE_CREATING:
@@ -52,7 +52,7 @@ bool fscache_wait_for_operation(struct netfs_cache_resources *cres,
 	case FSCACHE_COOKIE_STATE_DROPPED:
 	case FSCACHE_COOKIE_STATE_RELINQUISHING:
 	default:
-		_leave(" [not live]");
+		kleave(" [not live]");
 		return false;
 	}
@@ -92,7 +92,7 @@ static int fscache_begin_operation(struct netfs_cache_resources *cres,
 	spin_lock(&cookie->lock);
 
 	state = fscache_cookie_state(cookie);
-	_enter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
+	kenter("c=%08x{%u},%x", cookie->debug_id, state, want_state);
 
 	switch (state) {
 	case FSCACHE_COOKIE_STATE_LOOKING_UP:
@@ -140,7 +140,7 @@ static int fscache_begin_operation(struct netfs_cache_resources *cres,
 	cres->cache_priv = NULL;
 	cres->ops = NULL;
 	fscache_end_cookie_access(cookie, fscache_access_io_not_live);
-	_leave(" = -ENOBUFS");
+	kleave(" = -ENOBUFS");
 	return -ENOBUFS;
 }
@@ -224,7 +224,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
 	if (len == 0)
 		goto abandon;
 
-	_enter("%llx,%zx", start, len);
+	kenter("%llx,%zx", start, len);
 
 	wreq = kzalloc(sizeof(struct fscache_write_request), GFP_NOFS);
 	if (!wreq)
...
@@ -99,7 +99,7 @@ int __init fscache_init(void)
  */
 void __exit fscache_exit(void)
 {
-	_enter("");
+	kenter("");
 
 	kmem_cache_destroy(fscache_cookie_jar);
 	fscache_proc_cleanup();
...
@@ -27,6 +27,19 @@ struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
 	return volume;
 }
 
+struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
+					      enum fscache_volume_trace where)
+{
+	int ref;
+
+	if (!__refcount_inc_not_zero(&volume->ref, &ref))
+		return NULL;
+
+	trace_fscache_volume(volume->debug_id, ref + 1, where);
+	return volume;
+}
+EXPORT_SYMBOL(fscache_try_get_volume);
+
 static void fscache_see_volume(struct fscache_volume *volume,
 			       enum fscache_volume_trace where)
 {
@@ -251,7 +264,7 @@ static struct fscache_volume *fscache_alloc_volume(const char *volume_key,
 	fscache_see_volume(volume, fscache_volume_new_acquire);
 	fscache_stat(&fscache_n_volumes);
 	up_write(&fscache_addremove_sem);
-	_leave(" = v=%x", volume->debug_id);
+	kleave(" = v=%x", volume->debug_id);
 	return volume;
 
 err_vol:
@@ -420,6 +433,7 @@ void fscache_put_volume(struct fscache_volume *volume,
 		fscache_free_volume(volume);
 	}
 }
+EXPORT_SYMBOL(fscache_put_volume);
 
 /*
  * Relinquish a volume representation cookie.
@@ -452,7 +466,7 @@ void fscache_withdraw_volume(struct fscache_volume *volume)
 {
 	int n_accesses;
 
-	_debug("withdraw V=%x", volume->debug_id);
+	kdebug("withdraw V=%x", volume->debug_id);
 
 	/* Allow wakeups on dec-to-0 */
 	n_accesses = atomic_dec_return(&volume->n_accesses);
...
@@ -34,7 +34,6 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
 /*
  * main.c
  */
-extern unsigned int netfs_debug;
 extern struct list_head netfs_io_requests;
 extern spinlock_t netfs_proc_lock;
 extern mempool_t netfs_request_pool;
@@ -344,8 +343,6 @@ extern const struct seq_operations fscache_volumes_seq_ops;
 
 struct fscache_volume *fscache_get_volume(struct fscache_volume *volume,
 					  enum fscache_volume_trace where);
-void fscache_put_volume(struct fscache_volume *volume,
-			enum fscache_volume_trace where);
 bool fscache_begin_volume_access(struct fscache_volume *volume,
 				 struct fscache_cookie *cookie,
 				 enum fscache_access_trace why);
@@ -356,42 +353,12 @@ void fscache_create_volume(struct fscache_volume *volume, bool wait);
  * debug tracing
  */
 #define dbgprintk(FMT, ...) \
-	printk("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+	pr_debug("[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
 
 #define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
 #define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
 #define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
 
-#ifdef __KDEBUG
-#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
-#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
-#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
-#elif defined(CONFIG_NETFS_DEBUG)
-#define _enter(FMT, ...) \
-do { \
-	if (netfs_debug) \
-		kenter(FMT, ##__VA_ARGS__); \
-} while (0)
-#define _leave(FMT, ...) \
-do { \
-	if (netfs_debug) \
-		kleave(FMT, ##__VA_ARGS__); \
-} while (0)
-#define _debug(FMT, ...) \
-do { \
-	if (netfs_debug) \
-		kdebug(FMT, ##__VA_ARGS__); \
-} while (0)
-#else
-#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
-#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
-#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
-#endif
 
 /*
  * assertions
  */
...
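What the dbgprintk() change above buys: pr_debug() compiles away (or is gated by dynamic debug) unless debugging is enabled, which is why the netfs_debug module parameter and the _enter()/_leave()/_debug() wrapper layer can be deleted and every call site switched to kenter()/kleave()/kdebug(). A userspace analogue of the same compile-time gating, as a hedged sketch:

	#include <stdio.h>

	#ifdef DEBUG
	#define dbgprintk(FMT, ...) \
		fprintf(stderr, "[debug] " FMT "\n", ##__VA_ARGS__)
	#else
	#define dbgprintk(FMT, ...) \
		do { } while (0)	/* arguments are discarded at compile time */
	#endif

	#define kenter(FMT, ...) dbgprintk("==> %s(" FMT ")", __func__, ##__VA_ARGS__)
	#define kleave(FMT, ...) dbgprintk("<== %s()" FMT, __func__, ##__VA_ARGS__)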
@@ -130,7 +130,7 @@ static void netfs_reset_subreq_iter(struct netfs_io_request *rreq,
 	if (count == remaining)
 		return;
 
-	_debug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
+	kdebug("R=%08x[%u] ITER RESUB-MISMATCH %zx != %zx-%zx-%llx %x\n",
 	       rreq->debug_id, subreq->debug_index,
 	       iov_iter_count(&subreq->io_iter), subreq->transferred,
 	       subreq->len, rreq->i_size,
@@ -326,7 +326,7 @@ void netfs_subreq_terminated(struct netfs_io_subrequest *subreq,
 	struct netfs_io_request *rreq = subreq->rreq;
 	int u;
 
-	_enter("R=%x[%x]{%llx,%lx},%zd",
+	kenter("R=%x[%x]{%llx,%lx},%zd",
 	       rreq->debug_id, subreq->debug_index,
 	       subreq->start, subreq->flags, transferred_or_error);
@@ -435,7 +435,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
 	struct netfs_inode *ictx = netfs_inode(rreq->inode);
 	size_t lsize;
 
-	_enter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
+	kenter("%llx-%llx,%llx", subreq->start, subreq->start + subreq->len, rreq->i_size);
 
 	if (rreq->origin != NETFS_DIO_READ) {
 		source = netfs_cache_prepare_read(subreq, rreq->i_size);
@@ -518,7 +518,7 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 	subreq->start	= rreq->start + rreq->submitted;
 	subreq->len	= io_iter->count;
 
-	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
+	kdebug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
 	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 
 	/* Call out to the cache to find out what it can do with the remaining
@@ -570,7 +570,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	struct iov_iter io_iter;
 	int ret;
 
-	_enter("R=%x %llx-%llx",
+	kenter("R=%x %llx-%llx",
 	       rreq->debug_id, rreq->start, rreq->start + rreq->len - 1);
 
 	if (rreq->len == 0) {
@@ -593,7 +593,7 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	atomic_set(&rreq->nr_outstanding, 1);
 	io_iter = rreq->io_iter;
 	do {
-		_debug("submit %llx + %llx >= %llx",
+		kdebug("submit %llx + %llx >= %llx",
 		       rreq->start, rreq->submitted, rreq->i_size);
 		if (rreq->origin == NETFS_DIO_READ &&
 		    rreq->start + rreq->submitted >= rreq->i_size)
...
@@ -20,10 +20,6 @@ MODULE_LICENSE("GPL");
 EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
 
-unsigned netfs_debug;
-module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
-
 static struct kmem_cache *netfs_request_slab;
 static struct kmem_cache *netfs_subrequest_slab;
 mempool_t netfs_request_pool;
...
@@ -26,7 +26,7 @@ bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio)
 	struct fscache_cookie *cookie = netfs_i_cookie(ictx);
 	bool need_use = false;
 
-	_enter("");
+	kenter("");
 
 	if (!filemap_dirty_folio(mapping, folio))
 		return false;
@@ -99,7 +99,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 	struct netfs_folio *finfo;
 	size_t flen = folio_size(folio);
 
-	_enter("{%lx},%zx,%zx", folio->index, offset, length);
+	kenter("{%lx},%zx,%zx", folio->index, offset, length);
 
 	if (!folio_test_private(folio))
 		return;
...
@@ -161,7 +161,7 @@ static void netfs_retry_write_stream(struct netfs_io_request *wreq,
 {
 	struct list_head *next;
 
-	_enter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
+	kenter("R=%x[%x:]", wreq->debug_id, stream->stream_nr);
 
 	if (list_empty(&stream->subrequests))
 		return;
@@ -374,7 +374,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	unsigned int notes;
 	int s;
 
-	_enter("%llx-%llx", wreq->start, wreq->start + wreq->len);
+	kenter("%llx-%llx", wreq->start, wreq->start + wreq->len);
 
 	trace_netfs_collect(wreq);
 	trace_netfs_rreq(wreq, netfs_rreq_trace_collect);
@@ -409,7 +409,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 		front = stream->front;
 		while (front) {
 			trace_netfs_collect_sreq(wreq, front);
-			//_debug("sreq [%x] %llx %zx/%zx",
+			//kdebug("sreq [%x] %llx %zx/%zx",
 			//       front->debug_index, front->start, front->transferred, front->len);
 
 			/* Stall if there may be a discontinuity. */
@@ -598,7 +598,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 out:
 	netfs_put_group_many(wreq->group, wreq->nr_group_rel);
 	wreq->nr_group_rel = 0;
-	_leave(" = %x", notes);
+	kleave(" = %x", notes);
 	return;
 
 need_retry:
@@ -606,7 +606,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	 * that any partially completed op will have had any wholly transferred
 	 * folios removed from it.
 	 */
-	_debug("retry");
+	kdebug("retry");
 	netfs_retry_writes(wreq);
 	goto out;
 }
@@ -621,7 +621,7 @@ void netfs_write_collection_worker(struct work_struct *work)
 	size_t transferred;
 	int s;
 
-	_enter("R=%x", wreq->debug_id);
+	kenter("R=%x", wreq->debug_id);
 
 	netfs_see_request(wreq, netfs_rreq_trace_see_work);
 	if (!test_bit(NETFS_RREQ_IN_PROGRESS, &wreq->flags)) {
@@ -684,7 +684,7 @@ void netfs_write_collection_worker(struct work_struct *work)
 	if (wreq->origin == NETFS_DIO_WRITE)
 		inode_dio_end(wreq->inode);
 
-	_debug("finished");
+	kdebug("finished");
 	trace_netfs_rreq(wreq, netfs_rreq_trace_wake_ip);
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &wreq->flags);
 	wake_up_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS);
@@ -744,7 +744,7 @@ void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
 	struct netfs_io_request *wreq = subreq->rreq;
 	struct netfs_io_stream *stream = &wreq->io_streams[subreq->stream_nr];
 
-	_enter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
+	kenter("%x[%x] %zd", wreq->debug_id, subreq->debug_index, transferred_or_error);
 
 	switch (subreq->source) {
 	case NETFS_UPLOAD_TO_SERVER:
...
@@ -99,7 +99,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
 	if (IS_ERR(wreq))
 		return wreq;
 
-	_enter("R=%x", wreq->debug_id);
+	kenter("R=%x", wreq->debug_id);
 
 	ictx = netfs_inode(wreq->inode);
 	if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
@@ -159,7 +159,7 @@ static void netfs_prepare_write(struct netfs_io_request *wreq,
 	subreq->max_nr_segs = INT_MAX;
 	subreq->stream_nr = stream->stream_nr;
 
-	_enter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
+	kenter("R=%x[%x]", wreq->debug_id, subreq->debug_index);
 
 	trace_netfs_sreq_ref(wreq->debug_id, subreq->debug_index,
 			     refcount_read(&subreq->ref),
@@ -215,7 +215,7 @@ static void netfs_do_issue_write(struct netfs_io_stream *stream,
 {
 	struct netfs_io_request *wreq = subreq->rreq;
 
-	_enter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
+	kenter("R=%x[%x],%zx", wreq->debug_id, subreq->debug_index, subreq->len);
 
 	if (test_bit(NETFS_SREQ_FAILED, &subreq->flags))
 		return netfs_write_subrequest_terminated(subreq, subreq->error, false);
@@ -272,11 +272,11 @@ int netfs_advance_write(struct netfs_io_request *wreq,
 	size_t part;
 
 	if (!stream->avail) {
-		_leave("no write");
+		kleave("no write");
 		return len;
 	}
 
-	_enter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
+	kenter("R=%x[%x]", wreq->debug_id, subreq ? subreq->debug_index : 0);
 
 	if (subreq && start != subreq->start + subreq->len) {
 		netfs_issue_write(wreq, stream);
@@ -288,7 +288,7 @@ int netfs_advance_write(struct netfs_io_request *wreq,
 	subreq = stream->construct;
 
 	part = min(subreq->max_len - subreq->len, len);
-	_debug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
+	kdebug("part %zx/%zx %zx/%zx", subreq->len, subreq->max_len, part, len);
 	subreq->len += part;
 	subreq->nr_segs++;
@@ -319,7 +319,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	bool to_eof = false, streamw = false;
 	bool debug = false;
 
-	_enter("");
+	kenter("");
 
 	/* netfs_perform_write() may shift i_size around the page or from out
 	 * of the page to beyond it, but cannot move i_size into or through the
@@ -329,7 +329,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	if (fpos >= i_size) {
 		/* mmap beyond eof. */
-		_debug("beyond eof");
+		kdebug("beyond eof");
 		folio_start_writeback(folio);
 		folio_unlock(folio);
 		wreq->nr_group_rel += netfs_folio_written_back(folio);
@@ -363,7 +363,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	}
 	flen -= foff;
 
-	_debug("folio %zx %zx %zx", foff, flen, fsize);
+	kdebug("folio %zx %zx %zx", foff, flen, fsize);
 
 	/* Deal with discontinuities in the stream of dirty pages. These can
 	 * arise from a number of sources:
@@ -487,7 +487,7 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	for (int s = 0; s < NR_IO_STREAMS; s++)
 		netfs_issue_write(wreq, &wreq->io_streams[s]);
 
-	_leave(" = 0");
+	kleave(" = 0");
 	return 0;
 }
@@ -522,7 +522,7 @@ int netfs_writepages(struct address_space *mapping,
 	netfs_stat(&netfs_n_wh_writepages);
 
 	do {
-		_debug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
+		kdebug("wbiter %lx %llx", folio->index, wreq->start + wreq->submitted);
 
 		/* It appears we don't have to handle cyclic writeback wrapping. */
 		WARN_ON_ONCE(wreq && folio_pos(folio) < wreq->start + wreq->submitted);
@@ -546,14 +546,14 @@ int netfs_writepages(struct address_space *mapping,
 	mutex_unlock(&ictx->wb_lock);
 
 	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
-	_leave(" = %d", error);
+	kleave(" = %d", error);
 	return error;
 
 couldnt_start:
 	netfs_kill_dirty_pages(mapping, wbc, folio);
 out:
 	mutex_unlock(&ictx->wb_lock);
-	_leave(" = %d", error);
+	kleave(" = %d", error);
 	return error;
 }
 EXPORT_SYMBOL(netfs_writepages);
@@ -590,7 +590,7 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
 			       struct folio *folio, size_t copied, bool to_page_end,
 			       struct folio **writethrough_cache)
 {
-	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
+	kenter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
 	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
 
 	if (!*writethrough_cache) {
@@ -624,7 +624,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
 	int ret;
 
-	_enter("R=%x", wreq->debug_id);
+	kenter("R=%x", wreq->debug_id);
 
 	if (writethrough_cache)
 		netfs_write_folio(wreq, wbc, writethrough_cache);
@@ -657,7 +657,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 	loff_t start = wreq->start;
 	int error = 0;
 
-	_enter("%zx", len);
+	kenter("%zx", len);
 
 	if (wreq->origin == NETFS_DIO_WRITE)
 		inode_dio_begin(wreq->inode);
@@ -665,7 +665,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 	while (len) {
 		// TODO: Prepare content encryption
 
-		_debug("unbuffered %zx", len);
+		kdebug("unbuffered %zx", len);
 		part = netfs_advance_write(wreq, upload, start, len, false);
 		start += part;
 		len -= part;
@@ -684,6 +684,6 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
if (list_empty(&upload->subrequests)) if (list_empty(&upload->subrequests))
netfs_wake_write_collector(wreq, false); netfs_wake_write_collector(wreq, false);
_leave(" = %d", error); kleave(" = %d", error);
return error; return error;
} }
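
The hunks above are the netfs logging conversion: the local _enter()/_leave()/_debug() macros give way to kenter()/kleave()/kdebug(), which are built on pr_debug() and therefore participate in dynamic debug. A minimal sketch of such wrappers, assuming the usual kenter/kleave conventions (the in-tree definitions in fs/netfs/internal.h may differ in detail):

#include <linux/printk.h>

/* Function entry/exit tracing built on pr_debug(); with
 * CONFIG_DYNAMIC_DEBUG each call site can be toggled at runtime via
 * <debugfs>/dynamic_debug/control instead of a compile-time switch.
 */
#define kenter(FMT, ...) pr_debug("==> %s(" FMT ")\n", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) pr_debug("<== %s()" FMT "\n", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) pr_debug(FMT "\n", ##__VA_ARGS__)

Under these definitions, kenter("R=%x", wreq->debug_id) in netfs_end_writethrough() would emit "==> netfs_end_writethrough(R=1)" once that site is enabled.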
@@ -19,6 +19,7 @@
 enum fscache_cache_trace;
 enum fscache_cookie_trace;
 enum fscache_access_trace;
+enum fscache_volume_trace;
 
 enum fscache_cache_state {
         FSCACHE_CACHE_IS_NOT_PRESENT,   /* No cache is present for this name */
@@ -97,6 +98,11 @@ extern void fscache_withdraw_cookie(struct fscache_cookie *cookie);
 
 extern void fscache_io_error(struct fscache_cache *cache);
 
+extern struct fscache_volume *
+fscache_try_get_volume(struct fscache_volume *volume,
+                       enum fscache_volume_trace where);
+extern void fscache_put_volume(struct fscache_volume *volume,
+                               enum fscache_volume_trace where);
 extern void fscache_end_volume_access(struct fscache_volume *volume,
                                       struct fscache_cookie *cookie,
                                       enum fscache_access_trace why);
...
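
These declarations expose the volume reference helpers to cachefiles: fscache_put_volume() is now exported, and the new fscache_try_get_volume() lets a caller take a reference only if the volume is not already on its way to being freed. A sketch of the try-get shape, assuming the volume refcount is a refcount_t field named ref and the existing trace_fscache_volume() tracepoint (the actual implementation may differ in detail):

struct fscache_volume *fscache_try_get_volume(struct fscache_volume *volume,
                                              enum fscache_volume_trace where)
{
        int ref;

        /* Refuse to resurrect a volume whose refcount already hit zero;
         * the caller must treat NULL as "volume is being withdrawn".
         */
        if (!__refcount_inc_not_zero(&volume->ref, &ref))
                return NULL;

        trace_fscache_volume(volume->debug_id, ref + 1, where);
        return volume;
}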
@@ -35,12 +35,14 @@ enum fscache_volume_trace {
         fscache_volume_get_cookie,
         fscache_volume_get_create_work,
         fscache_volume_get_hash_collision,
+        fscache_volume_get_withdraw,
         fscache_volume_free,
         fscache_volume_new_acquire,
         fscache_volume_put_cookie,
         fscache_volume_put_create_work,
         fscache_volume_put_hash_collision,
         fscache_volume_put_relinquish,
+        fscache_volume_put_withdraw,
         fscache_volume_see_create_work,
         fscache_volume_see_hash_wake,
         fscache_volume_wait_create_work,
@@ -120,12 +122,14 @@ enum fscache_access_trace {
         EM(fscache_volume_get_cookie,           "GET cook ")    \
         EM(fscache_volume_get_create_work,      "GET creat")    \
         EM(fscache_volume_get_hash_collision,   "GET hcoll")    \
+        EM(fscache_volume_get_withdraw,         "GET withd")    \
         EM(fscache_volume_free,                 "FREE ")        \
         EM(fscache_volume_new_acquire,          "NEW acq ")     \
         EM(fscache_volume_put_cookie,           "PUT cook ")    \
         EM(fscache_volume_put_create_work,      "PUT creat")    \
         EM(fscache_volume_put_hash_collision,   "PUT hcoll")    \
         EM(fscache_volume_put_relinquish,       "PUT relnq")    \
+        EM(fscache_volume_put_withdraw,         "PUT withd")    \
         EM(fscache_volume_see_create_work,      "SEE creat")    \
         EM(fscache_volume_see_hash_wake,        "SEE hwake")    \
         E_(fscache_volume_wait_create_work,     "WAIT crea")
...
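
The two new enum values and their EM() print strings give the withdrawal path its own get/put trace tags, pairing with the helper sketched above. A hypothetical caller sketch (the name vcookie and the surrounding logic approximate the cachefiles withdrawal path rather than quote it):

/* Pin the volume before touching it so a concurrent free cannot race
 * with us, then drop the pin under the matching trace tag.
 */
volume = fscache_try_get_volume(vcookie, fscache_volume_get_withdraw);
if (volume) {
        /* ... withdraw the cachefiles state backed by this volume ... */
        fscache_put_volume(volume, fscache_volume_put_withdraw);
}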