Commit 5c61ef1b authored by Linus Torvalds

Merge tag 'fscache-fixes-20180725' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

Pull fscache/cachefiles fixes from David Howells:

 - Allow cancelled operations to be queued so they can be cleaned up.

 - Fix a refcounting bug in the monitoring of reads on backend files,
   whereby a race can occur between monitor objects being listed for
   work, the work processing being queued and the work processor running
   and destroying the monitor objects (a distilled sketch of the fix
   follows this list).

 - Fix a ref overput in object attachment, whereby a tentatively
   considered object is put in error handling without first being 'got'.

 - Fix a missing clear of the CACHEFILES_OBJECT_ACTIVE flag whereby an
   assertion failure occurs when we retry because the object still
   appears to be active.

 - Wait rather than BUG'ing on an object collision in the depths of
   cachefiles, as the active object should be being cleaned up - this
   also depends on the one above.
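
The refcounting fix in the second item above amounts to pinning an object
before handing it to another thread.  The userspace-only sketch below
distils that pattern; struct op, op_get(), op_put() and queue_for_worker()
are hypothetical stand-ins rather than fscache APIs, and the real change is
the fscache_get_retrieval()/fscache_put_retrieval() pair added to
cachefiles_read_waiter() in the diff further down.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct op {
        atomic_int usage;               /* reference count */
};

static void op_get(struct op *op)
{
        atomic_fetch_add(&op->usage, 1);
}

static void op_put(struct op *op)
{
        /* atomic_fetch_sub() returns the old value; 1 means last reference */
        if (atomic_fetch_sub(&op->usage, 1) == 1) {
                printf("op freed\n");
                free(op);
        }
}

/* Stand-in for putting the op on a to-do list and kicking a worker; the
 * worker takes and later drops its own reference. */
static void queue_for_worker(struct op *op)
{
        op_get(op);
        /* ... a worker thread would process the op and then op_put() it ... */
        op_put(op);
}

/* The waiter does not own a reference on the op it is queuing, so it takes
 * a temporary one across the hand-off; without it, the worker could drop
 * the last reference while the waiter is still using the pointer. */
static void waiter(struct op *op)
{
        op_get(op);
        queue_for_worker(op);
        op_put(op);
}

int main(void)
{
        struct op *op = calloc(1, sizeof(*op));

        if (!op)
                return 1;
        atomic_init(&op->usage, 1);     /* the owner's reference */
        waiter(op);
        op_put(op);                     /* owner drops its ref; op is freed here */
        return 0;
}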

* tag 'fscache-fixes-20180725' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs:
  cachefiles: Wait rather than BUG'ing on "Unexpected object collision"
  cachefiles: Fix missing clear of the CACHEFILES_OBJECT_ACTIVE flag
  fscache: Fix reference overput in fscache_attach_object() error handling
  cachefiles: Fix refcounting bug in backing-file read monitoring
  fscache: Allow cancelled operations to be enqueued
parents 9981b4fb c2412ac4

fs/cachefiles/bind.c
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
                        "%s",
                        fsdef->dentry->d_sb->s_id);

-       fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+       fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+                           &cache->cache);

        ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
        if (ret < 0)
...@@ -186,12 +186,12 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache, ...@@ -186,12 +186,12 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
* need to wait for it to be destroyed */ * need to wait for it to be destroyed */
wait_for_old_object: wait_for_old_object:
trace_cachefiles_wait_active(object, dentry, xobject); trace_cachefiles_wait_active(object, dentry, xobject);
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
if (fscache_object_is_live(&xobject->fscache)) { if (fscache_object_is_live(&xobject->fscache)) {
pr_err("\n"); pr_err("\n");
pr_err("Error: Unexpected object collision\n"); pr_err("Error: Unexpected object collision\n");
cachefiles_printk_object(object, xobject); cachefiles_printk_object(object, xobject);
BUG();
} }
atomic_inc(&xobject->usage); atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock); write_unlock(&cache->active_lock);
...@@ -248,7 +248,6 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache, ...@@ -248,7 +248,6 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
goto try_again; goto try_again;
requeue: requeue:
clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo); cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
_leave(" = -ETIMEDOUT"); _leave(" = -ETIMEDOUT");
return -ETIMEDOUT; return -ETIMEDOUT;
......
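
The two namei.c hunks above implement the last two items of the pull
message: clear our own ACTIVE bit before waiting for the colliding object,
and retry instead of calling BUG().  The standalone sketch below is only
loosely modelled on that flow (the real code sleeps on the old object's
destruction with a timeout); the flag name and helpers are hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define OBJECT_ACTIVE   (1u << 0)

struct object {
        unsigned int flags;
};

/* Pretend the colliding object stops being live after a few polls. */
static bool old_object_still_live(int attempt)
{
        return attempt < 3;
}

static int mark_object_active(struct object *obj)
{
        int attempt = 0;

try_again:
        obj->flags |= OBJECT_ACTIVE;

        if (old_object_still_live(attempt)) {
                /* Collision: drop our claim and wait, rather than BUG(). */
                obj->flags &= ~OBJECT_ACTIVE;
                if (++attempt > 10)
                        return -1;      /* give up, like the -ETIMEDOUT path */
                printf("collision, waiting (attempt %d)\n", attempt);
                goto try_again;
        }

        printf("object marked active after %d retries\n", attempt);
        return 0;
}

int main(void)
{
        struct object obj = { 0 };

        return mark_object_active(&obj) ? 1 : 0;
}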

fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
+       struct fscache_retrieval *op = monitor->op;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;

@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
        list_del(&wait->entry);

        /* move onto the action list and queue for FS-Cache thread pool */
-       ASSERT(monitor->op);
+       ASSERT(op);

-       object = container_of(monitor->op->op.object,
-                             struct cachefiles_object, fscache);
+       /* We need to temporarily bump the usage count as we don't own a ref
+        * here otherwise cachefiles_read_copier() may free the op between the
+        * monitor being enqueued on the op->to_do list and the op getting
+        * enqueued on the work queue.
+        */
+       fscache_get_retrieval(op);

+       object = container_of(op->op.object, struct cachefiles_object, fscache);
        spin_lock(&object->work_lock);
-       list_add_tail(&monitor->op_link, &monitor->op->to_do);
+       list_add_tail(&monitor->op_link, &op->to_do);
        spin_unlock(&object->work_lock);

-       fscache_enqueue_retrieval(monitor->op);
+       fscache_enqueue_retrieval(op);
+       fscache_put_retrieval(op);
        return 0;
 }

fs/fscache/cache.c
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
 {
        struct fscache_cache_tag *tag;

+       ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
        BUG_ON(!cache->ops);
        BUG_ON(!ifsdef);

@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
        if (!cache->kobj)
                goto error;

-       ifsdef->cookie = &fscache_fsdef_index;
        ifsdef->cache = cache;
        cache->fsdef = ifsdef;

fs/fscache/cookie.c
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
                goto error;
        }

+       ASSERTCMP(object->cookie, ==, cookie);
        fscache_stat(&fscache_n_object_alloc);

        object->debug_id = atomic_inc_return(&fscache_object_debug_id);

@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
        _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);

+       ASSERTCMP(object->cookie, ==, cookie);
+
        spin_lock(&cookie->lock);

        /* there may be multiple initial creations of this object, but we only

@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
                spin_unlock(&cache->object_list_lock);
        }

-       /* attach to the cookie */
-       object->cookie = cookie;
-       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+       /* Attach to the cookie. The object already has a ref on it. */
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);

        fscache_objlist_add(object);

fs/fscache/object.c
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
        object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
+       fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
        object->parent = NULL;
 #ifdef CONFIG_FSCACHE_OBJECT_LIST
        RB_CLEAR_NODE(&object->objlist_link);
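
Together with the fscache_attach_object() hunk above, this moves the
reference on the cookie to the point where object->cookie is assigned, so
any later error path can drop exactly one reference unconditionally.  A
minimal sketch of that ownership rule, using hypothetical cookie_get(),
cookie_put() and object_init() helpers rather than the fscache ones:

#include <stdio.h>

struct cookie {
        int refs;
};

static void cookie_get(struct cookie *c) { c->refs++; }
static void cookie_put(struct cookie *c) { c->refs--; }

struct object {
        struct cookie *cookie;
};

/* The reference is taken where the pointer is stored... */
static void object_init(struct object *obj, struct cookie *c)
{
        obj->cookie = c;
        cookie_get(c);
}

/* ...so teardown can always put it, whether or not a later attach step
 * succeeded. */
static void object_destroy(struct object *obj)
{
        cookie_put(obj->cookie);
}

int main(void)
{
        struct cookie c = { .refs = 1 };
        struct object obj;

        object_init(&obj, &c);
        /* imagine attachment failing here; the error path is still just: */
        object_destroy(&obj);

        printf("cookie refs back to %d\n", c.refs);     /* prints 1 */
        return 0;
}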

fs/fscache/operation.c
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
        ASSERT(op->processor != NULL);
        ASSERT(fscache_object_is_available(op->object));
        ASSERTCMP(atomic_read(&op->usage), >, 0);
-       ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+       ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+                   op->state, ==, FSCACHE_OP_ST_CANCELLED);

        fscache_stat(&fscache_n_op_enqueue);
        switch (op->flags & FSCACHE_OP_TYPE) {

@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
        struct fscache_cache *cache;

        _enter("{OBJ%x OP%x,%d}",
-              op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+              op->object ? op->object->debug_id : 0,
+              op->debug_id, atomic_read(&op->usage));

        ASSERTCMP(atomic_read(&op->usage), >, 0);
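
The relaxed assertion above is what lets a cancelled operation be enqueued
so that its cleanup can run on the thread pool (first item of the pull
message).  A small userspace sketch of the same check, with a hypothetical
ASSERT_IF() macro standing in for the kernel's ASSERTIFCMP():

#include <assert.h>

enum op_state { OP_ST_IN_PROGRESS, OP_ST_CANCELLED, OP_ST_COMPLETE };

/* Assert cond2 only when cond1 holds. */
#define ASSERT_IF(cond1, cond2) do { if (cond1) assert(cond2); } while (0)

static void check_enqueueable(enum op_state state)
{
        /* Previously: assert(state == OP_ST_IN_PROGRESS);
         * Now: if it is not in progress, it must at least be cancelled. */
        ASSERT_IF(state != OP_ST_IN_PROGRESS, state == OP_ST_CANCELLED);
}

int main(void)
{
        check_enqueueable(OP_ST_IN_PROGRESS);   /* always allowed */
        check_enqueueable(OP_ST_CANCELLED);     /* now also allowed */
        return 0;
}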