Commit 1b02bde3 authored by Emoly Liu, committed by Greg Kroah-Hartman

staging/lustre/llite: allocate and free client cache asynchronously

Since an in-flight request holds a reference on the import as well as on
the export, obd_disconnect() in client_common_put_super() sometimes cannot
drop the last reference on the OSC import (e.g. due to a network
disconnection), and cl_cache can then be accessed after it has been freed.

To fix this issue, ccc_users is used as the cl_cache refcount, and
lov/llite/osc each hold one cl_cache reference. This also avoids the race
in which a new OST is added to the system while the client is mounted.
The following cl_cache functions are added:
- cl_cache_init(): allocate and initialize cl_cache
- cl_cache_incref(): increase the cl_cache refcount
- cl_cache_decref(): decrease the cl_cache refcount and free the cache
  when it drops to 0 (see the lifecycle sketch after the commit metadata).
Signed-off-by: Emoly Liu <emoly.liu@intel.com>
Reviewed-on: http://review.whamcloud.com/13746
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6173
Reviewed-by: Niu Yawei <yawei.niu@intel.com>
Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 341f1f0a
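Before the diff itself, here is a minimal userspace sketch of the refcount lifecycle the patch establishes. It is illustrative only: the struct and helper names mirror the patch but are hypothetical here, C11 atomics and calloc()/free() stand in for the kernel's atomic_t, kzalloc() and kfree(), and the llite/lov/OSC layers are simulated by plain function calls.

/* Minimal userspace model of the cl_cache refcount lifecycle (sketch only,
 * not kernel code).
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct client_cache {
	atomic_int users;	/* models ccc_users */
	unsigned long lru_max;	/* models ccc_lru_max */
};

/* Like cl_cache_init(): allocate and take the first (llite) reference. */
static struct client_cache *cache_init(unsigned long lru_page_max)
{
	struct client_cache *cache = calloc(1, sizeof(*cache));

	if (!cache)
		return NULL;
	atomic_init(&cache->users, 1);
	cache->lru_max = lru_page_max;
	return cache;
}

/* Like cl_cache_incref(): one extra reference per holder (lov, each OSC). */
static void cache_incref(struct client_cache *cache)
{
	atomic_fetch_add(&cache->users, 1);
}

/* Like cl_cache_decref(): only the last holder actually frees the cache,
 * so a late OSC disconnect can no longer touch freed memory.
 */
static void cache_decref(struct client_cache *cache)
{
	if (atomic_fetch_sub(&cache->users, 1) == 1)
		free(cache);
}

int main(void)
{
	struct client_cache *cache = cache_init(4096);
	int i;

	if (!cache)
		return 1;
	cache_incref(cache);		/* lov takes a reference */
	for (i = 0; i < 4; i++)
		cache_incref(cache);	/* four OSCs attach */
	printf("users = %d (4 OSCs + llite + lov)\n",
	       atomic_load(&cache->users));
	for (i = 0; i < 4; i++)
		cache_decref(cache);	/* OSCs disconnect, possibly late */
	cache_decref(cache);		/* lov_cleanup() drops the lov ref */
	cache_decref(cache);		/* ll_free_sbi(): last ref, frees */
	return 0;
}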
@@ -2322,7 +2322,8 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
  */
 struct cl_client_cache {
 	/**
-	 * # of users (OSCs)
+	 * # of client cache refcount
+	 * # of users (OSCs) + 2 (held by llite and lov)
 	 */
 	atomic_t ccc_users;
 	/**
@@ -2357,6 +2358,13 @@ struct cl_client_cache {
 };
 
+/**
+ * cl_cache functions
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max);
+void cl_cache_incref(struct cl_client_cache *cache);
+void cl_cache_decref(struct cl_client_cache *cache);
+
 /** @} cl_page */
 
 /** \defgroup cl_lock cl_lock
......
@@ -415,7 +415,7 @@ struct lov_obd {
 	enum lustre_sec_part	lov_sp_me;
 
 	/* Cached LRU and unstable data from upper layer */
-	void			*lov_cache;
+	struct cl_client_cache	*lov_cache;
 
 	struct rw_semaphore	lov_notify_lock;
......
@@ -452,7 +452,7 @@ struct ll_sb_info {
 	 * any page which is sent to a server as part of a bulk request,
 	 * but is uncommitted to stable storage.
 	 */
-	struct cl_client_cache	 ll_cache;
+	struct cl_client_cache	*ll_cache;
 
 	struct lprocfs_stats	*ll_ra_stats;
......
@@ -83,15 +83,11 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
 	pages = si.totalram - si.totalhigh;
 	lru_page_max = pages / 2;
 
-	/* initialize ll_cache data */
-	atomic_set(&sbi->ll_cache.ccc_users, 0);
-	sbi->ll_cache.ccc_lru_max = lru_page_max;
-	atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
-	spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
-	INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
-	atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
-	init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+	sbi->ll_cache = cl_cache_init(lru_page_max);
+	if (!sbi->ll_cache) {
+		kfree(sbi);
+		return NULL;
+	}
 
 	sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
 						    SBI_DEFAULT_READAHEAD_MAX);
@@ -131,6 +127,11 @@ static void ll_free_sbi(struct super_block *sb)
 {
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
 
+	if (sbi->ll_cache) {
+		cl_cache_decref(sbi->ll_cache);
+		sbi->ll_cache = NULL;
+	}
+
 	kfree(sbi);
 }
@@ -488,8 +489,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	cl_sb_init(sb);
 
 	err = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CACHE_SET),
-				 KEY_CACHE_SET, sizeof(sbi->ll_cache),
-				 &sbi->ll_cache, NULL);
+				 KEY_CACHE_SET, sizeof(*sbi->ll_cache),
+				 sbi->ll_cache, NULL);
 
 	sb->s_root = d_make_root(root);
 	if (!sb->s_root) {
@@ -534,8 +535,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 out_dt:
 	obd_disconnect(sbi->ll_dt_exp);
 	sbi->ll_dt_exp = NULL;
-	/* Make sure all OScs are gone, since cl_cache is accessing sbi. */
-	obd_zombie_barrier();
 
 out_md_fid:
 	obd_fid_fini(sbi->ll_md_exp->exp_obd);
 out_md:
@@ -585,10 +584,6 @@ static void client_common_put_super(struct super_block *sb)
 	obd_fid_fini(sbi->ll_dt_exp->exp_obd);
 	obd_disconnect(sbi->ll_dt_exp);
 	sbi->ll_dt_exp = NULL;
-	/* wait till all OSCs are gone, since cl_cache is accessing sbi.
-	 * see LU-2543.
-	 */
-	obd_zombie_barrier();
 
 	ldebugfs_unregister_mountpoint(sbi);
@@ -921,12 +916,12 @@ void ll_put_super(struct super_block *sb)
 	if (!force) {
 		struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
 
-		rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
-				  !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+		rc = l_wait_event(sbi->ll_cache->ccc_unstable_waitq,
+				  !atomic_read(&sbi->ll_cache->ccc_unstable_nr),
 				  &lwi);
 	}
 
-	ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+	ccc_count = atomic_read(&sbi->ll_cache->ccc_unstable_nr);
 	if (!force && rc != -EINTR)
 		LASSERTF(!ccc_count, "count: %i\n", ccc_count);
......
@@ -356,7 +356,7 @@ static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
 {
 	struct super_block *sb = m->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
-	struct cl_client_cache *cache = &sbi->ll_cache;
+	struct cl_client_cache *cache = sbi->ll_cache;
 	int shift = 20 - PAGE_SHIFT;
 	int max_cached_mb;
 	int unused_mb;
@@ -383,7 +383,7 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
 {
 	struct super_block *sb = ((struct seq_file *)file->private_data)->private;
 	struct ll_sb_info *sbi = ll_s2sbi(sb);
-	struct cl_client_cache *cache = &sbi->ll_cache;
+	struct cl_client_cache *cache = sbi->ll_cache;
 	struct lu_env *env;
 	int refcheck;
 	int mult, rc, pages_number;
@@ -822,7 +822,7 @@ static ssize_t unstable_stats_show(struct kobject *kobj,
 {
 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
 					      ll_kobj);
-	struct cl_client_cache *cache = &sbi->ll_cache;
+	struct cl_client_cache *cache = sbi->ll_cache;
 	int pages, mb;
 
 	pages = atomic_read(&cache->ccc_unstable_nr);
......
@@ -892,6 +892,12 @@ static int lov_cleanup(struct obd_device *obd)
 		kfree(lov->lov_tgts);
 		lov->lov_tgt_size = 0;
 	}
 
+	if (lov->lov_cache) {
+		cl_cache_decref(lov->lov_cache);
+		lov->lov_cache = NULL;
+	}
+
 	return 0;
 }
@@ -2121,6 +2127,7 @@ static int lov_set_info_async(const struct lu_env *env, struct obd_export *exp,
 		LASSERT(!lov->lov_cache);
 		lov->lov_cache = val;
 		do_inactive = 1;
+		cl_cache_incref(lov->lov_cache);
 	}
 
 	for (i = 0; i < count; i++, val = (char *)val + incr) {
......
@@ -1072,3 +1072,49 @@ void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
 	slice->cpl_page = page;
 }
 EXPORT_SYMBOL(cl_page_slice_add);
+
+/**
+ * Allocate and initialize cl_cache, called by ll_init_sbi().
+ */
+struct cl_client_cache *cl_cache_init(unsigned long lru_page_max)
+{
+	struct cl_client_cache *cache = NULL;
+
+	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+	if (!cache)
+		return NULL;
+
+	/* Initialize cache data */
+	atomic_set(&cache->ccc_users, 1);
+	cache->ccc_lru_max = lru_page_max;
+	atomic_set(&cache->ccc_lru_left, lru_page_max);
+	spin_lock_init(&cache->ccc_lru_lock);
+	INIT_LIST_HEAD(&cache->ccc_lru);
+	atomic_set(&cache->ccc_unstable_nr, 0);
+	init_waitqueue_head(&cache->ccc_unstable_waitq);
+
+	return cache;
+}
+EXPORT_SYMBOL(cl_cache_init);
+
+/**
+ * Increase cl_cache refcount
+ */
+void cl_cache_incref(struct cl_client_cache *cache)
+{
+	atomic_inc(&cache->ccc_users);
+}
+EXPORT_SYMBOL(cl_cache_incref);
+
+/**
+ * Decrease cl_cache refcount and free the cache if refcount=0.
+ * Since llite, lov and osc all hold cl_cache refcount,
+ * the free will not cause race. (LU-6173)
+ */
+void cl_cache_decref(struct cl_client_cache *cache)
+{
+	if (atomic_dec_and_test(&cache->ccc_users))
+		kfree(cache);
+}
+EXPORT_SYMBOL(cl_cache_decref);
@@ -412,7 +412,7 @@ static int osc_cache_too_much(struct client_obd *cli)
 	int pages = atomic_read(&cli->cl_lru_in_list);
 	unsigned long budget;
 
-	budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
+	budget = cache->ccc_lru_max / (atomic_read(&cache->ccc_users) - 2);
 
 	/* if it's going to run out LRU slots, we should free some, but not
 	 * too much to maintain fairness among OSCs.
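To see why the budget now subtracts 2: since llite and lov each hold a reference, ccc_users equals the number of OSCs plus 2. With, say, 4 OSCs and ccc_lru_max = 8192 pages (illustrative numbers, not from the patch), ccc_users is 6 and the fair per-OSC budget is 8192 / (6 - 2) = 2048 pages; dividing by the raw ccc_users would understate it as 8192 / 6 ≈ 1365 pages. The same adjustment applies to max_scans in the next hunk, which should count only the OSCs actually on the LRU list.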
@@ -712,7 +712,7 @@ int osc_lru_reclaim(struct client_obd *cli)
 	cache->ccc_lru_shrinkers++;
 	list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
 
-	max_scans = atomic_read(&cache->ccc_users);
+	max_scans = atomic_read(&cache->ccc_users) - 2;
 	while (--max_scans > 0 && !list_empty(&cache->ccc_lru)) {
 		cli = list_entry(cache->ccc_lru.next, struct client_obd,
 				 cl_lru_osc);
......
@@ -2913,7 +2913,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
 		LASSERT(!cli->cl_cache); /* only once */
 		cli->cl_cache = val;
-		atomic_inc(&cli->cl_cache->ccc_users);
+		cl_cache_incref(cli->cl_cache);
 		cli->cl_lru_left = &cli->cl_cache->ccc_lru_left;
 
 		/* add this osc into entity list */
@@ -3293,7 +3293,7 @@ static int osc_cleanup(struct obd_device *obd)
 		list_del_init(&cli->cl_lru_osc);
 		spin_unlock(&cli->cl_cache->ccc_lru_lock);
 		cli->cl_lru_left = NULL;
-		atomic_dec(&cli->cl_cache->ccc_users);
+		cl_cache_decref(cli->cl_cache);
 		cli->cl_cache = NULL;
 	}
......