Commit 0aa211e3 authored by NeilBrown, committed by Greg Kroah-Hartman

staging: lustre: libcfs: use a workqueue for rehash work.

Lustre has a work-item queuing scheme that provides the
same functionality as Linux workqueues.
To make the code easier for Linux developers to follow,
change it to use workqueues.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d487fe31
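
For readers new to the workqueue API this patch adopts: a driver embeds a work_struct in its own object, binds it to a handler with INIT_WORK(), and the handler recovers the object via container_of(). The sketch below shows that pattern in isolation under illustrative names (demo_*); it is not part of the patch itself.

	#include <linux/workqueue.h>

	struct demo_hash {
		struct workqueue_struct	*wq;		/* like cfs_rehash_wq */
		struct work_struct	rehash_work;	/* like hs_rehash_work */
	};

	static void demo_rehash_worker(struct work_struct *work)
	{
		/* Recover the containing object from the embedded work_struct. */
		struct demo_hash *h = container_of(work, struct demo_hash, rehash_work);

		/* ... rehash h ... */
	}

	static int demo_setup(struct demo_hash *h)
	{
		/* At most 4 concurrent work items, mirroring the patch's limit. */
		h->wq = alloc_workqueue("demo_rh", WQ_SYSFS, 4);
		if (!h->wq)
			return -ENOMEM;
		INIT_WORK(&h->rehash_work, demo_rehash_worker);
		return 0;
	}

	static void demo_kick(struct demo_hash *h)
	{
		/* No-op if this work item is already queued. */
		queue_work(h->wq, &h->rehash_work);
	}

	static void demo_teardown(struct demo_hash *h)
	{
		/* Waits for a queued or running handler to finish. */
		cancel_work_sync(&h->rehash_work);
		destroy_workqueue(h->wq);
	}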
@@ -126,7 +126,7 @@ extern struct miscdevice libcfs_dev;
  */
 extern char lnet_debug_log_upcall[1024];
 
-extern struct cfs_wi_sched *cfs_sched_rehash;
+extern struct workqueue_struct *cfs_rehash_wq;
 
 struct lnet_debugfs_symlink_def {
 	char *name;
...
@@ -248,7 +248,7 @@ struct cfs_hash {
 	/** # of iterators (caller of cfs_hash_for_each_*) */
 	u32			hs_iterators;
 	/** rehash workitem */
-	struct cfs_workitem	hs_rehash_wi;
+	struct work_struct	hs_rehash_work;
 	/** refcount on this hash table */
 	atomic_t		hs_refcount;
 	/** rehash buckets-table */
@@ -265,7 +265,7 @@ struct cfs_hash {
 	/** bits when we found the max depth */
 	unsigned int		hs_dep_bits;
 	/** workitem to output max depth */
-	struct cfs_workitem	hs_dep_wi;
+	struct work_struct	hs_dep_work;
 #endif
 	/** name of htable */
 	char			hs_name[0];
@@ -738,7 +738,7 @@ u64 cfs_hash_size_get(struct cfs_hash *hs);
  */
 void cfs_hash_rehash_cancel_locked(struct cfs_hash *hs);
 void cfs_hash_rehash_cancel(struct cfs_hash *hs);
-int cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
+void cfs_hash_rehash(struct cfs_hash *hs, int do_rehash);
 void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
 			 void *new_key, struct hlist_node *hnode);
...
@@ -114,7 +114,7 @@ module_param(warn_on_depth, uint, 0644);
 MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
 #endif
 
-struct cfs_wi_sched *cfs_sched_rehash;
+struct workqueue_struct *cfs_rehash_wq;
 
 static inline void
 cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}
@@ -519,7 +519,7 @@ cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
 	hs->hs_dep_bits = hs->hs_cur_bits;
 	spin_unlock(&hs->hs_dep_lock);
 
-	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
+	queue_work(cfs_rehash_wq, &hs->hs_dep_work);
 # endif
 }
@@ -939,12 +939,12 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
  * @flags - CFS_HASH_REHASH enable synamic hash resizing
  *        - CFS_HASH_SORT enable chained hash sort
  */
-static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
+static void cfs_hash_rehash_worker(struct work_struct *work);
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(struct cfs_workitem *wi)
+static void cfs_hash_dep_print(struct work_struct *work)
 {
-	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
+	struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_dep_work);
 	int dep;
 	int bkt;
 	int off;
@@ -968,21 +968,12 @@ static int cfs_hash_dep_print(struct cfs_workitem *wi)
 static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
 {
 	spin_lock_init(&hs->hs_dep_lock);
-	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
+	INIT_WORK(&hs->hs_dep_work, cfs_hash_dep_print);
 }
 
 static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
 {
-	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
-		return;
-
-	spin_lock(&hs->hs_dep_lock);
-	while (hs->hs_dep_bits) {
-		spin_unlock(&hs->hs_dep_lock);
-		cond_resched();
-		spin_lock(&hs->hs_dep_lock);
-	}
-	spin_unlock(&hs->hs_dep_lock);
+	cancel_work_sync(&hs->hs_dep_work);
 }
 
 #else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */
@@ -1044,7 +1035,7 @@ cfs_hash_create(char *name, unsigned int cur_bits, unsigned int max_bits,
 	hs->hs_ops = ops;
 	hs->hs_extra_bytes = extra_bytes;
 	hs->hs_rehash_bits = 0;
-	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
+	INIT_WORK(&hs->hs_rehash_work, cfs_hash_rehash_worker);
 	cfs_hash_depth_wi_init(hs);
 
 	if (cfs_hash_with_rehash(hs))
@@ -1364,6 +1355,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
 	cfs_hash_lock(hs, 1);
 	hs->hs_iterators++;
+	cfs_hash_unlock(hs, 1);
 
 	/* NB: iteration is mostly called by service thread,
 	 * we tend to cancel pending rehash-request, instead of
@@ -1371,8 +1363,7 @@ cfs_hash_for_each_enter(struct cfs_hash *hs)
 	 * after iteration
 	 */
 	if (cfs_hash_is_rehashing(hs))
-		cfs_hash_rehash_cancel_locked(hs);
-	cfs_hash_unlock(hs, 1);
+		cfs_hash_rehash_cancel(hs);
 }
 
 static void
@@ -1773,43 +1764,14 @@ EXPORT_SYMBOL(cfs_hash_for_each_key);
  * this approach assumes a reasonably uniform hashing function. The
  * theta thresholds for @hs are tunable via cfs_hash_set_theta().
  */
-void
-cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
-{
-	int i;
-
-	/* need hold cfs_hash_lock(hs, 1) */
-	LASSERT(cfs_hash_with_rehash(hs) &&
-		!cfs_hash_with_no_lock(hs));
-
-	if (!cfs_hash_is_rehashing(hs))
-		return;
-
-	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
-		hs->hs_rehash_bits = 0;
-		return;
-	}
-
-	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
-		cfs_hash_unlock(hs, 1);
-		/* raise console warning while waiting too long */
-		CDEBUG(is_power_of_2(i >> 3) ? D_WARNING : D_INFO,
-		       "hash %s is still rehashing, rescheded %d\n",
-		       hs->hs_name, i - 1);
-		cond_resched();
-		cfs_hash_lock(hs, 1);
-	}
-}
-
 void
 cfs_hash_rehash_cancel(struct cfs_hash *hs)
 {
-	cfs_hash_lock(hs, 1);
-	cfs_hash_rehash_cancel_locked(hs);
-	cfs_hash_unlock(hs, 1);
+	LASSERT(cfs_hash_with_rehash(hs));
+	cancel_work_sync(&hs->hs_rehash_work);
 }
 
-int
+void
 cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 {
 	int rc;
@@ -1821,21 +1783,21 @@ cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
 	rc = cfs_hash_rehash_bits(hs);
 	if (rc <= 0) {
 		cfs_hash_unlock(hs, 1);
-		return rc;
+		return;
 	}
 
 	hs->hs_rehash_bits = rc;
 	if (!do_rehash) {
 		/* launch and return */
-		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
+		queue_work(cfs_rehash_wq, &hs->hs_rehash_work);
 		cfs_hash_unlock(hs, 1);
-		return 0;
+		return;
 	}
 
 	/* rehash right now */
 	cfs_hash_unlock(hs, 1);
 
-	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
+	cfs_hash_rehash_worker(&hs->hs_rehash_work);
 }
 
 static int
@@ -1869,10 +1831,10 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 	return c;
 }
 
-static int
-cfs_hash_rehash_worker(struct cfs_workitem *wi)
+static void
+cfs_hash_rehash_worker(struct work_struct *work)
 {
-	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
+	struct cfs_hash *hs = container_of(work, struct cfs_hash, hs_rehash_work);
 	struct cfs_hash_bucket **bkts;
 	struct cfs_hash_bd bd;
 	unsigned int old_size;
@@ -1956,8 +1918,6 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi)
 		hs->hs_cur_bits = hs->hs_rehash_bits;
 out:
 	hs->hs_rehash_bits = 0;
-	if (rc == -ESRCH) /* never be scheduled again */
-		cfs_wi_exit(cfs_sched_rehash, wi);
 	bsize = cfs_hash_bkt_size(hs);
 	cfs_hash_unlock(hs, 1);
 	/* can't refer to @hs anymore because it could be destroyed */
@@ -1965,8 +1925,6 @@ cfs_hash_rehash_worker(struct cfs_workitem *wi)
 		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
 
 	if (rc)
 		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
-	/* return 1 only if cfs_wi_exit is called */
-	return rc == -ESRCH;
 }
 
 /**
...
@@ -554,12 +554,10 @@ static int libcfs_init(void)
 		goto cleanup_deregister;
 	}
 
-	/* max to 4 threads, should be enough for rehash */
-	rc = min(cfs_cpt_weight(cfs_cpt_table, CFS_CPT_ANY), 4);
-	rc = cfs_wi_sched_create("cfs_rh", cfs_cpt_table, CFS_CPT_ANY,
-				 rc, &cfs_sched_rehash);
-	if (rc) {
-		CERROR("Startup workitem scheduler: error: %d\n", rc);
+	cfs_rehash_wq = alloc_workqueue("cfs_rh", WQ_SYSFS, 4);
+	if (!cfs_rehash_wq) {
+		CERROR("Failed to start rehash workqueue.\n");
+		rc = -ENOMEM;
 		goto cleanup_deregister;
 	}
@@ -590,9 +588,9 @@ static void libcfs_exit(void)
 	lustre_remove_debugfs();
 
-	if (cfs_sched_rehash) {
-		cfs_wi_sched_destroy(cfs_sched_rehash);
-		cfs_sched_rehash = NULL;
+	if (cfs_rehash_wq) {
+		destroy_workqueue(cfs_rehash_wq);
+		cfs_rehash_wq = NULL;
 	}
 
 	cfs_crypto_unregister();
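
Worth noting on the conversion above: cfs_wi_deschedule() could only remove a work item that had not yet started, so both cancel paths had to poll until a running worker finished (the hs_dep_bits spin loop, and the "still rehashing" CDEBUG loop). cancel_work_sync() provides that guarantee in a single call. The patch also preserves the caller's sync/async choice in cfs_hash_rehash(); a minimal sketch of that split, reusing the illustrative demo_* names from the earlier sketch:

	static void demo_rehash(struct demo_hash *h, bool do_now)
	{
		if (!do_now) {
			/* launch and return; runs later on a workqueue thread */
			queue_work(h->wq, &h->rehash_work);
			return;
		}
		/* rehash right now, synchronously on the caller's thread */
		demo_rehash_worker(&h->rehash_work);
	}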