Commit 5d145b1a authored by James Simmons, committed by Greg Kroah-Hartman

staging: lustre: libcfs: remove cfs_workitem_t typedefs

Convert cfs_workitem_t to a proper structure, struct cfs_workitem.
Signed-off-by: James Simmons <uja.ornl@yahoo.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-6245
Reviewed-on: http://review.whamcloud.com/17202
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Dmitry Eremin <dmitry.eremin@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d47b7026
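
The conversion follows kernel coding style, which discourages typedefs for plain structures: the type keeps only its struct tag, and every user spells out struct cfs_workitem instead of cfs_workitem_t. A minimal sketch of the resulting definition (field set trimmed to what the hunks below show; example_init() is an illustrative stand-in for cfs_wi_init(), not part of the patch):

	#include <linux/list.h>

	/*
	 * Before the patch:  typedef struct cfs_workitem { ... } cfs_workitem_t;
	 * After the patch:   struct cfs_workitem { ... };
	 * Callers change from "cfs_workitem_t *wi" to "struct cfs_workitem *wi".
	 */
	struct cfs_workitem {
		struct list_head	wi_list;	/* chain on runq or rerunq */
		unsigned short		wi_running:1;
		unsigned short		wi_scheduled:1;	/* scheduled */
	};

	static inline void example_init(struct cfs_workitem *wi)
	{
		INIT_LIST_HEAD(&wi->wi_list);	/* same setup cfs_wi_init() performs */
	}
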
@@ -245,7 +245,7 @@ struct cfs_hash {
 	/** # of iterators (caller of cfs_hash_for_each_*) */
 	__u32			hs_iterators;
 	/** rehash workitem */
-	cfs_workitem_t		hs_rehash_wi;
+	struct cfs_workitem	hs_rehash_wi;
 	/** refcount on this hash table */
 	atomic_t		hs_refcount;
 	/** rehash buckets-table */
@@ -262,7 +262,7 @@ struct cfs_hash {
 	/** bits when we found the max depth */
 	unsigned int		hs_dep_bits;
 	/** workitem to output max depth */
-	cfs_workitem_t		hs_dep_wi;
+	struct cfs_workitem	hs_dep_wi;
 #endif
 	/** name of htable */
 	char			hs_name[0];
...
@@ -73,7 +73,7 @@ int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
 struct cfs_workitem;
 
 typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
-typedef struct cfs_workitem {
+struct cfs_workitem {
 	/** chain on runq or rerunq */
 	struct list_head	wi_list;
 	/** working function */
@@ -84,10 +84,10 @@ typedef struct cfs_workitem {
 	unsigned short		wi_running:1;
 	/** scheduled */
 	unsigned short		wi_scheduled:1;
-} cfs_workitem_t;
+};
 
 static inline void
-cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
+cfs_wi_init(struct cfs_workitem *wi, void *data, cfs_wi_action_t action)
 {
 	INIT_LIST_HEAD(&wi->wi_list);
@@ -97,9 +97,9 @@ cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
 	wi->wi_action = action;
 }
 
-void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+void cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+void cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
 
 int cfs_wi_startup(void);
 void cfs_wi_shutdown(void);
...
@@ -942,10 +942,10 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
  * @flags      - CFS_HASH_REHASH enable synamic hash resizing
  *             - CFS_HASH_SORT enable chained hash sort
  */
-static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
+static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
 
 #if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(cfs_workitem_t *wi)
+static int cfs_hash_dep_print(struct cfs_workitem *wi)
 {
 	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
 	int dep;
@@ -1847,7 +1847,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
 }
 
 static int
-cfs_hash_rehash_worker(cfs_workitem_t *wi)
+cfs_hash_rehash_worker(struct cfs_workitem *wi)
 {
 	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
 	struct cfs_hash_bucket **bkts;
...
@@ -111,7 +111,7 @@ cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
  * 1. when it returns no one shall try to schedule the workitem.
  */
 void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
 	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
  * cancel schedule request of workitem \a wi
  */
 int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
 	int rc;
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
  * be added, and even dynamic creation of serialised queues might be supported.
  */
 void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
 {
 	LASSERT(!in_interrupt()); /* because we use plain spinlock */
 	LASSERT(!sched->ws_stopping);
@@ -229,12 +229,12 @@ static int cfs_wi_scheduler(void *arg)
 	while (!sched->ws_stopping) {
 		int nloops = 0;
 		int rc;
-		cfs_workitem_t *wi;
+		struct cfs_workitem *wi;
 
 		while (!list_empty(&sched->ws_runq) &&
 		       nloops < CFS_WI_RESCHED) {
-			wi = list_entry(sched->ws_runq.next, cfs_workitem_t,
-					wi_list);
+			wi = list_entry(sched->ws_runq.next,
+					struct cfs_workitem, wi_list);
 			LASSERT(wi->wi_scheduled && !wi->wi_running);
 			list_del_init(&wi->wi_list);
...
@@ -176,7 +176,7 @@ typedef int (*swi_action_t) (struct swi_workitem *);
 typedef struct swi_workitem {
 	struct cfs_wi_sched	*swi_sched;
-	cfs_workitem_t		swi_workitem;
+	struct cfs_workitem	swi_workitem;
 	swi_action_t		swi_action;
 	int			swi_state;
 } swi_workitem_t;
@@ -461,7 +461,7 @@ srpc_serv_is_framework(struct srpc_service *svc)
 }
 
 static inline int
-swi_wi_action(cfs_workitem_t *wi)
+swi_wi_action(struct cfs_workitem *wi)
 {
 	swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem);
...
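
The container_of() uses in cfs_hash_dep_print(), cfs_hash_rehash_worker() and swi_wi_action() above are unaffected by the conversion: container_of() only needs the containing type, the member name and a pointer to the embedded member, none of which depend on the typedef. A standalone sketch of the same idiom (userspace program; the container_of() macro here is a simplified stand-in for the kernel's, and the field sets are trimmed for illustration):

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's container_of(): recover the
	 * enclosing structure from a pointer to one of its members. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct cfs_workitem {
		int wi_placeholder;	/* the real struct carries list/flag fields */
	};

	struct swi_workitem {
		int			swi_state;
		struct cfs_workitem	swi_workitem;	/* embedded work item */
	};

	int main(void)
	{
		struct swi_workitem swi = { .swi_state = 42 };
		struct cfs_workitem *wi = &swi.swi_workitem;

		/* Same idiom as swi_wi_action() in the hunk above. */
		struct swi_workitem *back = container_of(wi, struct swi_workitem,
							 swi_workitem);
		printf("%d\n", back->swi_state);	/* prints 42 */
		return 0;
	}
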