Commit 46a15bc3 authored by Lars Ellenberg's avatar Lars Ellenberg Committed by Philipp Reisner

lru_cache: allow multiple changes per transaction

Allow multiple changes to the active set of elements in lru_cache.
The only current user of lru_cache, drbd, is driving this generalisation.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 45dfffeb
...@@ -175,7 +175,6 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr) ...@@ -175,7 +175,6 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
{ {
struct lc_element *al_ext; struct lc_element *al_ext;
struct lc_element *tmp; struct lc_element *tmp;
unsigned long al_flags = 0;
int wake; int wake;
spin_lock_irq(&mdev->al_lock); spin_lock_irq(&mdev->al_lock);
...@@ -191,18 +190,7 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr) ...@@ -191,18 +190,7 @@ static struct lc_element *_al_get(struct drbd_conf *mdev, unsigned int enr)
} }
} }
al_ext = lc_get(mdev->act_log, enr); al_ext = lc_get(mdev->act_log, enr);
al_flags = mdev->act_log->flags;
spin_unlock_irq(&mdev->al_lock); spin_unlock_irq(&mdev->al_lock);
/*
if (!al_ext) {
if (al_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for LRU element (AL too small?)\n");
if (al_flags & LC_DIRTY)
dev_warn(DEV, "Ongoing AL update (AL device too slow?)\n");
}
*/
return al_ext; return al_ext;
} }
...@@ -235,7 +223,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector) ...@@ -235,7 +223,7 @@ void drbd_al_begin_io(struct drbd_conf *mdev, sector_t sector)
mdev->al_writ_cnt++; mdev->al_writ_cnt++;
spin_lock_irq(&mdev->al_lock); spin_lock_irq(&mdev->al_lock);
lc_changed(mdev->act_log, al_ext); lc_committed(mdev->act_log);
spin_unlock_irq(&mdev->al_lock); spin_unlock_irq(&mdev->al_lock);
wake_up(&mdev->al_wait); wake_up(&mdev->al_wait);
} }
...@@ -601,7 +589,7 @@ void drbd_al_shrink(struct drbd_conf *mdev) ...@@ -601,7 +589,7 @@ void drbd_al_shrink(struct drbd_conf *mdev)
struct lc_element *al_ext; struct lc_element *al_ext;
int i; int i;
D_ASSERT(test_bit(__LC_DIRTY, &mdev->act_log->flags)); D_ASSERT(test_bit(__LC_LOCKED, &mdev->act_log->flags));
for (i = 0; i < mdev->act_log->nr_elements; i++) { for (i = 0; i < mdev->act_log->nr_elements; i++) {
al_ext = lc_element_by_index(mdev->act_log, i); al_ext = lc_element_by_index(mdev->act_log, i);
...@@ -708,7 +696,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector, ...@@ -708,7 +696,9 @@ static void drbd_try_clear_on_disk_bm(struct drbd_conf *mdev, sector_t sector,
} }
ext->rs_left = rs_left; ext->rs_left = rs_left;
ext->rs_failed = success ? 0 : count; ext->rs_failed = success ? 0 : count;
lc_changed(mdev->resync, &ext->lce); /* we don't keep a persistent log of the resync lru,
* we can commit any change right away. */
lc_committed(mdev->resync);
} }
lc_put(mdev->resync, &ext->lce); lc_put(mdev->resync, &ext->lce);
/* no race, we are within the al_lock! */ /* no race, we are within the al_lock! */
...@@ -892,7 +882,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr) ...@@ -892,7 +882,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (bm_ext->lce.lc_number != enr) { if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr); bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0; bm_ext->rs_failed = 0;
lc_changed(mdev->resync, &bm_ext->lce); lc_committed(mdev->resync);
wakeup = 1; wakeup = 1;
} }
if (bm_ext->lce.refcnt == 1) if (bm_ext->lce.refcnt == 1)
...@@ -908,7 +898,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr) ...@@ -908,7 +898,7 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
if (rs_flags & LC_STARVING) if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element" dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n"); " (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_DIRTY); BUG_ON(rs_flags & LC_LOCKED);
} }
return bm_ext; return bm_ext;
...@@ -916,26 +906,12 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr) ...@@ -916,26 +906,12 @@ struct bm_extent *_bme_get(struct drbd_conf *mdev, unsigned int enr)
static int _is_in_al(struct drbd_conf *mdev, unsigned int enr) static int _is_in_al(struct drbd_conf *mdev, unsigned int enr)
{ {
struct lc_element *al_ext; int rv;
int rv = 0;
spin_lock_irq(&mdev->al_lock); spin_lock_irq(&mdev->al_lock);
if (unlikely(enr == mdev->act_log->new_number)) rv = lc_is_used(mdev->act_log, enr);
rv = 1;
else {
al_ext = lc_find(mdev->act_log, enr);
if (al_ext) {
if (al_ext->refcnt)
rv = 1;
}
}
spin_unlock_irq(&mdev->al_lock); spin_unlock_irq(&mdev->al_lock);
/*
if (unlikely(rv)) {
dev_info(DEV, "Delaying sync read until app's write is done\n");
}
*/
return rv; return rv;
} }
...@@ -1065,13 +1041,13 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector) ...@@ -1065,13 +1041,13 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
if (rs_flags & LC_STARVING) if (rs_flags & LC_STARVING)
dev_warn(DEV, "Have to wait for element" dev_warn(DEV, "Have to wait for element"
" (resync LRU too small?)\n"); " (resync LRU too small?)\n");
BUG_ON(rs_flags & LC_DIRTY); BUG_ON(rs_flags & LC_LOCKED);
goto try_again; goto try_again;
} }
if (bm_ext->lce.lc_number != enr) { if (bm_ext->lce.lc_number != enr) {
bm_ext->rs_left = drbd_bm_e_weight(mdev, enr); bm_ext->rs_left = drbd_bm_e_weight(mdev, enr);
bm_ext->rs_failed = 0; bm_ext->rs_failed = 0;
lc_changed(mdev->resync, &bm_ext->lce); lc_committed(mdev->resync);
wake_up(&mdev->al_wait); wake_up(&mdev->al_wait);
D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0); D_ASSERT(test_bit(BME_LOCKED, &bm_ext->flags) == 0);
} }
...@@ -1082,8 +1058,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector) ...@@ -1082,8 +1058,6 @@ int drbd_try_rs_begin_io(struct drbd_conf *mdev, sector_t sector)
} }
check_al: check_al:
for (i = 0; i < AL_EXT_PER_BM_SECT; i++) { for (i = 0; i < AL_EXT_PER_BM_SECT; i++) {
if (unlikely(al_enr+i == mdev->act_log->new_number))
goto try_again;
if (lc_is_used(mdev->act_log, al_enr+i)) if (lc_is_used(mdev->act_log, al_enr+i))
goto try_again; goto try_again;
} }
......
...@@ -760,7 +760,7 @@ static int drbd_check_al_size(struct drbd_conf *mdev) ...@@ -760,7 +760,7 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
in_use = 0; in_use = 0;
t = mdev->act_log; t = mdev->act_log;
n = lc_create("act_log", drbd_al_ext_cache, n = lc_create("act_log", drbd_al_ext_cache, 1,
mdev->sync_conf.al_extents, sizeof(struct lc_element), 0); mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
if (n == NULL) { if (n == NULL) {
...@@ -1016,7 +1016,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp ...@@ -1016,7 +1016,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
} }
resync_lru = lc_create("resync", drbd_bm_ext_cache, resync_lru = lc_create("resync", drbd_bm_ext_cache,
61, sizeof(struct bm_extent), 1, 61, sizeof(struct bm_extent),
offsetof(struct bm_extent, lce)); offsetof(struct bm_extent, lce));
if (!resync_lru) { if (!resync_lru) {
retcode = ERR_NOMEM; retcode = ERR_NOMEM;
......
...@@ -166,9 +166,11 @@ struct lc_element { ...@@ -166,9 +166,11 @@ struct lc_element {
/* if we want to track a larger set of objects, /* if we want to track a larger set of objects,
* it needs to become arch independent u64 */ * it needs to become arch independent u64 */
unsigned lc_number; unsigned lc_number;
/* special label when on free list */ /* special label when on free list */
#define LC_FREE (~0U) #define LC_FREE (~0U)
/* for pending changes */
unsigned lc_new_number;
}; };
struct lru_cache { struct lru_cache {
...@@ -176,6 +178,7 @@ struct lru_cache { ...@@ -176,6 +178,7 @@ struct lru_cache {
struct list_head lru; struct list_head lru;
struct list_head free; struct list_head free;
struct list_head in_use; struct list_head in_use;
struct list_head to_be_changed;
/* the pre-created kmem cache to allocate the objects from */ /* the pre-created kmem cache to allocate the objects from */
struct kmem_cache *lc_cache; struct kmem_cache *lc_cache;
...@@ -194,18 +197,19 @@ struct lru_cache { ...@@ -194,18 +197,19 @@ struct lru_cache {
* 8 high bits of .lc_index to be overloaded with flags in the future. */ * 8 high bits of .lc_index to be overloaded with flags in the future. */
#define LC_MAX_ACTIVE (1<<24) #define LC_MAX_ACTIVE (1<<24)
/* allow to accumulate a few (index:label) changes,
* but no more than max_pending_changes */
unsigned int max_pending_changes;
/* number of elements currently on to_be_changed list */
unsigned int pending_changes;
/* statistics */ /* statistics */
unsigned used; /* number of lelements currently on in_use list */ unsigned used; /* number of elements currently on in_use list */
unsigned long hits, misses, starving, dirty, changed; unsigned long hits, misses, starving, locked, changed;
/* see below: flag-bits for lru_cache */ /* see below: flag-bits for lru_cache */
unsigned long flags; unsigned long flags;
/* when changing the label of an index element */
unsigned int new_number;
/* for paranoia when changing the label of an index element */
struct lc_element *changing_element;
void *lc_private; void *lc_private;
const char *name; const char *name;
...@@ -221,10 +225,15 @@ enum { ...@@ -221,10 +225,15 @@ enum {
/* debugging aid, to catch concurrent access early. /* debugging aid, to catch concurrent access early.
* user needs to guarantee exclusive access by proper locking! */ * user needs to guarantee exclusive access by proper locking! */
__LC_PARANOIA, __LC_PARANOIA,
/* if we need to change the set, but currently there is a changing
* transaction pending, we are "dirty", and must deferr further /* annotate that the set is "dirty", possibly accumulating further
* changing requests */ * changes, until a transaction is finally triggered */
__LC_DIRTY, __LC_DIRTY,
/* Locked, no further changes allowed.
* Also used to serialize changing transactions. */
__LC_LOCKED,
/* if we need to change the set, but currently there is no free nor /* if we need to change the set, but currently there is no free nor
* unused element available, we are "starving", and must not give out * unused element available, we are "starving", and must not give out
* further references, to guarantee that eventually some refcnt will * further references, to guarantee that eventually some refcnt will
...@@ -236,9 +245,11 @@ enum { ...@@ -236,9 +245,11 @@ enum {
}; };
#define LC_PARANOIA (1<<__LC_PARANOIA) #define LC_PARANOIA (1<<__LC_PARANOIA)
#define LC_DIRTY (1<<__LC_DIRTY) #define LC_DIRTY (1<<__LC_DIRTY)
#define LC_LOCKED (1<<__LC_LOCKED)
#define LC_STARVING (1<<__LC_STARVING) #define LC_STARVING (1<<__LC_STARVING)
extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache, extern struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
unsigned max_pending_changes,
unsigned e_count, size_t e_size, size_t e_off); unsigned e_count, size_t e_size, size_t e_off);
extern void lc_reset(struct lru_cache *lc); extern void lc_reset(struct lru_cache *lc);
extern void lc_destroy(struct lru_cache *lc); extern void lc_destroy(struct lru_cache *lc);
...@@ -249,7 +260,7 @@ extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr); ...@@ -249,7 +260,7 @@ extern struct lc_element *lc_try_get(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr); extern struct lc_element *lc_find(struct lru_cache *lc, unsigned int enr);
extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr); extern struct lc_element *lc_get(struct lru_cache *lc, unsigned int enr);
extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e); extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
extern void lc_changed(struct lru_cache *lc, struct lc_element *e); extern void lc_committed(struct lru_cache *lc);
struct seq_file; struct seq_file;
extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc); extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
...@@ -258,31 +269,40 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char ...@@ -258,31 +269,40 @@ extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char
void (*detail) (struct seq_file *, struct lc_element *)); void (*detail) (struct seq_file *, struct lc_element *));
/** /**
* lc_try_lock - can be used to stop lc_get() from changing the tracked set * lc_try_lock_for_transaction - can be used to stop lc_get() from changing the tracked set
* @lc: the lru cache to operate on * @lc: the lru cache to operate on
* *
* Note that the reference counts and order on the active and lru lists may * Allows (expects) the set to be "dirty". Note that the reference counts and
* still change. Returns true if we acquired the lock. * order on the active and lru lists may still change. Used to serialize
* changing transactions. Returns true if we aquired the lock.
*/ */
static inline int lc_try_lock(struct lru_cache *lc) static inline int lc_try_lock_for_transaction(struct lru_cache *lc)
{ {
return !test_and_set_bit(__LC_DIRTY, &lc->flags); return !test_and_set_bit(__LC_LOCKED, &lc->flags);
} }
/**
* lc_try_lock - variant to stop lc_get() from changing the tracked set
* @lc: the lru cache to operate on
*
* Note that the reference counts and order on the active and lru lists may
* still change. Only works on a "clean" set. Returns true if we acquired the
* lock, which means there are no pending changes, and any further attempt to
* change the set will not succeed until the next lc_unlock().
*/
extern int lc_try_lock(struct lru_cache *lc);
/** /**
* lc_unlock - unlock @lc, allow lc_get() to change the set again * lc_unlock - unlock @lc, allow lc_get() to change the set again
* @lc: the lru cache to operate on * @lc: the lru cache to operate on
*/ */
static inline void lc_unlock(struct lru_cache *lc) static inline void lc_unlock(struct lru_cache *lc)
{ {
clear_bit_unlock(__LC_DIRTY, &lc->flags); clear_bit(__LC_DIRTY, &lc->flags);
clear_bit_unlock(__LC_LOCKED, &lc->flags);
} }
static inline int lc_is_used(struct lru_cache *lc, unsigned int enr) extern bool lc_is_used(struct lru_cache *lc, unsigned int enr);
{
struct lc_element *e = lc_find(lc, enr);
return e && e->refcnt;
}
#define lc_entry(ptr, type, member) \ #define lc_entry(ptr, type, member) \
container_of(ptr, type, member) container_of(ptr, type, member)
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment