Commit 855cdd2c authored by Matias Bjørling, committed by Jens Axboe

lightnvm: make rrpc_map_page call nvm_get_blk outside locks

The nvm_get_blk() function is called with rlun->lock held. This is fine as
long as the media manager implementation stays within its atomic context.
However, if a media manager persists its metadata and must guarantee that
the block is handed to the target, holding the lock is no longer a viable
approach. Therefore, clean up the flow of rrpc_map_page() and make sure that
nvm_get_blk() is called without any locks held.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 41285fad
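
For context, the key move in the patch below is that the potentially sleeping
nvm_get_blk() call now runs with rlun->lock released; the freshly allocated
block is queued on the new wblk_list under the lock, and the allocator always
takes the head of that queue, so a block queued by a racing thread is consumed
first and nothing is lost. The following is a minimal userspace sketch of that
lock-drop-and-requeue pattern, not the driver code: the names (struct lun,
struct blk, lun_map_page, blk_alloc_slow) are made up for illustration and a
pthread mutex stands in for the kernel spinlock.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct blk {
	struct blk *next;	/* link in the per-LUN write-block queue */
	int free_pages;		/* page slots still available in this block */
};

struct lun {
	pthread_mutex_t lock;
	struct blk *cur;	/* current append point */
	struct blk *wblk_head;	/* queued blocks to be written to */
	struct blk *wblk_tail;
};

/* stand-in for nvm_get_blk(): may sleep, so it must be called unlocked */
static struct blk *blk_alloc_slow(void)
{
	struct blk *b = calloc(1, sizeof(*b));

	if (b)
		b->free_pages = 4;
	return b;
}

/* returns a page slot in the current block, or -1 if no block is available */
static int lun_map_page(struct lun *l)
{
	struct blk *b;

	pthread_mutex_lock(&l->lock);
retry:
	if (l->cur && l->cur->free_pages > 0) {
		int page = --l->cur->free_pages;

		pthread_mutex_unlock(&l->lock);
		return page;
	}

	if (l->wblk_head) {
		/* promote the oldest queued block to the append point */
		l->cur = l->wblk_head;
		l->wblk_head = l->wblk_head->next;
		if (!l->wblk_head)
			l->wblk_tail = NULL;
		goto retry;
	}

	/* no usable block: allocate one without holding the lock */
	pthread_mutex_unlock(&l->lock);
	b = blk_alloc_slow();
	if (!b)
		return -1;

	pthread_mutex_lock(&l->lock);
	/*
	 * a racing thread may have queued a block meanwhile; keep FIFO order
	 * by appending here and then consuming from the head again
	 */
	if (l->wblk_tail)
		l->wblk_tail->next = b;
	else
		l->wblk_head = b;
	l->wblk_tail = b;
	goto retry;
}

int main(void)
{
	struct lun l = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int i;

	for (i = 0; i < 10; i++)
		printf("page slot %d\n", lun_map_page(&l));
	return 0;
}

The single-threaded main() merely exercises the allocator; the FIFO hand-off
through the queue is what matters once several writers race for the same LUN.
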
@@ -175,18 +175,17 @@ static struct ppa_addr rrpc_ppa_to_gaddr(struct nvm_dev *dev, u64 addr)
 }
 
 /* requires lun->lock taken */
-static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *rblk)
+static void rrpc_set_lun_cur(struct rrpc_lun *rlun, struct rrpc_block *new_rblk,
+						struct rrpc_block **cur_rblk)
 {
 	struct rrpc *rrpc = rlun->rrpc;
 
-	BUG_ON(!rblk);
-
-	if (rlun->cur) {
-		spin_lock(&rlun->cur->lock);
-		WARN_ON(!block_is_full(rrpc, rlun->cur));
-		spin_unlock(&rlun->cur->lock);
+	if (*cur_rblk) {
+		spin_lock(&(*cur_rblk)->lock);
+		WARN_ON(!block_is_full(rrpc, *cur_rblk));
+		spin_unlock(&(*cur_rblk)->lock);
 	}
-	rlun->cur = rblk;
+	*cur_rblk = new_rblk;
 }
 
 static struct rrpc_block *rrpc_get_blk(struct rrpc *rrpc, struct rrpc_lun *rlun,
@@ -577,21 +576,20 @@ static u64 rrpc_alloc_addr(struct rrpc *rrpc, struct rrpc_block *rblk)
 	return addr;
 }
 
-/* Simple round-robin Logical to physical address translation.
- *
- * Retrieve the mapping using the active append point. Then update the ap for
- * the next write to the disk.
+/* Map logical address to a physical page. The mapping implements a round robin
+ * approach and allocates a page from the next lun available.
  *
- * Returns rrpc_addr with the physical address and block. Remember to return to
- * rrpc->addr_cache when request is finished.
+ * Returns rrpc_addr with the physical address and block. Returns NULL if no
+ * blocks in the next rlun are available.
  */
 static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 								int is_gc)
 {
 	struct rrpc_lun *rlun;
-	struct rrpc_block *rblk;
+	struct rrpc_block *rblk, **cur_rblk;
 	struct nvm_lun *lun;
 	u64 paddr;
+	int gc_force = 0;
 
 	rlun = rrpc_get_lun_rr(rrpc, is_gc);
 	lun = rlun->parent;
@@ -599,41 +597,65 @@ static struct rrpc_addr *rrpc_map_page(struct rrpc *rrpc, sector_t laddr,
 	if (!is_gc && lun->nr_free_blocks < rrpc->nr_luns * 4)
 		return NULL;
 
-	spin_lock(&rlun->lock);
+	/*
+	 * page allocation steps:
+	 * 1. Try to allocate new page from current rblk
+	 * 2a. If succeed, proceed to map it in and return
+	 * 2b. If fail, first try to allocate a new block from media manger,
+	 *     and then retry step 1. Retry until the normal block pool is
+	 *     exhausted.
+	 * 3. If exhausted, and garbage collector is requesting the block,
+	 *    go to the reserved block and retry step 1.
+	 *    In the case that this fails as well, or it is not GC
+	 *    requesting, report not able to retrieve a block and let the
+	 *    caller handle further processing.
+	 */
 
+	spin_lock(&rlun->lock);
+	cur_rblk = &rlun->cur;
 	rblk = rlun->cur;
 retry:
 	paddr = rrpc_alloc_addr(rrpc, rblk);
 
-	if (paddr == ADDR_EMPTY) {
-		rblk = rrpc_get_blk(rrpc, rlun, 0);
-		if (rblk) {
-			rrpc_set_lun_cur(rlun, rblk);
-			goto retry;
-		}
+	if (paddr != ADDR_EMPTY)
+		goto done;
 
-		if (is_gc) {
-			/* retry from emergency gc block */
-			paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-			if (paddr == ADDR_EMPTY) {
-				rblk = rrpc_get_blk(rrpc, rlun, 1);
-				if (!rblk) {
-					pr_err("rrpc: no more blocks");
-					goto err;
-				}
-
-				rlun->gc_cur = rblk;
-				paddr = rrpc_alloc_addr(rrpc, rlun->gc_cur);
-			}
-			rblk = rlun->gc_cur;
-		}
+	if (!list_empty(&rlun->wblk_list)) {
+new_blk:
+		rblk = list_first_entry(&rlun->wblk_list, struct rrpc_block,
+									prio);
+		rrpc_set_lun_cur(rlun, rblk, cur_rblk);
+		list_del(&rblk->prio);
+		goto retry;
+	}
+	spin_unlock(&rlun->lock);
+
+	rblk = rrpc_get_blk(rrpc, rlun, gc_force);
+	if (rblk) {
+		spin_lock(&rlun->lock);
+		list_add_tail(&rblk->prio, &rlun->wblk_list);
+		/*
+		 * another thread might already have added a new block,
+		 * Therefore, make sure that one is used, instead of the
+		 * one just added.
+		 */
+		goto new_blk;
+	}
+
+	if (unlikely(is_gc) && !gc_force) {
+		/* retry from emergency gc block */
+		cur_rblk = &rlun->gc_cur;
+		rblk = rlun->gc_cur;
+		gc_force = 1;
+		spin_lock(&rlun->lock);
+		goto retry;
 	}
 
+	pr_err("rrpc: failed to allocate new block\n");
+	return NULL;
+done:
 	spin_unlock(&rlun->lock);
 	return rrpc_update_map(rrpc, laddr, rblk, paddr);
-err:
-	spin_unlock(&rlun->lock);
-	return NULL;
 }
 
 static void rrpc_run_gc(struct rrpc *rrpc, struct rrpc_block *rblk)
@@ -1177,6 +1199,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 		rlun->rrpc = rrpc;
 		INIT_LIST_HEAD(&rlun->prio_list);
+		INIT_LIST_HEAD(&rlun->wblk_list);
 
 		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
 		spin_lock_init(&rlun->lock);
 
@@ -1317,14 +1340,13 @@ static int rrpc_luns_configure(struct rrpc *rrpc)
 		rblk = rrpc_get_blk(rrpc, rlun, 0);
 		if (!rblk)
 			goto err;
-
-		rrpc_set_lun_cur(rlun, rblk);
+		rrpc_set_lun_cur(rlun, rblk, &rlun->cur);
 
 		/* Emergency gc block */
 		rblk = rrpc_get_blk(rrpc, rlun, 1);
 		if (!rblk)
 			goto err;
-		rlun->gc_cur = rblk;
+		rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur);
 	}
 
 	return 0;
...
@@ -76,6 +76,7 @@ struct rrpc_lun {
 	struct rrpc_block *blocks;	/* Reference to block allocation */
 
 	struct list_head prio_list;	/* Blocks that may be GC'ed */
+	struct list_head wblk_list;	/* Queued blocks to be written to */
 
 	struct work_struct ws_gc;
 
...
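
A second, smaller part of the patch generalizes rrpc_set_lun_cur() to take a
pointer to whichever append point it should update, which is what lets
rrpc_luns_configure() and rrpc_map_page() drive both rlun->cur and the
emergency rlun->gc_cur through the same helper. Below is a stripped-down
sketch of that double-pointer idiom; rrpc_block_s, rrpc_lun_s and set_lun_cur
are hypothetical stand-ins, not the kernel symbols.

#include <assert.h>
#include <stddef.h>

struct rrpc_block_s { int id; };	/* stand-in for struct rrpc_block */

struct rrpc_lun_s {			/* stand-in for struct rrpc_lun */
	struct rrpc_block_s *cur;	/* normal write append point */
	struct rrpc_block_s *gc_cur;	/* reserved emergency GC append point */
};

/* one helper updates either append point via a pointer-to-pointer */
static void set_lun_cur(struct rrpc_block_s *new_blk,
			struct rrpc_block_s **cur_blk)
{
	/* the real helper additionally warns if the outgoing block is not full */
	*cur_blk = new_blk;
}

int main(void)
{
	struct rrpc_lun_s rlun = { NULL, NULL };
	struct rrpc_block_s a = { 1 }, b = { 2 };

	set_lun_cur(&a, &rlun.cur);	/* like rrpc_set_lun_cur(rlun, rblk, &rlun->cur) */
	set_lun_cur(&b, &rlun.gc_cur);	/* like rrpc_set_lun_cur(rlun, rblk, &rlun->gc_cur) */
	assert(rlun.cur == &a && rlun.gc_cur == &b);
	return 0;
}
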