Commit 75cb8e93 authored by Javier González, committed by Jens Axboe

lightnvm: pblk: advance bio according to lba index

When a lba either hits the cache or corresponds to an empty entry in the
L2P table, we need to advance the bio according to the position in which
the lba is located. Otherwise, we will copy data in the wrong page, thus
causing data corruption for the application.

In case of a cache hit, we assumed that bio->bi_iter.bi_idx would
contain the correct index, but this is not necessarily true. Instead, use
the local bio advance counter and iterator. This guarantees that lbas
hitting the cache are copied into the right bv_page.

In case of an empty L2P entry, we failed to advance the bio. In the
cases when the same I/O also contains a cache hit, data corresponding
to this lba will be copied to the wrong bv_page. Fix this by advancing
the bio as we do in the case of a cache hit.

Fixes: a4bd217b lightnvm: physical block device (pblk) target
Signed-off-by: Javier González <javier@javigon.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 5a60f4b6
...@@ -657,7 +657,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd, ...@@ -657,7 +657,7 @@ unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
* be directed to disk. * be directed to disk.
*/ */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
struct ppa_addr ppa, int bio_iter) struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{ {
struct pblk *pblk = container_of(rb, struct pblk, rwb); struct pblk *pblk = container_of(rb, struct pblk, rwb);
struct pblk_rb_entry *entry; struct pblk_rb_entry *entry;
...@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, ...@@ -694,7 +694,7 @@ int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
* filled with data from the cache). If part of the data resides on the * filled with data from the cache). If part of the data resides on the
* media, we will read later on * media, we will read later on
*/ */
if (unlikely(!bio->bi_iter.bi_idx)) if (unlikely(!advanced_bio))
bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE); bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);
data = bio_data(bio); data = bio_data(bio);
......
...@@ -26,7 +26,7 @@ ...@@ -26,7 +26,7 @@
*/ */
static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
sector_t lba, struct ppa_addr ppa, sector_t lba, struct ppa_addr ppa,
int bio_iter) int bio_iter, bool advanced_bio)
{ {
#ifdef CONFIG_NVM_DEBUG #ifdef CONFIG_NVM_DEBUG
/* Callers must ensure that the ppa points to a cache address */ /* Callers must ensure that the ppa points to a cache address */
...@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio, ...@@ -34,7 +34,8 @@ static int pblk_read_from_cache(struct pblk *pblk, struct bio *bio,
BUG_ON(!pblk_addr_in_cache(ppa)); BUG_ON(!pblk_addr_in_cache(ppa));
#endif #endif
return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa, bio_iter); return pblk_rb_copy_to_bio(&pblk->rwb, bio, lba, ppa,
bio_iter, advanced_bio);
} }
static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
...@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -44,7 +45,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS]; struct ppa_addr ppas[PBLK_MAX_REQ_ADDRS];
sector_t blba = pblk_get_lba(bio); sector_t blba = pblk_get_lba(bio);
int nr_secs = rqd->nr_ppas; int nr_secs = rqd->nr_ppas;
int advanced_bio = 0; bool advanced_bio = false;
int i, j = 0; int i, j = 0;
/* logic error: lba out-of-bounds. Ignore read request */ /* logic error: lba out-of-bounds. Ignore read request */
...@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -62,19 +63,26 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
retry: retry:
if (pblk_ppa_empty(p)) { if (pblk_ppa_empty(p)) {
WARN_ON(test_and_set_bit(i, read_bitmap)); WARN_ON(test_and_set_bit(i, read_bitmap));
continue;
if (unlikely(!advanced_bio)) {
bio_advance(bio, (i) * PBLK_EXPOSED_PAGE_SIZE);
advanced_bio = true;
}
goto next;
} }
/* Try to read from write buffer. The address is later checked /* Try to read from write buffer. The address is later checked
* on the write buffer to prevent retrieving overwritten data. * on the write buffer to prevent retrieving overwritten data.
*/ */
if (pblk_addr_in_cache(p)) { if (pblk_addr_in_cache(p)) {
if (!pblk_read_from_cache(pblk, bio, lba, p, i)) { if (!pblk_read_from_cache(pblk, bio, lba, p, i,
advanced_bio)) {
pblk_lookup_l2p_seq(pblk, &p, lba, 1); pblk_lookup_l2p_seq(pblk, &p, lba, 1);
goto retry; goto retry;
} }
WARN_ON(test_and_set_bit(i, read_bitmap)); WARN_ON(test_and_set_bit(i, read_bitmap));
advanced_bio = 1; advanced_bio = true;
#ifdef CONFIG_NVM_DEBUG #ifdef CONFIG_NVM_DEBUG
atomic_long_inc(&pblk->cache_reads); atomic_long_inc(&pblk->cache_reads);
#endif #endif
...@@ -83,6 +91,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -83,6 +91,7 @@ static void pblk_read_ppalist_rq(struct pblk *pblk, struct nvm_rq *rqd,
rqd->ppa_list[j++] = p; rqd->ppa_list[j++] = p;
} }
next:
if (advanced_bio) if (advanced_bio)
bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE); bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
} }
...@@ -282,7 +291,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd, ...@@ -282,7 +291,7 @@ static void pblk_read_rq(struct pblk *pblk, struct nvm_rq *rqd,
* write buffer to prevent retrieving overwritten data. * write buffer to prevent retrieving overwritten data.
*/ */
if (pblk_addr_in_cache(ppa)) { if (pblk_addr_in_cache(ppa)) {
if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0)) { if (!pblk_read_from_cache(pblk, bio, lba, ppa, 0, 1)) {
pblk_lookup_l2p_seq(pblk, &ppa, lba, 1); pblk_lookup_l2p_seq(pblk, &ppa, lba, 1);
goto retry; goto retry;
} }
......
...@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio, ...@@ -670,7 +670,7 @@ unsigned int pblk_rb_read_to_bio_list(struct pblk_rb *rb, struct bio *bio,
struct list_head *list, struct list_head *list,
unsigned int max); unsigned int max);
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba, int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
struct ppa_addr ppa, int bio_iter); struct ppa_addr ppa, int bio_iter, bool advanced_bio);
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries); unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int entries);
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags); unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment