Commit effe6ee7 authored by Song Liu, committed by Shaohua Li

md/r5cache: improve recovery with read ahead page pool

In r5cache recovery, the journal device is scanned page by page.
Currently, we use sync_page_io() to read the journal device. This is
not efficient when we have to recover many stripes from the journal.

To improve the speed of recovery, this patch introduces a read ahead
page pool (ra_pool) to r5l_recovery_ctx. With ra_pool, multiple
consecutive pages are read in one IO, and subsequent log reads simply
copy data from the pool.
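
As an illustration, here is a minimal user-space sketch of the lookup the
pool provides (the struct and function names below are simplified
stand-ins, not the kernel code; the real logic lives in
r5l_recovery_read_page() and r5l_recovery_fetch_ra_pool() in the diff
below):

    /* Illustrative model only: the pool caches 'valid_pages' consecutive
     * 4KB pages starting at sector 'pool_offset' (8 sectors per page). */
    #define BLOCK_SECTORS      8
    #define BLOCK_SECTOR_SHIFT 3

    struct ra_pool_model {
            unsigned long long pool_offset; /* sector of first cached page */
            int valid_pages;                /* pages holding valid data */
    };

    /* Return the pool index holding 'offset', or -1 on a miss; on a miss
     * the real code refills the whole pool with one large read, then
     * retries the lookup. */
    static int ra_pool_lookup(const struct ra_pool_model *pool,
                              unsigned long long offset)
    {
            if (offset < pool->pool_offset ||
                offset >= pool->pool_offset +
                          (unsigned long long)pool->valid_pages * BLOCK_SECTORS)
                    return -1;
            return (int)((offset - pool->pool_offset) >> BLOCK_SECTOR_SHIFT);
    }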

With ra_pool, r5l_recovery_ctx becomes much bigger. Therefore,
r5l_recovery_log() is refactored so that r5l_recovery_ctx is allocated
on the heap instead of the stack.
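
For scale, a rough size estimate of the new pool (assuming a 64-bit
kernel with 8-byte pointers; the exact figure is architecture
dependent):

    #include <stddef.h>

    #define R5L_RECOVERY_PAGE_POOL_SIZE 256  /* value added by this patch */

    /* The ra_pool pointer array alone is 256 * 8 = 2048 bytes on an LP64
     * build, before counting meta_page, ra_bio and the rest of the
     * context -- too much to keep on a kernel stack, hence kzalloc(). */
    static const size_t ra_pool_array_bytes =
            R5L_RECOVERY_PAGE_POOL_SIZE * sizeof(void *);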
Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
parent aaf9f12e
@@ -30,6 +30,7 @@
  * underneath hardware sector size. only works with PAGE_SIZE == 4096
  */
 #define BLOCK_SECTORS (8)
+#define BLOCK_SECTOR_SHIFT (3)
 
 /*
  * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
@@ -1552,6 +1553,8 @@ bool r5l_log_disk_error(struct r5conf *conf)
         return ret;
 }
 
+#define R5L_RECOVERY_PAGE_POOL_SIZE 256
+
 struct r5l_recovery_ctx {
         struct page *meta_page;         /* current meta */
         sector_t meta_total_blocks;     /* total size of current meta and data */
@@ -1560,18 +1563,131 @@ struct r5l_recovery_ctx {
         int data_parity_stripes;        /* number of data_parity stripes */
         int data_only_stripes;          /* number of data_only stripes */
         struct list_head cached_list;
+
+        /*
+         * read ahead page pool (ra_pool)
+         * in recovery, log is read sequentially. It is not efficient to
+         * read every page with sync_page_io(). The read ahead page pool
+         * reads multiple pages with one IO, so further log read can
+         * just copy data from the pool.
+         */
+        struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE];
+        sector_t pool_offset;   /* offset of first page in the pool */
+        int total_pages;        /* total allocated pages */
+        int valid_pages;        /* pages with valid data */
+        struct bio *ra_bio;     /* bio to do the read ahead */
 };
 
+static int r5l_recovery_allocate_ra_pool(struct r5l_log *log,
+                                         struct r5l_recovery_ctx *ctx)
+{
+        struct page *page;
+
+        ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs);
+        if (!ctx->ra_bio)
+                return -ENOMEM;
+
+        ctx->valid_pages = 0;
+        ctx->total_pages = 0;
+        while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) {
+                page = alloc_page(GFP_KERNEL);
+
+                if (!page)
+                        break;
+                ctx->ra_pool[ctx->total_pages] = page;
+                ctx->total_pages += 1;
+        }
+
+        if (ctx->total_pages == 0) {
+                bio_put(ctx->ra_bio);
+                return -ENOMEM;
+        }
+
+        ctx->pool_offset = 0;
+        return 0;
+}
+
+static void r5l_recovery_free_ra_pool(struct r5l_log *log,
+                                      struct r5l_recovery_ctx *ctx)
+{
+        int i;
+
+        for (i = 0; i < ctx->total_pages; ++i)
+                put_page(ctx->ra_pool[i]);
+        bio_put(ctx->ra_bio);
+}
+
+/*
+ * fetch ctx->valid_pages pages from offset
+ * In normal cases, ctx->valid_pages == ctx->total_pages after the call.
+ * However, if the offset is close to the end of the journal device,
+ * ctx->valid_pages could be smaller than ctx->total_pages
+ */
+static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
+                                      struct r5l_recovery_ctx *ctx,
+                                      sector_t offset)
+{
+        bio_reset(ctx->ra_bio);
+        ctx->ra_bio->bi_bdev = log->rdev->bdev;
+        bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
+        ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;
+
+        ctx->valid_pages = 0;
+        ctx->pool_offset = offset;
+
+        while (ctx->valid_pages < ctx->total_pages) {
+                bio_add_page(ctx->ra_bio,
+                             ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0);
+                ctx->valid_pages += 1;
+
+                offset = r5l_ring_add(log, offset, BLOCK_SECTORS);
+
+                if (offset == 0)  /* reached end of the device */
+                        break;
+        }
+
+        return submit_bio_wait(ctx->ra_bio);
+}
+
+/*
+ * try read a page from the read ahead page pool, if the page is not in the
+ * pool, call r5l_recovery_fetch_ra_pool
+ */
+static int r5l_recovery_read_page(struct r5l_log *log,
+                                  struct r5l_recovery_ctx *ctx,
+                                  struct page *page,
+                                  sector_t offset)
+{
+        int ret;
+
+        if (offset < ctx->pool_offset ||
+            offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) {
+                ret = r5l_recovery_fetch_ra_pool(log, ctx, offset);
+                if (ret)
+                        return ret;
+        }
+
+        BUG_ON(offset < ctx->pool_offset ||
+               offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS);
+
+        memcpy(page_address(page),
+               page_address(ctx->ra_pool[(offset - ctx->pool_offset) >>
+                                         BLOCK_SECTOR_SHIFT]),
+               PAGE_SIZE);
+        return 0;
+}
+
 static int r5l_recovery_read_meta_block(struct r5l_log *log,
                                         struct r5l_recovery_ctx *ctx)
 {
         struct page *page = ctx->meta_page;
         struct r5l_meta_block *mb;
         u32 crc, stored_crc;
+        int ret;
 
-        if (!sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, REQ_OP_READ, 0,
-                          false))
-                return -EIO;
+        ret = r5l_recovery_read_page(log, ctx, page, ctx->pos);
+        if (ret != 0)
+                return ret;
 
         mb = page_address(page);
         stored_crc = le32_to_cpu(mb->checksum);
@@ -1653,8 +1769,7 @@ static void r5l_recovery_load_data(struct r5l_log *log,
         raid5_compute_sector(conf,
                              le64_to_cpu(payload->location), 0,
                              &dd_idx, sh);
-        sync_page_io(log->rdev, log_offset, PAGE_SIZE,
-                     sh->dev[dd_idx].page, REQ_OP_READ, 0, false);
+        r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset);
         sh->dev[dd_idx].log_checksum =
                 le32_to_cpu(payload->checksum[0]);
         ctx->meta_total_blocks += BLOCK_SECTORS;
@@ -1673,17 +1788,15 @@ static void r5l_recovery_load_parity(struct r5l_log *log,
         struct r5conf *conf = mddev->private;
 
         ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded;
-        sync_page_io(log->rdev, log_offset, PAGE_SIZE,
-                     sh->dev[sh->pd_idx].page, REQ_OP_READ, 0, false);
+        r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset);
         sh->dev[sh->pd_idx].log_checksum =
                 le32_to_cpu(payload->checksum[0]);
         set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags);
 
         if (sh->qd_idx >= 0) {
-                sync_page_io(log->rdev,
-                             r5l_ring_add(log, log_offset, BLOCK_SECTORS),
-                             PAGE_SIZE, sh->dev[sh->qd_idx].page,
-                             REQ_OP_READ, 0, false);
+                r5l_recovery_read_page(
+                        log, ctx, sh->dev[sh->qd_idx].page,
+                        r5l_ring_add(log, log_offset, BLOCK_SECTORS));
                 sh->dev[sh->qd_idx].log_checksum =
                         le32_to_cpu(payload->checksum[1]);
                 set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags);
@@ -1814,14 +1927,15 @@ r5c_recovery_replay_stripes(struct list_head *cached_stripe_list,
 
 /* if matches return 0; otherwise return -EINVAL */
 static int
-r5l_recovery_verify_data_checksum(struct r5l_log *log, struct page *page,
+r5l_recovery_verify_data_checksum(struct r5l_log *log,
+                                  struct r5l_recovery_ctx *ctx,
+                                  struct page *page,
                                   sector_t log_offset, __le32 log_checksum)
 {
         void *addr;
         u32 checksum;
 
-        sync_page_io(log->rdev, log_offset, PAGE_SIZE,
-                     page, REQ_OP_READ, 0, false);
+        r5l_recovery_read_page(log, ctx, page, log_offset);
         addr = kmap_atomic(page);
         checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE);
         kunmap_atomic(addr);
@@ -1853,17 +1967,17 @@ r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log,
 
                 if (payload->header.type == R5LOG_PAYLOAD_DATA) {
                         if (r5l_recovery_verify_data_checksum(
-                                    log, page, log_offset,
+                                    log, ctx, page, log_offset,
                                     payload->checksum[0]) < 0)
                                 goto mismatch;
                 } else if (payload->header.type == R5LOG_PAYLOAD_PARITY) {
                         if (r5l_recovery_verify_data_checksum(
-                                    log, page, log_offset,
+                                    log, ctx, page, log_offset,
                                     payload->checksum[0]) < 0)
                                 goto mismatch;
                         if (conf->max_degraded == 2 && /* q for RAID 6 */
                             r5l_recovery_verify_data_checksum(
-                                    log, page,
+                                    log, ctx, page,
                                     r5l_ring_add(log, log_offset,
                                                  BLOCK_SECTORS),
                                     payload->checksum[1]) < 0)
@@ -2241,55 +2355,70 @@ static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log,
 static int r5l_recovery_log(struct r5l_log *log)
 {
         struct mddev *mddev = log->rdev->mddev;
-        struct r5l_recovery_ctx ctx;
+        struct r5l_recovery_ctx *ctx;
         int ret;
         sector_t pos;
 
-        ctx.pos = log->last_checkpoint;
-        ctx.seq = log->last_cp_seq;
-        ctx.meta_page = alloc_page(GFP_KERNEL);
-        ctx.data_only_stripes = 0;
-        ctx.data_parity_stripes = 0;
-        INIT_LIST_HEAD(&ctx.cached_list);
-
-        if (!ctx.meta_page)
+        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+        if (!ctx)
                 return -ENOMEM;
 
-        ret = r5c_recovery_flush_log(log, &ctx);
-        __free_page(ctx.meta_page);
+        ctx->pos = log->last_checkpoint;
+        ctx->seq = log->last_cp_seq;
+        INIT_LIST_HEAD(&ctx->cached_list);
+        ctx->meta_page = alloc_page(GFP_KERNEL);
 
-        if (ret)
-                return ret;
+        if (!ctx->meta_page) {
+                ret = -ENOMEM;
+                goto meta_page;
+        }
 
-        pos = ctx.pos;
-        ctx.seq += 10000;
-
+        if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) {
+                ret = -ENOMEM;
+                goto ra_pool;
+        }
 
-        if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
+        ret = r5c_recovery_flush_log(log, ctx);
+
+        if (ret)
+                goto error;
+
+        pos = ctx->pos;
+        ctx->seq += 10000;
+
+        if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0))
                 pr_debug("md/raid:%s: starting from clean shutdown\n",
                          mdname(mddev));
         else
                 pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
-                         mdname(mddev), ctx.data_only_stripes,
-                         ctx.data_parity_stripes);
+                         mdname(mddev), ctx->data_only_stripes,
+                         ctx->data_parity_stripes);
 
-        if (ctx.data_only_stripes == 0) {
-                log->next_checkpoint = ctx.pos;
-                r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
-                ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
-        } else if (r5c_recovery_rewrite_data_only_stripes(log, &ctx)) {
+        if (ctx->data_only_stripes == 0) {
+                log->next_checkpoint = ctx->pos;
+                r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++);
+                ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS);
+        } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) {
                 pr_err("md/raid:%s: failed to rewrite stripes to journal\n",
                        mdname(mddev));
-                return -EIO;
+                ret = -EIO;
+                goto error;
         }
 
-        log->log_start = ctx.pos;
-        log->seq = ctx.seq;
+        log->log_start = ctx->pos;
+        log->seq = ctx->seq;
         log->last_checkpoint = pos;
         r5l_write_super(log, pos);
 
-        r5c_recovery_flush_data_only_stripes(log, &ctx);
-        return 0;
+        r5c_recovery_flush_data_only_stripes(log, ctx);
+        ret = 0;
+error:
+        r5l_recovery_free_ra_pool(log, ctx);
+ra_pool:
+        __free_page(ctx->meta_page);
+meta_page:
+        kfree(ctx);
+        return ret;
 }
 
 static void r5l_write_super(struct r5l_log *log, sector_t cp)