Commit eb357060 authored by Qu Wenruo's avatar Qu Wenruo Committed by David Sterba

btrfs: raid56: introduce btrfs_raid_bio::stripe_sectors

The new member is an array of sector_ptr pointers, they will represent
all sectors inside a full stripe (including P/Q).

They co-operate with btrfs_raid_bio::stripe_pages:

stripe_pages:   | Page 0, range [0, 64K)   | Page 1 ...
stripe_sectors: |  |  | ...             |  |
                |  |                    \- sector 15, page 0, pgoff=60K
                |  \- sector 1, page 0, pgoff=4K
                \---- sector 0, page 0, pgoff=0

With such structure, we can represent subpage sectors without using
extra pages.

Here we introduce a new helper, index_stripe_sectors(), to update
stripe_sectors[] to point to correct page and pgoff.

So every time rbio::stripe_pages[] pointer gets updated, the new helper
should be called.

The following functions have to call the new helper:

- steal_rbio()
- alloc_rbio_pages()
- alloc_rbio_parity_pages()
- alloc_rbio_essential_pages()
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 94efbe19
...@@ -52,6 +52,16 @@ struct btrfs_stripe_hash_table { ...@@ -52,6 +52,16 @@ struct btrfs_stripe_hash_table {
struct btrfs_stripe_hash table[]; struct btrfs_stripe_hash table[];
}; };
/*
 * A bvec like structure to present a sector inside a page.
 *
 * Unlike bvec we don't need bvlen, as it's fixed to sectorsize.
 */
struct sector_ptr {
	struct page *page;	/* page backing this sector */
	unsigned int pgoff;	/* byte offset of the sector inside @page */
};
enum btrfs_rbio_ops { enum btrfs_rbio_ops {
BTRFS_RBIO_WRITE, BTRFS_RBIO_WRITE,
BTRFS_RBIO_READ_REBUILD, BTRFS_RBIO_READ_REBUILD,
...@@ -171,8 +181,12 @@ struct btrfs_raid_bio { ...@@ -171,8 +181,12 @@ struct btrfs_raid_bio {
struct page **bio_pages; struct page **bio_pages;
/* /*
* bitmap to record which horizontal stripe has data * For subpage support, we need to map each sector to above
* stripe_pages.
*/ */
struct sector_ptr *stripe_sectors;
/* Bitmap to record which horizontal stripe has data */
unsigned long *dbitmap; unsigned long *dbitmap;
/* allocated with real_stripes-many pointers for finish_*() calls */ /* allocated with real_stripes-many pointers for finish_*() calls */
...@@ -291,6 +305,26 @@ static int rbio_bucket(struct btrfs_raid_bio *rbio) ...@@ -291,6 +305,26 @@ static int rbio_bucket(struct btrfs_raid_bio *rbio)
return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS); return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
} }
/*
 * Populate stripe_sectors[] so each sector points at the correct page
 * and in-page offset.
 *
 * Must be re-run whenever any page pointer in stripe_pages[] is changed,
 * otherwise stripe_sectors[] would reference stale pages.
 */
static void index_stripe_sectors(struct btrfs_raid_bio *rbio)
{
	const u32 sectorsize = rbio->bioc->fs_info->sectorsize;
	u32 offset = 0;
	int i;

	for (i = 0; i < rbio->nr_sectors; i++) {
		int page_index = offset >> PAGE_SHIFT;

		ASSERT(page_index < rbio->nr_pages);
		rbio->stripe_sectors[i].page = rbio->stripe_pages[page_index];
		rbio->stripe_sectors[i].pgoff = offset_in_page(offset);
		offset += sectorsize;
	}
}
/* /*
* stealing an rbio means taking all the uptodate pages from the stripe * stealing an rbio means taking all the uptodate pages from the stripe
* array in the source rbio and putting them into the destination rbio * array in the source rbio and putting them into the destination rbio
...@@ -317,6 +351,8 @@ static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest) ...@@ -317,6 +351,8 @@ static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
dest->stripe_pages[i] = s; dest->stripe_pages[i] = s;
src->stripe_pages[i] = NULL; src->stripe_pages[i] = NULL;
} }
index_stripe_sectors(dest);
index_stripe_sectors(src);
} }
/* /*
...@@ -977,6 +1013,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info, ...@@ -977,6 +1013,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
rbio = kzalloc(sizeof(*rbio) + rbio = kzalloc(sizeof(*rbio) +
sizeof(*rbio->stripe_pages) * num_pages + sizeof(*rbio->stripe_pages) * num_pages +
sizeof(*rbio->bio_pages) * num_pages + sizeof(*rbio->bio_pages) * num_pages +
sizeof(*rbio->stripe_sectors) * num_sectors +
sizeof(*rbio->finish_pointers) * real_stripes + sizeof(*rbio->finish_pointers) * real_stripes +
sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_nsectors) + sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_nsectors) +
sizeof(*rbio->finish_pbitmap) * BITS_TO_LONGS(stripe_nsectors), sizeof(*rbio->finish_pbitmap) * BITS_TO_LONGS(stripe_nsectors),
...@@ -1013,6 +1050,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info, ...@@ -1013,6 +1050,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
} while (0) } while (0)
CONSUME_ALLOC(rbio->stripe_pages, num_pages); CONSUME_ALLOC(rbio->stripe_pages, num_pages);
CONSUME_ALLOC(rbio->bio_pages, num_pages); CONSUME_ALLOC(rbio->bio_pages, num_pages);
CONSUME_ALLOC(rbio->stripe_sectors, num_sectors);
CONSUME_ALLOC(rbio->finish_pointers, real_stripes); CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_nsectors)); CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_nsectors));
CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_nsectors)); CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_nsectors));
...@@ -1032,16 +1070,29 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info, ...@@ -1032,16 +1070,29 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
/* allocate pages for all the stripes in the bio, including parity */ /* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio) static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{ {
return btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages); int ret;
ret = btrfs_alloc_page_array(rbio->nr_pages, rbio->stripe_pages);
if (ret < 0)
return ret;
/* Mapping all sectors */
index_stripe_sectors(rbio);
return 0;
} }
/* only allocate pages for p/q stripes */ /* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio) static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{ {
int data_pages = rbio_stripe_page_index(rbio, rbio->nr_data, 0); int data_pages = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
int ret;
return btrfs_alloc_page_array(rbio->nr_pages - data_pages, ret = btrfs_alloc_page_array(rbio->nr_pages - data_pages,
rbio->stripe_pages + data_pages); rbio->stripe_pages + data_pages);
if (ret < 0)
return ret;
index_stripe_sectors(rbio);
return 0;
} }
/* /*
...@@ -2283,6 +2334,7 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) ...@@ -2283,6 +2334,7 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
rbio->stripe_pages[index] = page; rbio->stripe_pages[index] = page;
} }
} }
index_stripe_sectors(rbio);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment