Commit 2943fdbc authored by Ritesh Harjani, committed by Theodore Ts'o

ext4: Refactor mpage_map_and_submit_buffers function

This patch refactors mpage_map_and_submit_buffers() to split the
page buffer processing out into a separate function. This will be
required to add support for blocksize < pagesize in the
dioread_nolock feature.

No functionality change in this patch.
Signed-off-by: Ritesh Harjani <riteshh@linux.ibm.com>
Link: https://lore.kernel.org/r/20191016073711.4141-4-riteshh@linux.ibm.com
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
parent a00713ea
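
[Editorial note on the calling convention introduced below: the new helper
communicates with its caller through cursor out-parameters (*m_lblk, *m_pblk)
and a bool flag (*map_bh) that tells mpage_map_and_submit_buffers() whether
the caller must stop and map a further extent. The following is a minimal
standalone sketch of that contract only — it is not ext4 code, and every
name in it (process_one_page, BLOCKS_PER_PAGE, need_more_mapping) is
hypothetical.]

#include <stdbool.h>
#include <stdio.h>

#define BLOCKS_PER_PAGE 4    /* hypothetical: models blocksize < pagesize */

/*
 * Hypothetical stand-in for mpage_process_page(): walk the blocks of one
 * page, advancing the caller's logical (*lblk) and physical (*pblk)
 * cursors. If a block beyond the end of the currently mapped extent is
 * reached, set *need_more_mapping so the caller breaks out and maps the
 * next extent before continuing.
 */
static int process_one_page(unsigned long map_end_lblk,
                            unsigned long *lblk, unsigned long *pblk,
                            bool *need_more_mapping)
{
        int i;

        for (i = 0; i < BLOCKS_PER_PAGE; i++) {
                if (*lblk >= map_end_lblk) {
                        *need_more_mapping = true;  /* caller must remap */
                        return 0;
                }
                (*lblk)++;      /* consume one logical block... */
                (*pblk)++;      /* ...and its physical counterpart */
        }
        *need_more_mapping = false;     /* page fully mapped */
        return 0;
}

int main(void)
{
        unsigned long lblk = 0, pblk = 100;     /* cursors shared across pages */
        bool need_more_mapping = false;
        int page;

        /* Extent covers 6 blocks, so the second page is only half mapped. */
        for (page = 0; page < 2; page++) {
                process_one_page(6, &lblk, &pblk, &need_more_mapping);
                if (need_more_mapping) {
                        printf("page %d needs another extent at lblk %lu\n",
                               page, lblk);
                        break;
                }
                printf("page %d fully mapped, next pblk %lu\n", page, pblk);
        }
        return 0;
}

[In the real helper the cursors are ext4_lblk_t/ext4_fsblk_t and the flag
makes the caller return to its extent-mapping loop, as the diff below shows.]
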
@@ -2340,6 +2340,75 @@ static int mpage_process_page_bufs(struct mpage_da_data *mpd,
         return lblk < blocks;
 }
 
+/*
+ * mpage_process_page - update page buffers corresponding to changed extent
+ *                      and may submit fully mapped page for IO
+ *
+ * @mpd    - description of extent to map, on return next extent to map
+ * @m_lblk - logical block mapping.
+ * @m_pblk - corresponding physical mapping.
+ * @map_bh - determines on return whether this page requires any further
+ *           mapping or not.
+ * Scan given page buffers corresponding to changed extent and update buffer
+ * state according to new extent state.
+ * We map delalloc buffers to their physical location, clear unwritten bits.
+ * If the given page is not fully mapped, we update @map to the next extent
+ * in the given page that needs mapping & return @map_bh as true.
+ */
+static int mpage_process_page(struct mpage_da_data *mpd, struct page *page,
+                              ext4_lblk_t *m_lblk, ext4_fsblk_t *m_pblk,
+                              bool *map_bh)
+{
+        struct buffer_head *head, *bh;
+        ext4_io_end_t *io_end = mpd->io_submit.io_end;
+        ext4_lblk_t lblk = *m_lblk;
+        ext4_fsblk_t pblock = *m_pblk;
+        int err = 0;
+
+        bh = head = page_buffers(page);
+        do {
+                if (lblk < mpd->map.m_lblk)
+                        continue;
+                if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
+                        /*
+                         * Buffer after end of mapped extent.
+                         * Find next buffer in the page to map.
+                         */
+                        mpd->map.m_len = 0;
+                        mpd->map.m_flags = 0;
+
+                        /*
+                         * FIXME: If dioread_nolock supports
+                         * blocksize < pagesize, we need to make
+                         * sure we add size mapped so far to
+                         * io_end->size as the following call
+                         * can submit the page for IO.
+                         */
+                        err = mpage_process_page_bufs(mpd, head, bh, lblk);
+                        if (err > 0)
+                                err = 0;
+                        *map_bh = true;
+                        goto out;
+                }
+                if (buffer_delay(bh)) {
+                        clear_buffer_delay(bh);
+                        bh->b_blocknr = pblock++;
+                }
+                clear_buffer_unwritten(bh);
+        } while (lblk++, (bh = bh->b_this_page) != head);
+        /*
+         * FIXME: This is going to break if dioread_nolock
+         * supports blocksize < pagesize as we will try to
+         * convert potentially unmapped parts of inode.
+         */
+        io_end->size += PAGE_SIZE;
+        *map_bh = false;
+out:
+        *m_lblk = lblk;
+        *m_pblk = pblock;
+        return err;
+}
+
 /*
  * mpage_map_buffers - update buffers corresponding to changed extent and
  *                     submit fully mapped pages for IO
@@ -2359,12 +2428,12 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
         struct pagevec pvec;
         int nr_pages, i;
         struct inode *inode = mpd->inode;
-        struct buffer_head *head, *bh;
         int bpp_bits = PAGE_SHIFT - inode->i_blkbits;
         pgoff_t start, end;
         ext4_lblk_t lblk;
-        sector_t pblock;
+        ext4_fsblk_t pblock;
         int err;
+        bool map_bh = false;
 
         start = mpd->map.m_lblk >> bpp_bits;
         end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
@@ -2380,50 +2449,19 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
                 for (i = 0; i < nr_pages; i++) {
                         struct page *page = pvec.pages[i];
 
-                        bh = head = page_buffers(page);
-                        do {
-                                if (lblk < mpd->map.m_lblk)
-                                        continue;
-                                if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
-                                        /*
-                                         * Buffer after end of mapped extent.
-                                         * Find next buffer in the page to map.
-                                         */
-                                        mpd->map.m_len = 0;
-                                        mpd->map.m_flags = 0;
-                                        /*
-                                         * FIXME: If dioread_nolock supports
-                                         * blocksize < pagesize, we need to make
-                                         * sure we add size mapped so far to
-                                         * io_end->size as the following call
-                                         * can submit the page for IO.
-                                         */
-                                        err = mpage_process_page_bufs(mpd, head,
-                                                                      bh, lblk);
-                                        pagevec_release(&pvec);
-                                        if (err > 0)
-                                                err = 0;
-                                        return err;
-                                }
-                                if (buffer_delay(bh)) {
-                                        clear_buffer_delay(bh);
-                                        bh->b_blocknr = pblock++;
-                                }
-                                clear_buffer_unwritten(bh);
-                        } while (lblk++, (bh = bh->b_this_page) != head);
+                        err = mpage_process_page(mpd, page, &lblk, &pblock,
+                                                 &map_bh);
                         /*
-                         * FIXME: This is going to break if dioread_nolock
-                         * supports blocksize < pagesize as we will try to
-                         * convert potentially unmapped parts of inode.
+                         * If map_bh is true, means page may require further bh
+                         * mapping, or maybe the page was submitted for IO.
+                         * So we return to call further extent mapping.
                          */
-                        mpd->io_submit.io_end->size += PAGE_SIZE;
+                        if (err < 0 || map_bh == true)
+                                goto out;
                         /* Page fully mapped - let IO run! */
                         err = mpage_submit_page(mpd, page);
-                        if (err < 0) {
-                                pagevec_release(&pvec);
-                                return err;
-                        }
+                        if (err < 0)
+                                goto out;
                 }
                 pagevec_release(&pvec);
         }
@@ -2431,6 +2469,9 @@ static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
         mpd->map.m_len = 0;
         mpd->map.m_flags = 0;
         return 0;
+out:
+        pagevec_release(&pvec);
+        return err;
 }
 
 static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
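
[Editorial note: besides extracting the helper, the diff replaces the
duplicated pagevec_release()-then-return error paths with a single "out:"
label, so the pagevec is released in exactly one place. Below is a minimal
standalone sketch of that single-exit cleanup idiom — not ext4 code; struct
resource, step_might_fail, and process_all are hypothetical names.]

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical resource standing in for the pagevec. */
struct resource {
        int *data;
};

static int step_might_fail(int i)
{
        return (i == 3) ? -1 : 0;       /* fail on the fourth step */
}

/*
 * Single-exit cleanup idiom: every failure path jumps to "out", so the
 * resource is released in exactly one place instead of before each return.
 */
static int process_all(void)
{
        struct resource res = { .data = malloc(16 * sizeof(int)) };
        int err = 0;
        int i;

        if (!res.data)
                return -1;

        for (i = 0; i < 8; i++) {
                err = step_might_fail(i);
                if (err < 0)
                        goto out;       /* no duplicated free() here */
        }
out:
        free(res.data);                 /* released exactly once */
        return err;
}

int main(void)
{
        printf("process_all() = %d\n", process_all());
        return 0;
}
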