Commit 1e05ff36 authored by Gao Xiang, committed by Greg Kroah-Hartman

staging: erofs: complete error handling of z_erofs_do_read_page

This patch completes the error handling code of z_erofs_do_read_page.
PG_error is set on the page when a read error happens, therefore
z_erofs_onlinepage_endio will unlock the page without setting
PG_uptodate.
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0734ffbf
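For context on why setting PG_error is sufficient here: z_erofs_do_read_page() registers the page as an "online" page, and each part of the page drops a reference through z_erofs_onlinepage_endio(), which performs the final unlock. Below is a minimal sketch of that endio path (paraphrased, not the verbatim tree source; the page-private refcount layout is simplified and the Z_EROFS_ONLINEPAGE_COUNT_MASK name is recalled from unzip_vle.h of this era):

/*
 * Sketch only: once the last outstanding part of the page completes,
 * PG_uptodate is set only if no part raised PG_error via SetPageError().
 */
static inline void z_erofs_onlinepage_endio(struct page *page)
{
        unsigned int v;

        DBG_BUGON(!PagePrivate(page));

        /* each part of the page drops one reference when it finishes */
        v = atomic_dec_return((atomic_t *)&page->private);
        if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
                ClearPagePrivate(page);
                if (!PageError(page))   /* skipped once err_out set PG_error */
                        SetPageUptodate(page);
                unlock_page(page);
        }
}

With the new err_out label below setting PG_error and jumping to out:, a failed page still reaches this endio, so it is unlocked but left without PG_uptodate instead of being silently reported as up to date.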
@@ -611,7 +611,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
 
         enum z_erofs_page_type page_type;
         unsigned int cur, end, spiltted, index;
-        int err;
+        int err = 0;
 
         /* register locked file pages as online pages in pack */
         z_erofs_onlinepage_init(page);
@@ -638,12 +638,11 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
         if (unlikely(err))
                 goto err_out;
 
-        /* deal with hole (FIXME! broken now) */
         if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
                 goto hitted;
 
         DBG_BUGON(map->m_plen != 1 << sbi->clusterbits);
-        BUG_ON(erofs_blkoff(map->m_pa));
+        DBG_BUGON(erofs_blkoff(map->m_pa));
 
         err = z_erofs_vle_work_iter_begin(builder, sb, map, &fe->owned_head);
         if (unlikely(err))
@@ -688,7 +687,7 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
 
                 err = z_erofs_vle_work_add_page(builder,
                         newpage, Z_EROFS_PAGE_TYPE_EXCLUSIVE);
-                if (!err)
+                if (likely(!err))
                         goto retry;
         }
 
@@ -699,9 +698,10 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
         /* FIXME! avoid the last relundant fixup & endio */
         z_erofs_onlinepage_fixup(page, index, true);
-        ++spiltted;
 
-        /* also update nr_pages and increase queued_pages */
+        /* bump up the number of spiltted parts of a page */
+        ++spiltted;
+        /* also update nr_pages */
         work->nr_pages = max_t(pgoff_t, work->nr_pages, index + 1);
 
 next_part:
         /* can be used for verification */
@@ -711,16 +711,18 @@ static int z_erofs_do_read_page(struct z_erofs_vle_frontend *fe,
         if (end > 0)
                 goto repeat;
 
+out:
         /* FIXME! avoid the last relundant fixup & endio */
         z_erofs_onlinepage_endio(page);
 
         debugln("%s, finish page: %pK spiltted: %u map->m_llen %llu",
                 __func__, page, spiltted, map->m_llen);
-        return 0;
+        return err;
 
+        /* if some error occurred while processing this page */
 err_out:
-        /* TODO: the missing error handing cases */
-        return err;
+        SetPageError(page);
+        goto out;
 }
 
 static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)