Commit 7f59b277 authored by Eric Biggers, committed by Jaegeuk Kim

f2fs: clean up post-read processing

Rework the post-read processing logic to be much easier to understand.

At least one bug is fixed by this: if an I/O error occurred when reading
from disk, decryption and verity would be performed on the uninitialized
data, causing misleading messages in the kernel log.
Signed-off-by: Eric Biggers <ebiggers@google.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent cf740403
...@@ -756,38 +756,27 @@ static int f2fs_compress_pages(struct compress_ctx *cc) ...@@ -756,38 +756,27 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
return ret; return ret;
} }
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) static void f2fs_decompress_cluster(struct decompress_io_ctx *dic)
{ {
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode); struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
struct f2fs_inode_info *fi= F2FS_I(dic->inode); struct f2fs_inode_info *fi = F2FS_I(dic->inode);
const struct f2fs_compress_ops *cops = const struct f2fs_compress_ops *cops =
f2fs_cops[fi->i_compress_algorithm]; f2fs_cops[fi->i_compress_algorithm];
int ret; int ret;
int i; int i;
dec_page_count(sbi, F2FS_RD_DATA);
if (bio->bi_status || PageError(page))
dic->failed = true;
if (atomic_dec_return(&dic->pending_pages))
return;
trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx, trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
dic->cluster_size, fi->i_compress_algorithm); dic->cluster_size, fi->i_compress_algorithm);
/* submit partial compressed pages */
if (dic->failed) { if (dic->failed) {
ret = -EIO; ret = -EIO;
goto out_free_dic; goto out_end_io;
} }
dic->tpages = page_array_alloc(dic->inode, dic->cluster_size); dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
if (!dic->tpages) { if (!dic->tpages) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_free_dic; goto out_end_io;
} }
for (i = 0; i < dic->cluster_size; i++) { for (i = 0; i < dic->cluster_size; i++) {
...@@ -799,20 +788,20 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) ...@@ -799,20 +788,20 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
dic->tpages[i] = f2fs_compress_alloc_page(); dic->tpages[i] = f2fs_compress_alloc_page();
if (!dic->tpages[i]) { if (!dic->tpages[i]) {
ret = -ENOMEM; ret = -ENOMEM;
goto out_free_dic; goto out_end_io;
} }
} }
if (cops->init_decompress_ctx) { if (cops->init_decompress_ctx) {
ret = cops->init_decompress_ctx(dic); ret = cops->init_decompress_ctx(dic);
if (ret) if (ret)
goto out_free_dic; goto out_end_io;
} }
dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size); dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
if (!dic->rbuf) { if (!dic->rbuf) {
ret = -ENOMEM; ret = -ENOMEM;
goto destroy_decompress_ctx; goto out_destroy_decompress_ctx;
} }
dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages); dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
...@@ -851,18 +840,34 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) ...@@ -851,18 +840,34 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
vm_unmap_ram(dic->cbuf, dic->nr_cpages); vm_unmap_ram(dic->cbuf, dic->nr_cpages);
out_vunmap_rbuf: out_vunmap_rbuf:
vm_unmap_ram(dic->rbuf, dic->cluster_size); vm_unmap_ram(dic->rbuf, dic->cluster_size);
destroy_decompress_ctx: out_destroy_decompress_ctx:
if (cops->destroy_decompress_ctx) if (cops->destroy_decompress_ctx)
cops->destroy_decompress_ctx(dic); cops->destroy_decompress_ctx(dic);
out_free_dic: out_end_io:
if (!verity)
f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
ret, false);
trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx, trace_f2fs_decompress_pages_end(dic->inode, dic->cluster_idx,
dic->clen, ret); dic->clen, ret);
if (!verity) f2fs_decompress_end_io(dic, ret);
f2fs_free_dic(dic); }
/*
* This is called when a page of a compressed cluster has been read from disk
* (or failed to be read from disk). It checks whether this page was the last
* page being waited on in the cluster, and if so, it decompresses the cluster
* (or in the case of a failure, cleans up without actually decompressing).
*/
void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
dec_page_count(sbi, F2FS_RD_DATA);
if (failed)
WRITE_ONCE(dic->failed, true);
if (atomic_dec_and_test(&dic->remaining_pages))
f2fs_decompress_cluster(dic);
} }
static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index) static bool is_page_in_cluster(struct compress_ctx *cc, pgoff_t index)
...@@ -1529,6 +1534,8 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, ...@@ -1529,6 +1534,8 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
return err; return err;
} }
static void f2fs_free_dic(struct decompress_io_ctx *dic);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
{ {
struct decompress_io_ctx *dic; struct decompress_io_ctx *dic;
...@@ -1547,12 +1554,14 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) ...@@ -1547,12 +1554,14 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
dic->magic = F2FS_COMPRESSED_PAGE_MAGIC; dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
dic->inode = cc->inode; dic->inode = cc->inode;
atomic_set(&dic->pending_pages, cc->nr_cpages); atomic_set(&dic->remaining_pages, cc->nr_cpages);
dic->cluster_idx = cc->cluster_idx; dic->cluster_idx = cc->cluster_idx;
dic->cluster_size = cc->cluster_size; dic->cluster_size = cc->cluster_size;
dic->log_cluster_size = cc->log_cluster_size; dic->log_cluster_size = cc->log_cluster_size;
dic->nr_cpages = cc->nr_cpages; dic->nr_cpages = cc->nr_cpages;
refcount_set(&dic->refcnt, 1);
dic->failed = false; dic->failed = false;
dic->need_verity = f2fs_need_verity(cc->inode, start_idx);
for (i = 0; i < dic->cluster_size; i++) for (i = 0; i < dic->cluster_size; i++)
dic->rpages[i] = cc->rpages[i]; dic->rpages[i] = cc->rpages[i];
...@@ -1581,7 +1590,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) ...@@ -1581,7 +1590,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
} }
void f2fs_free_dic(struct decompress_io_ctx *dic) static void f2fs_free_dic(struct decompress_io_ctx *dic)
{ {
int i; int i;
...@@ -1609,30 +1618,88 @@ void f2fs_free_dic(struct decompress_io_ctx *dic) ...@@ -1609,30 +1618,88 @@ void f2fs_free_dic(struct decompress_io_ctx *dic)
kmem_cache_free(dic_entry_slab, dic); kmem_cache_free(dic_entry_slab, dic);
} }
void f2fs_decompress_end_io(struct page **rpages, static void f2fs_put_dic(struct decompress_io_ctx *dic)
unsigned int cluster_size, bool err, bool verity) {
if (refcount_dec_and_test(&dic->refcnt))
f2fs_free_dic(dic);
}
/*
* Update and unlock the cluster's pagecache pages, and release the reference to
* the decompress_io_ctx that was being held for I/O completion.
*/
static void __f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{ {
int i; int i;
for (i = 0; i < cluster_size; i++) { for (i = 0; i < dic->cluster_size; i++) {
struct page *rpage = rpages[i]; struct page *rpage = dic->rpages[i];
if (!rpage) if (!rpage)
continue; continue;
if (err || PageError(rpage)) /* PG_error was set if verity failed. */
goto clear_uptodate; if (failed || PageError(rpage)) {
ClearPageUptodate(rpage);
if (!verity || fsverity_verify_page(rpage)) { /* will re-read again later */
ClearPageError(rpage);
} else {
SetPageUptodate(rpage); SetPageUptodate(rpage);
goto unlock;
} }
clear_uptodate:
ClearPageUptodate(rpage);
ClearPageError(rpage);
unlock:
unlock_page(rpage); unlock_page(rpage);
} }
f2fs_put_dic(dic);
}
static void f2fs_verify_cluster(struct work_struct *work)
{
struct decompress_io_ctx *dic =
container_of(work, struct decompress_io_ctx, verity_work);
int i;
/* Verify the cluster's decompressed pages with fs-verity. */
for (i = 0; i < dic->cluster_size; i++) {
struct page *rpage = dic->rpages[i];
if (rpage && !fsverity_verify_page(rpage))
SetPageError(rpage);
}
__f2fs_decompress_end_io(dic, false);
}
/*
* This is called when a compressed cluster has been decompressed
* (or failed to be read and/or decompressed).
*/
void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed)
{
if (!failed && dic->need_verity) {
/*
* Note that to avoid deadlocks, the verity work can't be done
* on the decompression workqueue. This is because verifying
* the data pages can involve reading metadata pages from the
* file, and these metadata pages may be compressed.
*/
INIT_WORK(&dic->verity_work, f2fs_verify_cluster);
fsverity_enqueue_verify_work(&dic->verity_work);
} else {
__f2fs_decompress_end_io(dic, failed);
}
}
/*
* Put a reference to a compressed page's decompress_io_ctx.
*
* This is called when the page is no longer needed and can be freed.
*/
void f2fs_put_page_dic(struct page *page)
{
struct decompress_io_ctx *dic =
(struct decompress_io_ctx *)page_private(page);
f2fs_put_dic(dic);
} }
int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi)
......
This diff is collapsed.
...@@ -1341,7 +1341,7 @@ struct compress_io_ctx { ...@@ -1341,7 +1341,7 @@ struct compress_io_ctx {
atomic_t pending_pages; /* in-flight compressed page count */ atomic_t pending_pages; /* in-flight compressed page count */
}; };
/* decompress io context for read IO path */ /* Context for decompressing one cluster on the read IO path */
struct decompress_io_ctx { struct decompress_io_ctx {
u32 magic; /* magic number to indicate page is compressed */ u32 magic; /* magic number to indicate page is compressed */
struct inode *inode; /* inode the context belong to */ struct inode *inode; /* inode the context belong to */
...@@ -1357,11 +1357,37 @@ struct decompress_io_ctx { ...@@ -1357,11 +1357,37 @@ struct decompress_io_ctx {
struct compress_data *cbuf; /* virtual mapped address on cpages */ struct compress_data *cbuf; /* virtual mapped address on cpages */
size_t rlen; /* valid data length in rbuf */ size_t rlen; /* valid data length in rbuf */
size_t clen; /* valid data length in cbuf */ size_t clen; /* valid data length in cbuf */
atomic_t pending_pages; /* in-flight compressed page count */
atomic_t verity_pages; /* in-flight page count for verity */ /*
bool failed; /* indicate IO error during decompression */ * The number of compressed pages remaining to be read in this cluster.
* This is initially nr_cpages. It is decremented by 1 each time a page
* has been read (or failed to be read). When it reaches 0, the cluster
* is decompressed (or an error is reported).
*
* If an error occurs before all the pages have been submitted for I/O,
* then this will never reach 0. In this case the I/O submitter is
* responsible for calling f2fs_decompress_end_io() instead.
*/
atomic_t remaining_pages;
/*
* Number of references to this decompress_io_ctx.
*
* One reference is held for I/O completion. This reference is dropped
* after the pagecache pages are updated and unlocked -- either after
* decompression (and verity if enabled), or after an error.
*
* In addition, each compressed page holds a reference while it is in a
* bio. These references are necessary to prevent compressed pages from
* being freed while they are still in a bio.
*/
refcount_t refcnt;
bool failed; /* IO error occurred before decompression? */
bool need_verity; /* need fs-verity verification after decompression? */
void *private; /* payload buffer for specified decompression algorithm */ void *private; /* payload buffer for specified decompression algorithm */
void *private2; /* extra payload buffer */ void *private2; /* extra payload buffer */
struct work_struct verity_work; /* work to verify the decompressed pages */
}; };
#define NULL_CLUSTER ((unsigned int)(~0)) #define NULL_CLUSTER ((unsigned int)(~0))
...@@ -3883,7 +3909,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page); ...@@ -3883,7 +3909,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page);
bool f2fs_is_compress_backend_ready(struct inode *inode); bool f2fs_is_compress_backend_ready(struct inode *inode);
int f2fs_init_compress_mempool(void); int f2fs_init_compress_mempool(void);
void f2fs_destroy_compress_mempool(void); void f2fs_destroy_compress_mempool(void);
void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity); void f2fs_end_read_compressed_page(struct page *page, bool failed);
bool f2fs_cluster_is_empty(struct compress_ctx *cc); bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index); bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page); void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
...@@ -3896,9 +3922,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, ...@@ -3896,9 +3922,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
unsigned nr_pages, sector_t *last_block_in_bio, unsigned nr_pages, sector_t *last_block_in_bio,
bool is_readahead, bool for_write); bool is_readahead, bool for_write);
struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc); struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc);
void f2fs_free_dic(struct decompress_io_ctx *dic); void f2fs_decompress_end_io(struct decompress_io_ctx *dic, bool failed);
void f2fs_decompress_end_io(struct page **rpages, void f2fs_put_page_dic(struct page *page);
unsigned int cluster_size, bool err, bool verity);
int f2fs_init_compress_ctx(struct compress_ctx *cc); int f2fs_init_compress_ctx(struct compress_ctx *cc);
void f2fs_destroy_compress_ctx(struct compress_ctx *cc); void f2fs_destroy_compress_ctx(struct compress_ctx *cc);
void f2fs_init_compress_info(struct f2fs_sb_info *sbi); void f2fs_init_compress_info(struct f2fs_sb_info *sbi);
...@@ -3922,6 +3947,14 @@ static inline struct page *f2fs_compress_control_page(struct page *page) ...@@ -3922,6 +3947,14 @@ static inline struct page *f2fs_compress_control_page(struct page *page)
} }
static inline int f2fs_init_compress_mempool(void) { return 0; } static inline int f2fs_init_compress_mempool(void) { return 0; }
static inline void f2fs_destroy_compress_mempool(void) { } static inline void f2fs_destroy_compress_mempool(void) { }
static inline void f2fs_end_read_compressed_page(struct page *page, bool failed)
{
WARN_ON_ONCE(1);
}
static inline void f2fs_put_page_dic(struct page *page)
{
WARN_ON_ONCE(1);
}
static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; } static inline int f2fs_init_page_array_cache(struct f2fs_sb_info *sbi) { return 0; }
static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { } static inline void f2fs_destroy_page_array_cache(struct f2fs_sb_info *sbi) { }
static inline int __init f2fs_init_compress_cache(void) { return 0; } static inline int __init f2fs_init_compress_cache(void) { return 0; }
...@@ -4126,6 +4159,12 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, ...@@ -4126,6 +4159,12 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
return false; return false;
} }
static inline bool f2fs_need_verity(const struct inode *inode, pgoff_t idx)
{
return fsverity_active(inode) &&
idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
#ifdef CONFIG_F2FS_FAULT_INJECTION #ifdef CONFIG_F2FS_FAULT_INJECTION
extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate, extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
unsigned int type); unsigned int type);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment