Commit 5ddcee1f authored by Gao Xiang

erofs: get rid of __stagingpage_alloc helper

Now open code is much cleaner due to iterative development.

Link: https://lore.kernel.org/r/20191124025217.12345-1-hsiangkao@aol.com
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
parent bda17a45
...@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq, ...@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
victim = availables[--top]; victim = availables[--top];
get_page(victim); get_page(victim);
} else { } else {
victim = erofs_allocpage(pagepool, GFP_KERNEL, false); victim = erofs_allocpage(pagepool, GFP_KERNEL);
if (!victim) if (!victim)
return -ENOMEM; return -ENOMEM;
victim->mapping = Z_EROFS_MAPPING_STAGING; victim->mapping = Z_EROFS_MAPPING_STAGING;
......
...@@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name, ...@@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
extern const struct file_operations erofs_dir_fops; extern const struct file_operations erofs_dir_fops;
/* utils.c / zdata.c */ /* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail); struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);
#if (EROFS_PCPUBUF_NR_PAGES > 0) #if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr); void *erofs_get_pcpubuf(unsigned int pagenr);
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
#include "internal.h" #include "internal.h"
#include <linux/pagevec.h> #include <linux/pagevec.h>
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{ {
struct page *page; struct page *page;
...@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) ...@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
DBG_BUGON(page_ref_count(page) != 1); DBG_BUGON(page_ref_count(page) != 1);
list_del(&page->lru); list_del(&page->lru);
} else { } else {
page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0); page = alloc_page(gfp);
} }
return page; return page;
} }
......
...@@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt) ...@@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
return true; return true;
} }
static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
gfp_t gfp)
{
struct page *page = erofs_allocpage(pagepool, gfp, true);
page->mapping = Z_EROFS_MAPPING_STAGING;
return page;
}
static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe, static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
unsigned int cachestrategy, unsigned int cachestrategy,
erofs_off_t la) erofs_off_t la)
...@@ -661,8 +652,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -661,8 +652,9 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
/* should allocate an additional staging page for pagevec */ /* should allocate an additional staging page for pagevec */
if (err == -EAGAIN) { if (err == -EAGAIN) {
struct page *const newpage = struct page *const newpage =
__stagingpage_alloc(pagepool, GFP_NOFS); erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);
newpage->mapping = Z_EROFS_MAPPING_STAGING;
err = z_erofs_attach_page(clt, newpage, err = z_erofs_attach_page(clt, newpage,
Z_EROFS_PAGE_TYPE_EXCLUSIVE); Z_EROFS_PAGE_TYPE_EXCLUSIVE);
if (!err) if (!err)
...@@ -1079,19 +1071,24 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, ...@@ -1079,19 +1071,24 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
unlock_page(page); unlock_page(page);
put_page(page); put_page(page);
out_allocpage: out_allocpage:
page = __stagingpage_alloc(pagepool, gfp); page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
/* non-LRU / non-movable temporary page is needed */
page->mapping = Z_EROFS_MAPPING_STAGING;
tocache = false;
}
if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) { if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
if (tocache) {
/* since it added to managed cache successfully */
unlock_page(page);
put_page(page);
} else {
list_add(&page->lru, pagepool); list_add(&page->lru, pagepool);
cpu_relax();
goto repeat;
} }
if (!tocache) cond_resched();
goto out; goto repeat;
if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
page->mapping = Z_EROFS_MAPPING_STAGING;
goto out;
} }
set_page_private(page, (unsigned long)pcl); set_page_private(page, (unsigned long)pcl);
SetPagePrivate(page); SetPagePrivate(page);
out: /* the only exit (for tracing and debugging) */ out: /* the only exit (for tracing and debugging) */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment