Commit 7de8728f authored by Uladzislau Rezki (Sony), committed by Andrew Morton

mm: vmalloc: refactor vm_area_alloc_pages() function

The aim is to simplify the vm_area_alloc_pages() function and make it
less confusing, as it has become rather cluttered:

- eliminate a "bulk_gfp" variable and do not overwrite a gfp
  flag for bulk allocator;
- drop __GFP_NOFAIL flag for high-order-page requests on upper
  layer. It becomes less spread between levels when it comes to
  __GFP_NOFAIL allocations;
- add a comment about a fallback path if high-order attempt is
  unsuccessful because for such cases __GFP_NOFAIL is dropped;
- fix a typo in a commit message.

Link: https://lkml.kernel.org/r/20240827190916.34242-1-urezki@gmail.com
Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Baoquan He <bhe@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Oleksiy Avramchenko <oleksiy.avramchenko@sony.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 01c373e9
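As an aside, not part of the commit itself: the gfp handling this change converges on is that __vmalloc_area_node() strips __GFP_NOFAIL from high-order (page_order > 0) requests at the call site, while vm_area_alloc_pages() passes the one remaining gfp value unchanged to both the bulk and the single-page paths. A minimal, hedged userspace C sketch of that call-site expression follows; the X_GFP_* bits and the caller_gfp() helper are invented for illustration and are not the kernel API.

    /* Illustration only: userspace stand-ins, not <linux/gfp.h> definitions. */
    #include <stdio.h>

    #define X_GFP_NOFAIL (1u << 0) /* stand-in for __GFP_NOFAIL */
    #define X_GFP_NOWARN (1u << 1) /* stand-in for __GFP_NOWARN */

    /*
     * Mirrors the call-site expression
     * "(page_order ? gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN":
     * high-order requests lose NOFAIL, order-0 requests keep it.
     */
    static unsigned int caller_gfp(unsigned int gfp_mask, unsigned int page_order)
    {
        return (page_order ? (gfp_mask & ~X_GFP_NOFAIL) : gfp_mask) | X_GFP_NOWARN;
    }

    int main(void)
    {
        unsigned int gfp = X_GFP_NOFAIL;

        printf("order 0 keeps NOFAIL: %d\n", !!(caller_gfp(gfp, 0) & X_GFP_NOFAIL)); /* 1 */
        printf("order 2 keeps NOFAIL: %d\n", !!(caller_gfp(gfp, 2) & X_GFP_NOFAIL)); /* 0 */
        return 0;
    }

If the high-order attempt fails, the order-0 fallback mentioned in the added comment presumably re-enters this path with page_order == 0, so __GFP_NOFAIL is honored there.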
mm/vmalloc.c

@@ -3515,8 +3515,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
         unsigned int order, unsigned int nr_pages, struct page **pages)
 {
     unsigned int nr_allocated = 0;
-    gfp_t alloc_gfp = gfp;
-    bool nofail = gfp & __GFP_NOFAIL;
     struct page *page;
     int i;
@@ -3527,9 +3525,6 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
      * more permissive.
      */
     if (!order) {
-        /* bulk allocator doesn't support nofail req. officially */
-        gfp_t bulk_gfp = gfp & ~__GFP_NOFAIL;
-
         while (nr_allocated < nr_pages) {
             unsigned int nr, nr_pages_request;
@@ -3547,12 +3542,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
              * but mempolicy wants to alloc memory by interleaving.
              */
             if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE)
-                nr = alloc_pages_bulk_array_mempolicy_noprof(bulk_gfp,
+                nr = alloc_pages_bulk_array_mempolicy_noprof(gfp,
                             nr_pages_request,
                             pages + nr_allocated);
             else
-                nr = alloc_pages_bulk_array_node_noprof(bulk_gfp, nid,
+                nr = alloc_pages_bulk_array_node_noprof(gfp, nid,
                             nr_pages_request,
                             pages + nr_allocated);
@@ -3566,30 +3560,24 @@ vm_area_alloc_pages(gfp_t gfp, int nid,
             if (nr != nr_pages_request)
                 break;
         }
-    } else if (gfp & __GFP_NOFAIL) {
-        /*
-         * Higher order nofail allocations are really expensive and
-         * potentially dangerous (pre-mature OOM, disruptive reclaim
-         * and compaction etc.
-         */
-        alloc_gfp &= ~__GFP_NOFAIL;
     }
 
     /* High-order pages or fallback path if "bulk" fails. */
     while (nr_allocated < nr_pages) {
-        if (!nofail && fatal_signal_pending(current))
+        if (!(gfp & __GFP_NOFAIL) && fatal_signal_pending(current))
             break;
 
         if (nid == NUMA_NO_NODE)
-            page = alloc_pages_noprof(alloc_gfp, order);
+            page = alloc_pages_noprof(gfp, order);
         else
-            page = alloc_pages_node_noprof(nid, alloc_gfp, order);
+            page = alloc_pages_node_noprof(nid, gfp, order);
         if (unlikely(!page))
             break;
 
         /*
          * Higher order allocations must be able to be treated as
-         * indepdenent small pages by callers (as they can with
+         * independent small pages by callers (as they can with
          * small-page vmallocs). Some drivers do their own refcounting
          * on vmalloc_to_page() pages, some use page->mapping,
          * page->lru, etc.
@@ -3650,7 +3638,16 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
     set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
     page_order = vm_area_page_order(area);
 
-    area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
+    /*
+     * Higher order nofail allocations are really expensive and
+     * potentially dangerous (pre-mature OOM, disruptive reclaim
+     * and compaction etc.
+     *
+     * Please note, the __vmalloc_node_range_noprof() falls-back
+     * to order-0 pages if high-order attempt is unsuccessful.
+     */
+    area->nr_pages = vm_area_alloc_pages((page_order ?
+        gfp_mask & ~__GFP_NOFAIL : gfp_mask) | __GFP_NOWARN,
         node, page_order, nr_small_pages, area->pages);
 
     atomic_long_add(area->nr_pages, &nr_vmalloc_pages);