Commit 55ad43e8 authored by Kirill A. Shutemov, committed by Andrew Morton

mm: add a helper to accept page

Accept a given struct page and add it to the free list.

The helper is useful for physical memory scanners that want to use free
unaccepted memory.

Link: https://lkml.kernel.org/r/20240809114854.3745464-7-kirill.shutemov@linux.intel.com
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Rapoport (Microsoft) <rppt@kernel.org>
Cc: Tom Lendacky <thomas.lendacky@amd.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5adfeaec
......@@ -1432,4 +1432,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long new_addr, unsigned long len,
bool need_rmap_locks, bool for_stack);
#ifdef CONFIG_UNACCEPTED_MEMORY
/* Accept @page (if still marked unaccepted) and hand it to the free lists. */
void accept_page(struct page *page);
#else /* CONFIG_UNACCEPTED_MEMORY */
/* No unaccepted memory on this configuration: accepting a page is a no-op. */
static inline void accept_page(struct page *page)
{
}
#endif /* CONFIG_UNACCEPTED_MEMORY */
#endif /* __MM_INTERNAL_H */
......@@ -6935,27 +6935,18 @@ static bool page_contains_unaccepted(struct page *page, unsigned int order)
return range_contains_unaccepted_memory(start, PAGE_SIZE << order);
}
/*
 * NOTE(review): this span is an interleaved diff fragment — the scrape
 * stripped the +/- markers, so lines being REMOVED from the old
 * try_to_accept_memory_one() are mixed with lines being ADDED for the new
 * __accept_page() helper, and a hunk boundary (the "@@ -6963 ..." line
 * below) cuts the function body short. It is not compilable as-is; consult
 * the upstream commit for the complete definitions.
 */
static bool try_to_accept_memory_one(struct zone *zone)
static void __accept_page(struct zone *zone, unsigned long *flags,
struct page *page)
{
/* Old locals (removed): the new helper receives flags and page instead. */
unsigned long flags;
struct page *page;
bool last;
spin_lock_irqsave(&zone->lock, flags);
page = list_first_entry_or_null(&zone->unaccepted_pages,
struct page, lru);
if (!page) {
spin_unlock_irqrestore(&zone->lock, flags);
return false;
}
list_del(&page->lru);
last = list_empty(&zone->unaccepted_pages);
account_freepages(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE);
__mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES);
__ClearPageUnaccepted(page);
/* Old direct unlock (removed) vs. new unlock via caller's *flags (added). */
spin_unlock_irqrestore(&zone->lock, flags);
spin_unlock_irqrestore(&zone->lock, *flags);
accept_memory(page_to_phys(page), PAGE_SIZE << MAX_PAGE_ORDER);
......@@ -6963,6 +6954,38 @@ static bool try_to_accept_memory_one(struct zone *zone)
if (last)
static_branch_dec(&zones_with_unaccepted_pages);
}
/*
 * Accept @page and hand it to the page allocator's free lists.
 *
 * Intended for physical memory scanners that found a free but still
 * unaccepted page. If the page has already been accepted (or was never
 * unaccepted), this is a no-op.
 */
void accept_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;

	spin_lock_irqsave(&zone->lock, flags);

	if (PageUnaccepted(page)) {
		/* __accept_page() drops zone->lock on our behalf. */
		__accept_page(zone, &flags, page);
		return;
	}

	/* Nothing to do: somebody else accepted the page already. */
	spin_unlock_irqrestore(&zone->lock, flags);
}
/*
 * Pull one page off @zone's unaccepted list and accept it.
 *
 * Returns true if a page was accepted, false if the list was empty.
 */
static bool try_to_accept_memory_one(struct zone *zone)
{
	unsigned long lock_flags;
	struct page *first;

	spin_lock_irqsave(&zone->lock, lock_flags);
	first = list_first_entry_or_null(&zone->unaccepted_pages,
					 struct page, lru);
	if (first) {
		/* __accept_page() releases zone->lock. */
		__accept_page(zone, &lock_flags, first);
		return true;
	}

	spin_unlock_irqrestore(&zone->lock, lock_flags);
	return false;
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment