Commit 4468b8f1 authored by Mel Gorman, committed by Linus Torvalds

mm: uninline page_xchg_last_nid()

Andrew Morton pointed out that page_xchg_last_nid() and
reset_page_last_nid() were "getting nuttily large" and asked that they be
investigated.

reset_page_last_nid() is on the page free path and it would be
unfortunate to make that path more expensive than it needs to be.  Due
to its internal use of page_xchg_last_nid() it is already more expensive
than necessary but, fortunately, it should also be impossible for
page->flags to be updated in parallel when we call
reset_page_last_nid().  Instead of uninlining the function, it now uses
a simpler implementation that assumes no parallel updates and should be
sufficiently short for inlining.
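
To make the saving concrete, here is a minimal user-space sketch of the
simplified reset: a plain read-modify-write of the flags word, valid only
because no parallel update of page->flags is possible on the page free
path.  The SHIFT/MASK values below are illustrative stand-ins, not the
kernel's real page-flags layout.

/*
 * Sketch of the simplified reset_page_last_nid(): two plain stores
 * instead of a cmpxchg() loop.  Field layout values are illustrative.
 */
#include <stdio.h>

#define LAST_NID_SHIFT   8                       /* illustrative field width */
#define LAST_NID_PGSHIFT 24                      /* illustrative field position */
#define LAST_NID_MASK    ((1UL << LAST_NID_SHIFT) - 1)

static unsigned long page_flags;

static void reset_last_nid(void)
{
        int nid = (1 << LAST_NID_SHIFT) - 1;     /* all-ones "no node" sentinel */

        page_flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
        page_flags |= ((unsigned long)nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
}

int main(void)
{
        page_flags = 0x5a;                       /* pretend other flag bits are set */
        reset_last_nid();
        printf("last_nid = %lu, other flags = %#lx\n",
               (page_flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK,
               page_flags & ~(LAST_NID_MASK << LAST_NID_PGSHIFT));
        return 0;
}

The masking leaves the unrelated flag bits untouched; replacing the
cmpxchg() loop with these two plain stores is the entire saving on the
free path.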

page_xchg_last_nid() is called on paths that are already quite expensive
(huge page splitting, fault handling, migration), so it is reasonable to
uninline it.  There was not really a good home for the function, but
mm/mmzone.c was the closest fit IMO.

This patch saved 128 bytes of text in the vmlinux file for the kernel
configuration I used for testing automatic NUMA balancing.
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 6acc8b02
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -677,25 +677,14 @@ static inline int page_last_nid(struct page *page)
 	return (page->flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;
 }
 
-static inline int page_xchg_last_nid(struct page *page, int nid)
-{
-	unsigned long old_flags, flags;
-	int last_nid;
-
-	do {
-		old_flags = flags = page->flags;
-		last_nid = page_last_nid(page);
-
-		flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
-		flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
-	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
-
-	return last_nid;
-}
+extern int page_xchg_last_nid(struct page *page, int nid);
 
 static inline void reset_page_last_nid(struct page *page)
 {
-	page_xchg_last_nid(page, (1 << LAST_NID_SHIFT) - 1);
+	int nid = (1 << LAST_NID_SHIFT) - 1;
+
+	page->flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
+	page->flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
 }
 
 #endif /* LAST_NID_NOT_IN_PAGE_FLAGS */
 #else
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -1,7 +1,7 @@
 /*
  * linux/mm/mmzone.c
  *
- * management codes for pgdats and zones.
+ * management codes for pgdats, zones and page flags
  */
@@ -96,3 +96,21 @@ void lruvec_init(struct lruvec *lruvec)
 	for_each_lru(lru)
 		INIT_LIST_HEAD(&lruvec->lists[lru]);
 }
+
+#if defined(CONFIG_NUMA_BALANCING) && !defined(LAST_NID_NOT_IN_PAGE_FLAGS)
+int page_xchg_last_nid(struct page *page, int nid)
+{
+	unsigned long old_flags, flags;
+	int last_nid;
+
+	do {
+		old_flags = flags = page->flags;
+		last_nid = page_last_nid(page);
+
+		flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
+		flags |= (nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
+	} while (unlikely(cmpxchg(&page->flags, old_flags, flags) != old_flags));
+
+	return last_nid;
+}
+#endif
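
The hot paths keep the cmpxchg() loop because page->flags can be updated
concurrently there.  Below is a user-space analogue of that retry pattern,
using C11 atomics as a stand-in for the kernel's cmpxchg(); the field
layout values are again illustrative, not the kernel's.

/*
 * Analogue of the retry loop in page_xchg_last_nid() above: rebuild the
 * flags word from a snapshot and retry if anyone raced with us.
 */
#include <stdatomic.h>
#include <stdio.h>

#define LAST_NID_PGSHIFT 24                      /* illustrative */
#define LAST_NID_MASK    0xffUL                  /* illustrative */

static _Atomic unsigned long page_flags;

static int xchg_last_nid(int nid)
{
        unsigned long old_flags, flags;
        int last_nid;

        do {
                old_flags = flags = atomic_load(&page_flags);
                last_nid = (flags >> LAST_NID_PGSHIFT) & LAST_NID_MASK;

                flags &= ~(LAST_NID_MASK << LAST_NID_PGSHIFT);
                flags |= ((unsigned long)nid & LAST_NID_MASK) << LAST_NID_PGSHIFT;
                /*
                 * If another thread changed page_flags since the load, the
                 * compare-and-swap fails and the loop retries on the fresh
                 * value; this is the role the unlikely(cmpxchg(...)) check
                 * plays in the kernel version.
                 */
        } while (!atomic_compare_exchange_weak(&page_flags, &old_flags, flags));

        return last_nid;
}

int main(void)
{
        printf("previous nid: %d\n", xchg_last_nid(1));  /* 0: field starts clear */
        printf("previous nid: %d\n", xchg_last_nid(2));  /* 1 */
        return 0;
}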