Commit ebf360f9 authored by Andrea Arcangeli, committed by Linus Torvalds

mm: hugetlbfs: move the put/get_page slab and hugetlbfs optimization in a faster path

We don't actually need a reference on the head page in the slab and
hugetlbfs paths, as long as we add a smp_rmb() which should be faster
than get_page_unless_zero.

[akpm@linux-foundation.org: fix typo in comment]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Pravin Shelar <pshelar@nicira.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Ben Hutchings <bhutchings@solarflare.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Johannes Weiner <jweiner@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a0368d4e
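
The ordering the changelog relies on can be sketched outside the kernel. The userspace model below is not the kernel code, and none of its names (split_side, reader_fast_path, the head/tail structs) exist in mm/; it only illustrates the barrier pairing the patch depends on: the split side clears the tail marker and issues a write barrier before the head can be torn down, so a reader that looks at the head first and then re-checks the tail marker after a read barrier either still sees the marker set (and may trust what it read about the head) or sees it clear and falls back to treating the page as a single page. C11 fences stand in for the kernel's smp_wmb()/smp_rmb().

/* Userspace model of the smp_rmb() fast path (illustrative, not kernel code). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct head {
	atomic_int alive;		/* stands in for the compound head page */
};

struct tail {
	atomic_bool is_tail;		/* stands in for PageTail() */
	struct head *head;
};

/*
 * Split/free side: clear the tail marker, then a write barrier, then tear
 * down the head (the split_huge_page side enforces the same order between
 * clearing PageTail and freeing/reallocating the head page).
 */
static void split_side(struct tail *t)
{
	atomic_store_explicit(&t->is_tail, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~ smp_wmb() */
	atomic_store_explicit(&t->head->alive, 0, memory_order_relaxed);
}

/*
 * Reader fast path: no reference is taken on the head.  Look at the head
 * first, then a read barrier, then re-check the tail marker; if the marker
 * is gone, treat the page as a single page instead.
 */
static bool reader_fast_path(struct tail *t)
{
	int head_alive;

	head_alive = atomic_load_explicit(&t->head->alive, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
	if (atomic_load_explicit(&t->is_tail, memory_order_relaxed))
		return head_alive != 0;	/* tail still set: the head read was valid */
	return false;			/* split ran first: single-page fallback */
}

int main(void)
{
	struct head h = { .alive = 1 };
	struct tail t = { .is_tail = true, .head = &h };

	printf("before split: %d\n", reader_fast_path(&t));
	split_side(&t);
	printf("after split:  %d\n", reader_fast_path(&t));
	return 0;
}

The point of the patch is that this re-check costs only a read barrier, whereas the old fast path paid for an atomic get_page_unless_zero() on the head before it could even look at PageSlab()/PageHeadHuge().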
@@ -86,28 +86,39 @@ static void put_compound_page(struct page *page)
 		/* __split_huge_page_refcount can run under us */
 		struct page *page_head = compound_trans_head(page);
 
-		if (likely(page != page_head &&
-			   get_page_unless_zero(page_head))) {
-			unsigned long flags;
-
-			/*
-			 * THP can not break up slab pages so avoid taking
-			 * compound_lock(). Slab performs non-atomic bit ops
-			 * on page->flags for better performance. In particular
-			 * slab_unlock() in slub used to be a hot path. It is
-			 * still hot on arches that do not support
-			 * this_cpu_cmpxchg_double().
-			 */
+		/*
+		 * THP can not break up slab pages so avoid taking
+		 * compound_lock(). Slab performs non-atomic bit ops
+		 * on page->flags for better performance. In
+		 * particular slab_unlock() in slub used to be a hot
+		 * path. It is still hot on arches that do not support
+		 * this_cpu_cmpxchg_double().
+		 *
+		 * If "page" is part of a slab or hugetlbfs page it
+		 * cannot be splitted and the head page cannot change
+		 * from under us. And if "page" is part of a THP page
+		 * under splitting, if the head page pointed by the
+		 * THP tail isn't a THP head anymore, we'll find
+		 * PageTail clear after smp_rmb() and we'll treat it
+		 * as a single page.
+		 */
 			if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+				/*
+				 * If "page" is a THP tail, we must read the tail page
+				 * flags after the head page flags. The
+				 * split_huge_page side enforces write memory
+				 * barriers between clearing PageTail and before the
+				 * head page can be freed and reallocated.
+				 */
+				smp_rmb();
 				if (likely(PageTail(page))) {
 					/*
 					 * __split_huge_page_refcount
 					 * cannot race here.
 					 */
 					VM_BUG_ON(!PageHead(page_head));
+					VM_BUG_ON(page_mapcount(page) <= 0);
 					atomic_dec(&page->_mapcount);
-					if (put_page_testzero(page_head))
-						VM_BUG_ON(1);
 					if (put_page_testzero(page_head))
 						__put_compound_page(page_head);
 					return;
@@ -123,8 +134,13 @@ static void put_compound_page(struct page *page)
 					 * reallocated as slab on
 					 * x86).
 					 */
-					goto skip_lock;
+					goto out_put_single;
 			}
+
+		if (likely(page != page_head &&
+			   get_page_unless_zero(page_head))) {
+			unsigned long flags;
+
 			/*
 			 * page_head wasn't a dangling pointer but it
 			 * may not be a head page anymore by the time
@@ -135,7 +151,6 @@ static void put_compound_page(struct page *page)
 			if (unlikely(!PageTail(page))) {
 				/* __split_huge_page_refcount run before us */
 				compound_unlock_irqrestore(page_head, flags);
-skip_lock:
 				if (put_page_testzero(page_head)) {
 					/*
 					 * The head page may have been
@@ -221,12 +236,12 @@ bool __get_page_tail(struct page *page)
 	 * split_huge_page().
 	 */
 	unsigned long flags;
-	bool got = false;
+	bool got;
 	struct page *page_head = compound_trans_head(page);
 
-	if (likely(page != page_head && get_page_unless_zero(page_head))) {
 		/* Ref to put_compound_page() comment. */
 		if (PageSlab(page_head) || PageHeadHuge(page_head)) {
+			smp_rmb();
 			if (likely(PageTail(page))) {
 				/*
 				 * This is a hugetlbfs page or a slab
@@ -234,7 +249,7 @@ bool __get_page_tail(struct page *page)
 				 * cannot race here.
 				 */
 				VM_BUG_ON(!PageHead(page_head));
-				__get_page_tail_foll(page, false);
+				__get_page_tail_foll(page, true);
 				return true;
 			} else {
 				/*
@@ -246,11 +261,12 @@ bool __get_page_tail(struct page *page)
 				 * (only possible if reallocated as
 				 * slab on x86).
 				 */
-				put_page(page_head);
 				return false;
 			}
 		}
 
+	got = false;
+	if (likely(page != page_head && get_page_unless_zero(page_head))) {
 		/*
 		 * page_head wasn't a dangling pointer but it
 		 * may not be a head page anymore by the time