Commit 1d798ca3 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: make compound_head() robust

Hugh has pointed out that a compound_head() call can be unsafe in some
contexts. Here's one example:

	CPU0					CPU1

isolate_migratepages_block()
  page_count()
    compound_head()
      !!PageTail() == true
					put_page()
					  tail->first_page = NULL
      head = tail->first_page
					alloc_pages(__GFP_COMP)
					   prep_compound_page()
					     tail->first_page = head
					     __SetPageTail(p);
      !!PageTail() == true
    <head == NULL dereferencing>

The race is purely theoretical. I don't think it's possible to trigger it
in practice. But who knows.

We can fix the race by changing how we encode PageTail() and compound_head()
within struct page, so that they can be updated in one shot.

The patch introduces page->compound_head in the third double word block,
in front of compound_dtor and compound_order. Bit 0 encodes PageTail();
if it is set, the remaining bits are a pointer to the head page.
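
In code, the new encoding boils down to the following accessors (a
condensed sketch of the helpers the patch adds in the diff below):

        /* Publish the tail flag (bit 0) and the head pointer in one store. */
        static inline void set_compound_head(struct page *page, struct page *head)
        {
                WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
        }

        static inline struct page *compound_head(struct page *page)
        {
                unsigned long head = READ_ONCE(page->compound_head);

                if (unlikely(head & 1))
                        return (struct page *)(head - 1);  /* tail: strip bit 0 */
                return page;                               /* head or base page */
        }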

The patch moves page->pmd_huge_pte out of this word, just in case an
architecture defines pgtable_t as something that can have bit 0 set.

hugetlb_cgroup uses page->lru.next in the second tail page to store a
pointer to struct hugetlb_cgroup. The patch switches it to page->private
in the second tail page instead. The space is free since ->first_page is
removed from the union.

The patch also opens up the possibility of removing the
HUGETLB_CGROUP_MIN_ORDER limitation, since there's now space in the first
tail page to store the struct hugetlb_cgroup pointer. But that's out of
scope for this patch.

That means page->compound_head shares storage space with:

 - page->lru.next;
 - page->next;
 - page->rcu_head.next;

That's too long a list to be absolutely sure, but it looks like nobody uses
bit 0 of the word.
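
To back that up, the patch adds a compile-time check in
free_tail_pages_check() (see the diff below) so that the list poison value
can never be mistaken for a tail-page marker:

        /*
         * We rely on page->lru.next never having bit 0 set unless the
         * page is PageTail(); make sure that holds even for a poisoned
         * ->lru.
         */
        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);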

page->rcu_head.next is guaranteed[1] to have bit 0 clear as long as we use
call_rcu(), call_rcu_bh(), call_rcu_sched(), or call_srcu(). But the future
call_rcu_lazy() is not allowed, as it makes use of the bit and we could
get a false-positive PageTail().

[1] http://lkml.kernel.org/g/20150827163634.GD4029@linux.vnet.ibm.com

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f1e61557
@@ -54,8 +54,8 @@ everything required is done by pgtable_page_ctor() and pgtable_page_dtor(),
 which must be called on PTE table allocation / freeing.

 Make sure the architecture doesn't use slab allocator for page table
-allocation: slab uses page->slab_cache and page->first_page for its pages.
-These fields share storage with page->ptl.
+allocation: slab uses page->slab_cache for its pages.
+This field shares storage with page->ptl.

 PMD split lock only makes sense if you have more than two page table
 levels.
......
@@ -169,7 +169,6 @@ CONFIG_FLATMEM_MANUAL=y
 # CONFIG_SPARSEMEM_MANUAL is not set
 CONFIG_FLATMEM=y
 CONFIG_FLAT_NODE_MEM_MAP=y
-CONFIG_PAGEFLAGS_EXTENDED=y
 CONFIG_SPLIT_PTLOCK_CPUS=4
 # CONFIG_PHYS_ADDR_T_64BIT is not set
 CONFIG_ZONE_DMA_FLAG=1
......
@@ -32,7 +32,7 @@ static inline struct hugetlb_cgroup *hugetlb_cgroup_from_page(struct page *page)
         if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                 return NULL;
-        return (struct hugetlb_cgroup *)page[2].lru.next;
+        return (struct hugetlb_cgroup *)page[2].private;
 }

 static inline
@@ -42,7 +42,7 @@ int set_hugetlb_cgroup(struct page *page, struct hugetlb_cgroup *h_cg)
         if (compound_order(page) < HUGETLB_CGROUP_MIN_ORDER)
                 return -1;
-        page[2].lru.next = (void *)h_cg;
+        page[2].private = (unsigned long)h_cg;
         return 0;
 }
......
@@ -430,46 +430,6 @@ static inline void compound_unlock_irqrestore(struct page *page,
 #endif
 }

-static inline struct page *compound_head_by_tail(struct page *tail)
-{
-        struct page *head = tail->first_page;
-
-        /*
-         * page->first_page may be a dangling pointer to an old
-         * compound page, so recheck that it is still a tail
-         * page before returning.
-         */
-        smp_rmb();
-        if (likely(PageTail(tail)))
-                return head;
-        return tail;
-}
-
-/*
- * Since either compound page could be dismantled asynchronously in THP
- * or we access asynchronously arbitrary positioned struct page, there
- * would be tail flag race. To handle this race, we should call
- * smp_rmb() before checking tail flag. compound_head_by_tail() did it.
- */
-static inline struct page *compound_head(struct page *page)
-{
-        if (unlikely(PageTail(page)))
-                return compound_head_by_tail(page);
-        return page;
-}
-
-/*
- * If we access compound page synchronously such as access to
- * allocated page, there is no need to handle tail flag race, so we can
- * check tail flag directly without any synchronization primitive.
- */
-static inline struct page *compound_head_fast(struct page *page)
-{
-        if (unlikely(PageTail(page)))
-                return page->first_page;
-        return page;
-}
-
 /*
  * The atomic page->_mapcount, starts from -1: so that transitions
  * both from it and to it can be tracked, using atomic_inc_and_test
@@ -518,7 +478,7 @@ static inline void get_huge_page_tail(struct page *page)
         VM_BUG_ON_PAGE(!PageTail(page), page);
         VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
         VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
-        if (compound_tail_refcounted(page->first_page))
+        if (compound_tail_refcounted(compound_head(page)))
                 atomic_inc(&page->_mapcount);
 }

@@ -541,13 +501,7 @@ static inline struct page *virt_to_head_page(const void *x)
 {
         struct page *page = virt_to_page(x);

-        /*
-         * We don't need to worry about synchronization of tail flag
-         * when we call virt_to_head_page() since it is only called for
-         * already allocated page and this page won't be freed until
-         * this virt_to_head_page() is finished. So use _fast variant.
-         */
-        return compound_head_fast(page);
+        return compound_head(page);
 }

 /*
@@ -1586,8 +1540,7 @@ static inline bool ptlock_init(struct page *page)
          * with 0. Make sure nobody took it in use in between.
          *
          * It can happen if arch try to use slab for page table allocation:
-         * slab code uses page->slab_cache and page->first_page (for tail
-         * pages), which share storage with page->ptl.
+         * slab code uses page->slab_cache, which share storage with page->ptl.
          */
         VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
         if (!ptlock_alloc(page))
......
@@ -111,7 +111,13 @@ struct page {
                 };
         };

-        /* Third double word block */
+        /*
+         * Third double word block
+         *
+         * WARNING: bit 0 of the first word encode PageTail(). That means
+         * the rest users of the storage space MUST NOT use the bit to
+         * avoid collision and false-positive PageTail().
+         */
         union {
                 struct list_head lru;   /* Pageout list, eg. active_list
                                          * protected by zone->lru_lock !
@@ -132,14 +138,23 @@ struct page {
                 struct rcu_head rcu_head;       /* Used by SLAB
                                                  * when destroying via RCU
                                                  */
-                /* First tail page of compound page */
+                /* Tail pages of compound page */
                 struct {
+                        unsigned long compound_head; /* If bit zero is set */
+
+                        /* First tail page only */
                         unsigned short int compound_dtor;
                         unsigned short int compound_order;
                 };

 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS
-                pgtable_t pmd_huge_pte; /* protected by page->ptl */
+                struct {
+                        unsigned long __pad;    /* do not overlay pmd_huge_pte
+                                                 * with compound_head to avoid
+                                                 * possible bit 0 collision.
+                                                 */
+                        pgtable_t pmd_huge_pte; /* protected by page->ptl */
+                };
 #endif
         };

@@ -160,7 +175,6 @@ struct page {
 #endif
 #endif
                 struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
-                struct page *first_page;        /* Compound tail pages */
         };

 #ifdef CONFIG_MEMCG
......
@@ -86,12 +86,7 @@ enum pageflags {
         PG_private,     /* If pagecache, has fs-private data */
         PG_private_2,   /* If pagecache, has fs aux data */
         PG_writeback,   /* Page is under writeback */
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
         PG_head,        /* A head page */
-        PG_tail,        /* A tail page */
-#else
-        PG_compound,    /* A compound page */
-#endif
         PG_swapcache,   /* Swap page: swp_entry_t in private */
         PG_mappedtodisk, /* Has blocks allocated on-disk */
         PG_reclaim,     /* To be reclaimed asap */
@@ -398,85 +393,46 @@ static inline void set_page_writeback_keepwrite(struct page *page)
         test_set_page_writeback_keepwrite(page);
 }

-#ifdef CONFIG_PAGEFLAGS_EXTENDED
-/*
- * System with lots of page flags available. This allows separate
- * flags for PageHead() and PageTail() checks of compound pages so that bit
- * tests can be used in performance sensitive paths. PageCompound is
- * generally not used in hot code paths except arch/powerpc/mm/init_64.c
- * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
- * and avoid handling those in real mode.
- */
 __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
-__PAGEFLAG(Tail, tail)

-static inline int PageCompound(struct page *page)
-{
-        return page->flags & ((1L << PG_head) | (1L << PG_tail));
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void ClearPageCompound(struct page *page)
-{
-        BUG_ON(!PageHead(page));
-        ClearPageHead(page);
-}
-#endif
-
-#define PG_head_mask ((1L << PG_head))
-
-#else
-/*
- * Reduce page flag use as much as possible by overlapping
- * compound page flags with the flags used for page cache pages. Possible
- * because PageCompound is always set for compound pages and not for
- * pages on the LRU and/or pagecache.
- */
-TESTPAGEFLAG(Compound, compound)
-__SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
-
-/*
- * PG_reclaim is used in combination with PG_compound to mark the
- * head and tail of a compound page. This saves one page flag
- * but makes it impossible to use compound pages for the page cache.
- * The PG_reclaim bit would have to be used for reclaim or readahead
- * if compound pages enter the page cache.
- *
- * PG_compound & PG_reclaim   => Tail page
- * PG_compound & ~PG_reclaim  => Head page
- */
-#define PG_head_mask ((1L << PG_compound))
-#define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
-
-static inline int PageHead(struct page *page)
-{
-        return ((page->flags & PG_head_tail_mask) == PG_head_mask);
-}
-
-static inline int PageTail(struct page *page)
-{
-        return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
-}
-
-static inline void __SetPageTail(struct page *page)
-{
-        page->flags |= PG_head_tail_mask;
-}
-
-static inline void __ClearPageTail(struct page *page)
-{
-        page->flags &= ~PG_head_tail_mask;
-}
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline void ClearPageCompound(struct page *page)
-{
-        BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
-        clear_bit(PG_compound, &page->flags);
-}
-#endif
-#endif /* !PAGEFLAGS_EXTENDED */
+static inline int PageTail(struct page *page)
+{
+        return READ_ONCE(page->compound_head) & 1;
+}
+
+static inline void set_compound_head(struct page *page, struct page *head)
+{
+        WRITE_ONCE(page->compound_head, (unsigned long)head + 1);
+}
+
+static inline void clear_compound_head(struct page *page)
+{
+        WRITE_ONCE(page->compound_head, 0);
+}
+
+static inline struct page *compound_head(struct page *page)
+{
+        unsigned long head = READ_ONCE(page->compound_head);
+
+        if (unlikely(head & 1))
+                return (struct page *) (head - 1);
+        return page;
+}
+
+static inline int PageCompound(struct page *page)
+{
+        return PageHead(page) || PageTail(page);
+}
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline void ClearPageCompound(struct page *page)
+{
+        BUG_ON(!PageHead(page));
+        ClearPageHead(page);
+}
+#endif
+
+#define PG_head_mask ((1L << PG_head))

 #ifdef CONFIG_HUGETLB_PAGE
 int PageHuge(struct page *page);
......
@@ -200,18 +200,6 @@ config MEMORY_HOTREMOVE
         depends on MEMORY_HOTPLUG && ARCH_ENABLE_MEMORY_HOTREMOVE
         depends on MIGRATION

-#
-# If we have space for more page flags then we can enable additional
-# optimizations and functionality.
-#
-# Regular Sparsemem takes page flag bits for the sectionid if it does not
-# use a virtual memmap. Disable extended page flags for 32 bit platforms
-# that require the use of a sectionid in the page flags.
-#
-config PAGEFLAGS_EXTENDED
-        def_bool y
-        depends on 64BIT || SPARSEMEM_VMEMMAP || !SPARSEMEM
-
 # Heavily threaded applications may benefit from splitting the mm-wide
 # page_table_lock, so that faults on different parts of the user address
 # space can be handled with less contention: split it at this NR_CPUS.
......
@@ -25,12 +25,7 @@ static const struct trace_print_flags pageflag_names[] = {
         {1UL << PG_private,      "private"      },
         {1UL << PG_private_2,    "private_2"    },
         {1UL << PG_writeback,    "writeback"    },
-#ifdef CONFIG_PAGEFLAGS_EXTENDED
         {1UL << PG_head,         "head"         },
-        {1UL << PG_tail,         "tail"         },
-#else
-        {1UL << PG_compound,     "compound"     },
-#endif
         {1UL << PG_swapcache,    "swapcache"    },
         {1UL << PG_mappedtodisk, "mappedtodisk" },
         {1UL << PG_reclaim,      "reclaim"      },
......
@@ -1755,8 +1755,7 @@ static void __split_huge_page_refcount(struct page *page,
                                  (1L << PG_unevictable)));
         page_tail->flags |= (1L << PG_dirty);

-        /* clear PageTail before overwriting first_page */
-        smp_wmb();
+        clear_compound_head(page_tail);

         if (page_is_young(page))
                 set_page_young(page_tail);
......
@@ -1001,9 +1001,8 @@ static void destroy_compound_gigantic_page(struct page *page,
         struct page *p = page + 1;

         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
-                __ClearPageTail(p);
+                clear_compound_head(p);
                 set_page_refcounted(p);
-                p->first_page = NULL;
         }

         set_compound_order(page, 0);
@@ -1276,10 +1275,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned long order)
                  */
                 __ClearPageReserved(p);
                 set_page_count(p, 0);
-                p->first_page = page;
-                /* Make sure p->first_page is always valid for PageTail() */
-                smp_wmb();
-                __SetPageTail(p);
+                set_compound_head(p, page);
         }
 }
......
@@ -385,7 +385,7 @@ void __init hugetlb_cgroup_file_init(void)
                 /*
                  * Add cgroup control files only if the huge page consists
                  * of more than two normal pages. This is because we use
-                 * page[2].lru.next for storing cgroup details.
+                 * page[2].private for storing cgroup details.
                  */
                 if (huge_page_order(h) >= HUGETLB_CGROUP_MIN_ORDER)
                         __hugetlb_cgroup_file_init(hstate_index(h));
......
@@ -80,9 +80,9 @@ static inline void __get_page_tail_foll(struct page *page,
          * speculative page access (like in
          * page_cache_get_speculative()) on tail pages.
          */
-        VM_BUG_ON_PAGE(atomic_read(&page->first_page->_count) <= 0, page);
+        VM_BUG_ON_PAGE(atomic_read(&compound_head(page)->_count) <= 0, page);
         if (get_page_head)
-                atomic_inc(&page->first_page->_count);
+                atomic_inc(&compound_head(page)->_count);
         get_huge_page_tail(page);
 }
......
@@ -776,8 +776,6 @@ static int me_huge_page(struct page *p, unsigned long pfn)
 #define lru            (1UL << PG_lru)
 #define swapbacked     (1UL << PG_swapbacked)
 #define head           (1UL << PG_head)
-#define tail           (1UL << PG_tail)
-#define compound       (1UL << PG_compound)
 #define slab           (1UL << PG_slab)
 #define reserved       (1UL << PG_reserved)
@@ -800,12 +798,7 @@ static struct page_state {
          */
         { slab,         slab,           MF_MSG_SLAB,    me_kernel },

-#ifdef CONFIG_PAGEFLAGS_EXTENDED
         { head,         head,           MF_MSG_HUGE,    me_huge_page },
-        { tail,         tail,           MF_MSG_HUGE,    me_huge_page },
-#else
-        { compound,     compound,       MF_MSG_HUGE,    me_huge_page },
-#endif

         { sc|dirty,     sc|dirty,       MF_MSG_DIRTY_SWAPCACHE, me_swapcache_dirty },
         { sc|dirty,     sc,             MF_MSG_CLEAN_SWAPCACHE, me_swapcache_clean },
......
@@ -445,15 +445,15 @@ static void bad_page(struct page *page, const char *reason,
 /*
  * Higher-order pages are called "compound pages". They are structured thusly:
  *
- * The first PAGE_SIZE page is called the "head page".
+ * The first PAGE_SIZE page is called the "head page" and have PG_head set.
  *
- * The remaining PAGE_SIZE pages are called "tail pages".
+ * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
+ * in bit 0 of page->compound_head. The rest of bits is pointer to head page.
 *
- * All pages have PG_compound set. All tail pages have their ->first_page
- * pointing at the head page.
+ * The first tail page's ->compound_dtor holds the offset in array of compound
+ * page destructors. See compound_page_dtors.
 *
- * The first tail page's ->lru.next holds the address of the compound page's
- * put_page() function. Its ->lru.prev holds the order of allocation.
+ * The first tail page's ->compound_order holds the order of allocation.
  * This usage means that zero-order pages may not be compound.
  */
@@ -473,10 +473,7 @@ void prep_compound_page(struct page *page, unsigned long order)
         for (i = 1; i < nr_pages; i++) {
                 struct page *p = page + i;
                 set_page_count(p, 0);
-                p->first_page = page;
-                /* Make sure p->first_page is always valid for PageTail() */
-                smp_wmb();
-                __SetPageTail(p);
+                set_compound_head(p, page);
         }
 }
@@ -854,17 +851,30 @@ static void free_one_page(struct zone *zone,

 static int free_tail_pages_check(struct page *head_page, struct page *page)
 {
-        if (!IS_ENABLED(CONFIG_DEBUG_VM))
-                return 0;
+        int ret = 1;
+
+        /*
+         * We rely page->lru.next never has bit 0 set, unless the page
+         * is PageTail(). Let's make sure that's true even for poisoned ->lru.
+         */
+        BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
+
+        if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
+                ret = 0;
+                goto out;
+        }
         if (unlikely(!PageTail(page))) {
                 bad_page(page, "PageTail not set", 0);
-                return 1;
+                goto out;
         }
-        if (unlikely(page->first_page != head_page)) {
-                bad_page(page, "first_page not consistent", 0);
-                return 1;
+        if (unlikely(compound_head(page) != head_page)) {
+                bad_page(page, "compound_head not consistent", 0);
+                goto out;
         }
-        return 0;
+        ret = 0;
+out:
+        clear_compound_head(page);
+        return ret;
 }

 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
@@ -931,6 +941,10 @@ void __meminit reserve_bootmem_region(unsigned long start, unsigned long end)
                         struct page *page = pfn_to_page(start_pfn);

                         init_reserved_page(start_pfn);
+
+                        /* Avoid false-positive PageTail() */
+                        INIT_LIST_HEAD(&page->lru);
+
                         SetPageReserved(page);
                 }
         }
......
@@ -201,7 +201,7 @@ void put_refcounted_compound_page(struct page *page_head, struct page *page)
                         __put_single_page(page);
                         return;
                 }
-                VM_BUG_ON_PAGE(page_head != page->first_page, page);
+                VM_BUG_ON_PAGE(page_head != compound_head(page), page);
                 /*
                  * We can release the refcount taken by
                  * get_page_unless_zero() now that
@@ -262,7 +262,7 @@ static void put_compound_page(struct page *page)
          * Case 3 is possible, as we may race with
          * __split_huge_page_refcount tearing down a THP page.
          */
-        page_head = compound_head_by_tail(page);
+        page_head = compound_head(page);
         if (!__compound_tail_refcounted(page_head))
                 put_unrefcounted_compound_page(page_head, page);
         else
......