Commit 725d704e authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: VM_BUG_ON

Introduce a VM_BUG_ON, which is turned on with CONFIG_DEBUG_VM.  Use this
in the lightweight, inline refcounting functions; in the PageLRU and
PageActive checks in vmscan, because they're pretty well confined to
vmscan; and in the page allocate/free fastpaths, which can be the hottest
parts of the kernel for kbuilds.

Unlike BUG_ON, VM_BUG_ON must not be used to execute statements with
side-effects, and should not be used outside core mm code (a sketch of the
side-effect hazard follows the commit metadata below).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent a6ca1b99
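For illustration only (not part of the patch): with CONFIG_DEBUG_VM unset,
VM_BUG_ON() expands to a no-op that never evaluates its argument, so any
side-effect placed inside it silently vanishes on non-debug kernels. A
minimal sketch, reusing the refcount helpers this patch touches:

	#ifdef CONFIG_DEBUG_VM
	#define VM_BUG_ON(cond)		BUG_ON(cond)
	#else
	#define VM_BUG_ON(cond)		do { } while (0)	/* argument never evaluated */
	#endif

	/*
	 * WRONG: put_page_testzero() decrements the refcount as a side-effect;
	 * with CONFIG_DEBUG_VM unset the call is compiled out entirely and the
	 * page reference is never dropped.
	 */
	VM_BUG_ON(put_page_testzero(page));

	/* OK: a pure predicate; compiling it out only loses the check. */
	VM_BUG_ON(atomic_read(&page->_count) == 0);

This is also why the patch converts only pure predicate checks, such as the
PageLRU() and PageActive() tests.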
@@ -278,6 +278,12 @@ struct page {
  */
 #include <linux/page-flags.h>
 
+#ifdef CONFIG_DEBUG_VM
+#define VM_BUG_ON(cond) BUG_ON(cond)
+#else
+#define VM_BUG_ON(condition) do { } while(0)
+#endif
+
 /*
  * Methods to modify the page usage count.
  *
@@ -297,7 +303,7 @@ struct page {
  */
 static inline int put_page_testzero(struct page *page)
 {
-	BUG_ON(atomic_read(&page->_count) == 0);
+	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	return atomic_dec_and_test(&page->_count);
 }
 
@@ -307,6 +313,7 @@ static inline int put_page_testzero(struct page *page)
  */
 static inline int get_page_unless_zero(struct page *page)
 {
+	VM_BUG_ON(PageCompound(page));
 	return atomic_inc_not_zero(&page->_count);
 }
 
@@ -323,6 +330,7 @@ static inline void get_page(struct page *page)
 {
 	if (unlikely(PageCompound(page)))
 		page = (struct page *)page_private(page);
+	VM_BUG_ON(atomic_read(&page->_count) == 0);
 	atomic_inc(&page->_count);
 }
 
......
@@ -24,8 +24,8 @@ static inline void set_page_count(struct page *page, int v)
  */
 static inline void set_page_refcounted(struct page *page)
 {
-	BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
-	BUG_ON(atomic_read(&page->_count));
+	VM_BUG_ON(PageCompound(page) && page_private(page) != (unsigned long)page);
+	VM_BUG_ON(atomic_read(&page->_count));
 	set_page_count(page, 1);
 }
 
......
@@ -127,7 +127,6 @@ static int bad_range(struct zone *zone, struct page *page)
 
 	return 0;
 }
-
 #else
 static inline int bad_range(struct zone *zone, struct page *page)
 {
@@ -218,12 +217,12 @@ static inline void prep_zero_page(struct page *page, int order, gfp_t gfp_flags)
 {
 	int i;
 
-	BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
+	VM_BUG_ON((gfp_flags & (__GFP_WAIT | __GFP_HIGHMEM)) == __GFP_HIGHMEM);
 	/*
 	 * clear_highpage() will use KM_USER0, so it's a bug to use __GFP_ZERO
 	 * and __GFP_HIGHMEM from hard or soft interrupt context.
 	 */
-	BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
+	VM_BUG_ON((gfp_flags & __GFP_HIGHMEM) && in_interrupt());
 	for (i = 0; i < (1 << order); i++)
 		clear_highpage(page + i);
 }
@@ -347,8 +346,8 @@ static inline void __free_one_page(struct page *page,
 
 	page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
 
-	BUG_ON(page_idx & (order_size - 1));
-	BUG_ON(bad_range(zone, page));
+	VM_BUG_ON(page_idx & (order_size - 1));
+	VM_BUG_ON(bad_range(zone, page));
 
 	zone->free_pages += order_size;
 	while (order < MAX_ORDER-1) {
@@ -421,7 +420,7 @@ static void free_pages_bulk(struct zone *zone, int count,
 	while (count--) {
 		struct page *page;
 
-		BUG_ON(list_empty(list));
+		VM_BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
@@ -512,7 +511,7 @@ static inline void expand(struct zone *zone, struct page *page,
 		area--;
 		high--;
 		size >>= 1;
-		BUG_ON(bad_range(zone, &page[size]));
+		VM_BUG_ON(bad_range(zone, &page[size]));
 		list_add(&page[size].lru, &area->free_list);
 		area->nr_free++;
 		set_page_order(&page[size], high);
@@ -761,8 +760,8 @@ void split_page(struct page *page, unsigned int order)
 {
 	int i;
 
-	BUG_ON(PageCompound(page));
-	BUG_ON(!page_count(page));
+	VM_BUG_ON(PageCompound(page));
+	VM_BUG_ON(!page_count(page));
 	for (i = 1; i < (1 << order); i++)
 		set_page_refcounted(page + i);
 }
@@ -809,7 +808,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 	local_irq_restore(flags);
 	put_cpu();
 
-	BUG_ON(bad_range(zone, page));
+	VM_BUG_ON(bad_range(zone, page));
 	if (prep_new_page(page, order, gfp_flags))
 		goto again;
 	return page;
@@ -1083,7 +1082,7 @@ fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
 	 * get_zeroed_page() returns a 32-bit address, which cannot represent
 	 * a highmem page
 	 */
-	BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
+	VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);
 
 	page = alloc_pages(gfp_mask | __GFP_ZERO, 0);
 	if (page)
@@ -1116,7 +1115,7 @@ EXPORT_SYMBOL(__free_pages);
 fastcall void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
-		BUG_ON(!virt_addr_valid((void *)addr));
+		VM_BUG_ON(!virt_addr_valid((void *)addr));
 		__free_pages(virt_to_page((void *)addr), order);
 	}
 }
......
@@ -233,7 +233,7 @@ void fastcall __page_cache_release(struct page *page)
 		struct zone *zone = page_zone(page);
 
 		spin_lock_irqsave(&zone->lru_lock, flags);
-		BUG_ON(!PageLRU(page));
+		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
 		del_page_from_lru(zone, page);
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -284,7 +284,7 @@ void release_pages(struct page **pages, int nr, int cold)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(!PageLRU(page));
+		VM_BUG_ON(!PageLRU(page));
 		__ClearPageLRU(page);
 		del_page_from_lru(zone, page);
 	}
@@ -337,7 +337,7 @@ void __pagevec_release_nonlru(struct pagevec *pvec)
 	for (i = 0; i < pagevec_count(pvec); i++) {
 		struct page *page = pvec->pages[i];
 
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		if (put_page_testzero(page))
 			pagevec_add(&pages_to_free, page);
 	}
@@ -364,7 +364,7 @@ void __pagevec_lru_add(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		add_page_to_inactive_list(zone, page);
 	}
@@ -391,9 +391,9 @@ void __pagevec_lru_add_active(struct pagevec *pvec)
 			zone = pagezone;
 			spin_lock_irq(&zone->lru_lock);
 		}
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		BUG_ON(PageActive(page));
+		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
 		add_page_to_active_list(zone, page);
 	}
......
@@ -440,7 +440,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (TestSetPageLocked(page))
 			goto keep;
 
-		BUG_ON(PageActive(page));
+		VM_BUG_ON(PageActive(page));
 
 		sc->nr_scanned++;
 
@@ -564,7 +564,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		unlock_page(page);
 keep:
 		list_add(&page->lru, &ret_pages);
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 	}
 	list_splice(&ret_pages, page_list);
 	if (pagevec_count(&freed_pvec))
@@ -603,7 +603,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		page = lru_to_page(src);
 		prefetchw_prev_lru_page(page, src, flags);
 
-		BUG_ON(!PageLRU(page));
+		VM_BUG_ON(!PageLRU(page));
 		list_del(&page->lru);
 		target = src;
 
*/ */
while (!list_empty(&page_list)) { while (!list_empty(&page_list)) {
page = lru_to_page(&page_list); page = lru_to_page(&page_list);
BUG_ON(PageLRU(page)); VM_BUG_ON(PageLRU(page));
SetPageLRU(page); SetPageLRU(page);
list_del(&page->lru); list_del(&page->lru);
if (PageActive(page)) if (PageActive(page))
@@ -797,9 +797,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
 		prefetchw_prev_lru_page(page, &l_inactive, flags);
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		BUG_ON(!PageActive(page));
+		VM_BUG_ON(!PageActive(page));
 		ClearPageActive(page);
 		list_move(&page->lru, &zone->inactive_list);
@@ -827,9 +827,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	while (!list_empty(&l_active)) {
 		page = lru_to_page(&l_active);
 		prefetchw_prev_lru_page(page, &l_active, flags);
-		BUG_ON(PageLRU(page));
+		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
-		BUG_ON(!PageActive(page));
+		VM_BUG_ON(!PageActive(page));
 		list_move(&page->lru, &zone->active_list);
 		pgmoved++;
 		if (!pagevec_add(&pvec, page)) {
......