Commit 12d9986b authored by Martin J. Bligh, committed by Linus Torvalds

[PATCH] make __free_pages_bulk more comprehensible

I find __free_pages_bulk very hard to understand ...  (I was trying to mod
it for the non MAX_ORDER aligned zones, and cleaned it up first).  This
should make it much more comprehensible to mortal man ...  I benchmarked
the changes on the big 16x and it's no slower (actually it's about 0.5%
faster, but that's within experimental error).

I moved the creation of mask into __free_pages_bulk from the caller - it
seems to really belong inside there.  Then instead of doing weird limbo
dances with mask, I made it use order instead where it's more intuitive.
Personally I find this makes the whole thing a damned sight easier to
understand.
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent dd47df98
......@@ -179,20 +179,20 @@ static void destroy_compound_page(struct page *page, unsigned long order)
*/
static inline void __free_pages_bulk (struct page *page, struct page *base,
struct zone *zone, struct free_area *area, unsigned long mask,
unsigned int order)
struct zone *zone, struct free_area *area, unsigned int order)
{
unsigned long page_idx, index;
unsigned long page_idx, index, mask;
if (order)
destroy_compound_page(page, order);
mask = (~0UL) << order;
page_idx = page - base;
if (page_idx & ~mask)
BUG();
index = page_idx >> (1 + order);
zone->free_pages -= mask;
while (mask + (1 << (MAX_ORDER-1))) {
zone->free_pages += 1 << order;
while (order < MAX_ORDER-1) {
struct page *buddy1, *buddy2;
BUG_ON(area >= zone->free_area + MAX_ORDER);
......@@ -201,17 +201,15 @@ static inline void __free_pages_bulk (struct page *page, struct page *base,
* the buddy page is still allocated.
*/
break;
/*
* Move the buddy up one level.
* This code is taking advantage of the identity:
* -mask = 1+~mask
*/
buddy1 = base + (page_idx ^ -mask);
/* Move the buddy up one level. */
buddy1 = base + (page_idx ^ (1 << order));
buddy2 = base + page_idx;
BUG_ON(bad_range(zone, buddy1));
BUG_ON(bad_range(zone, buddy2));
list_del(&buddy1->lru);
mask <<= 1;
order++;
area++;
index >>= 1;
page_idx &= mask;
......@@ -255,12 +253,11 @@ static int
free_pages_bulk(struct zone *zone, int count,
struct list_head *list, unsigned int order)
{
unsigned long mask, flags;
unsigned long flags;
struct free_area *area;
struct page *base, *page = NULL;
int ret = 0;
mask = (~0UL) << order;
base = zone->zone_mem_map;
area = zone->free_area + order;
spin_lock_irqsave(&zone->lock, flags);
......@@ -270,7 +267,7 @@ free_pages_bulk(struct zone *zone, int count,
page = list_entry(list->prev, struct page, lru);
/* have to delete it as __free_pages_bulk list manipulates */
list_del(&page->lru);
__free_pages_bulk(page, base, zone, area, mask, order);
__free_pages_bulk(page, base, zone, area, order);
ret++;
}
spin_unlock_irqrestore(&zone->lock, flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment