Commit 4504a57e authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] alloc_pages cleanup

Cleanup patch from Martin Bligh: convert some loops that are naturally
`for' loops into `for' loops, and add some commentary.
parent 15a37ba2
...@@ -323,22 +323,23 @@ balance_classzone(zone_t * classzone, unsigned int gfp_mask, ...@@ -323,22 +323,23 @@ balance_classzone(zone_t * classzone, unsigned int gfp_mask,
struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist) struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_t *zonelist)
{ {
unsigned long min; unsigned long min;
zone_t **zone, * classzone; zone_t **zones, *classzone;
struct page * page; struct page * page;
int freed; int freed, i;
KERNEL_STAT_ADD(pgalloc, 1<<order); KERNEL_STAT_ADD(pgalloc, 1<<order);
zone = zonelist->zones; zones = zonelist->zones; /* the list of zones suitable for gfp_mask */
classzone = *zone; classzone = zones[0];
if (classzone == NULL) if (classzone == NULL) /* no zones in the zonelist */
return NULL; return NULL;
/* Go through the zonelist once, looking for a zone with enough free */
min = 1UL << order; min = 1UL << order;
for (;;) { for (i = 0; zones[i] != NULL; i++) {
zone_t *z = *(zone++); zone_t *z = zones[i];
if (!z)
break;
/* the incremental min is allegedly to discourage fallback */
min += z->pages_low; min += z->pages_low;
if (z->free_pages > min) { if (z->free_pages > min) {
page = rmqueue(z, order); page = rmqueue(z, order);
...@@ -349,16 +350,15 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_ ...@@ -349,16 +350,15 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
classzone->need_balance = 1; classzone->need_balance = 1;
mb(); mb();
/* we're somewhat low on memory, failed to find what we needed */
if (waitqueue_active(&kswapd_wait)) if (waitqueue_active(&kswapd_wait))
wake_up_interruptible(&kswapd_wait); wake_up_interruptible(&kswapd_wait);
zone = zonelist->zones; /* Go through the zonelist again, taking __GFP_HIGH into account */
min = 1UL << order; min = 1UL << order;
for (;;) { for (i = 0; zones[i] != NULL; i++) {
unsigned long local_min; unsigned long local_min;
zone_t *z = *(zone++); zone_t *z = zones[i];
if (!z)
break;
local_min = z->pages_min; local_min = z->pages_min;
if (gfp_mask & __GFP_HIGH) if (gfp_mask & __GFP_HIGH)
...@@ -375,11 +375,9 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_ ...@@ -375,11 +375,9 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
rebalance: rebalance:
if (current->flags & (PF_MEMALLOC | PF_MEMDIE)) { if (current->flags & (PF_MEMALLOC | PF_MEMDIE)) {
zone = zonelist->zones; /* go through the zonelist yet again, ignoring mins */
for (;;) { for (i = 0; zones[i] != NULL; i++) {
zone_t *z = *(zone++); zone_t *z = zones[i];
if (!z)
break;
page = rmqueue(z, order); page = rmqueue(z, order);
if (page) if (page)
...@@ -403,12 +401,10 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_ ...@@ -403,12 +401,10 @@ struct page * __alloc_pages(unsigned int gfp_mask, unsigned int order, zonelist_
if (page) if (page)
return page; return page;
zone = zonelist->zones; /* go through the zonelist yet one more time */
min = 1UL << order; min = 1UL << order;
for (;;) { for (i = 0; zones[i] != NULL; i++) {
zone_t *z = *(zone++); zone_t *z = zones[i];
if (!z)
break;
min += z->pages_min; min += z->pages_min;
if (z->free_pages > min) { if (z->free_pages > min) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment