Commit fa83523f authored by John Dykstra, committed by Ingo Molnar

x86/mm/pat: Improve scaling of pat_pagerange_is_ram()

Function pat_pagerange_is_ram() scales poorly to large address
ranges because it probes the resource tree once for each 4 KiB
page; a 1 GB range means 262,144 separate lookups.

On a 2.6 GHz Opteron, this function consumes 34 ms for a 1 GB range.

It is called twice during untrack_pfn_vma(), slowing process
cleanup and handicapping the OOM killer.

This replacement consumes less than 1 ms under the same conditions.

Signed-off-by: John Dykstra <jdykstra@cray.com> on behalf of Cray Inc.
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1337980366.1979.6.camel@redwood
[ Small stylistic cleanups and renames ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1b38a3a1
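
How the rewrite scales: instead of querying the resource tree once per page, the new code makes a single walk_system_ram_range() pass, which invokes a callback once per contiguous block of RAM intersecting the range, so the cost is proportional to the number of RAM regions rather than the number of pages. Before the diff itself, here is a minimal standalone C model of that state machine; it is a sketch, not kernel code. The names ram_map, range_callback, mock_walk_ram_range and range_is_ram are invented stand-ins for the e820-backed resource tree, pagerange_is_ram_callback(), walk_system_ram_range() and pat_pagerange_is_ram(). A callback invocation that starts past state->cur_pfn reveals a hole (non-RAM), any invocation with pages reveals RAM, and the walk stops early once both have been seen.

    #include <stdio.h>

    struct ram_range {
            unsigned long start_pfn;
            unsigned long nr_pages;
    };

    /* Hypothetical RAM map standing in for the e820-backed resource tree. */
    static const struct ram_range ram_map[] = {
            { 0x100,  0x700  },     /* pfns 0x100..0x7ff   */
            { 0x1000, 0x1000 },     /* pfns 0x1000..0x1fff */
    };

    struct pagerange_state {
            unsigned long cur_pfn;
            int ram;
            int not_ram;
    };

    /* Same accumulation logic as the patch's pagerange_is_ram_callback(). */
    static int range_callback(unsigned long initial_pfn, unsigned long nr_pages,
                              void *arg)
    {
            struct pagerange_state *state = arg;

            state->not_ram |= initial_pfn > state->cur_pfn; /* hole before this block */
            state->ram     |= nr_pages > 0;                 /* some RAM exists        */
            state->cur_pfn  = initial_pfn + nr_pages;

            return state->ram && state->not_ram;            /* nonzero stops the walk */
    }

    /* Stand-in for walk_system_ram_range(): invoke the callback once per
     * intersection of [start_pfn, start_pfn + nr_pages) with the RAM map. */
    static int mock_walk_ram_range(unsigned long start_pfn, unsigned long nr_pages,
                                   void *arg,
                                   int (*cb)(unsigned long, unsigned long, void *))
    {
            unsigned long end_pfn = start_pfn + nr_pages;
            unsigned int i;
            int ret = 0;

            for (i = 0; i < sizeof(ram_map) / sizeof(ram_map[0]); i++) {
                    unsigned long lo = ram_map[i].start_pfn;
                    unsigned long hi = lo + ram_map[i].nr_pages;

                    if (lo < start_pfn)
                            lo = start_pfn;
                    if (hi > end_pfn)
                            hi = end_pfn;
                    if (lo >= hi)
                            continue;
                    ret = cb(lo, hi - lo, arg);
                    if (ret)
                            break;
            }
            return ret;
    }

    /* Mirrors pat_pagerange_is_ram()'s convention:
     * 1 = only RAM seen, 0 = no RAM seen, -1 = both RAM and a hole seen. */
    static int range_is_ram(unsigned long start_pfn, unsigned long nr_pages)
    {
            struct pagerange_state state = { start_pfn, 0, 0 };
            int ret = mock_walk_ram_range(start_pfn, nr_pages, &state,
                                          range_callback);

            return (ret > 0) ? -1 : (state.ram ? 1 : 0);
    }

    int main(void)
    {
            printf("%d\n", range_is_ram(0x200, 0x100)); /* inside RAM    ->  1 */
            printf("%d\n", range_is_ram(0x800, 0x100)); /* inside a hole ->  0 */
            printf("%d\n", range_is_ram(0x800, 0x900)); /* hole then RAM -> -1 */
            return 0;
    }

Run as-is this prints 1, 0, -1. Note the early exit: as in the patch, the walk aborts as soon as the range is known to be mixed.
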
arch/x86/mm/pat.c
@@ -158,13 +158,31 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 	return req_type;
 }
 
-static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
-{
-	int ram_page = 0, not_rampage = 0;
-	unsigned long page_nr;
-
-	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
-	     ++page_nr) {
+struct pagerange_state {
+	unsigned long	cur_pfn;
+	int		ram;
+	int		not_ram;
+};
+
+static int
+pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
+{
+	struct pagerange_state *state = arg;
+
+	state->not_ram	|= initial_pfn > state->cur_pfn;
+	state->ram	|= total_nr_pages > 0;
+	state->cur_pfn	 = initial_pfn + total_nr_pages;
+
+	return state->ram && state->not_ram;
+}
+
+static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
+{
+	int ret = 0;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	struct pagerange_state state = {start_pfn, 0, 0};
+
 	/*
 	 * For legacy reasons, physical address range in the legacy ISA
 	 * region is tracked as non-RAM. This will allow users of
@@ -172,17 +190,15 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 	 * some of those portions are listed(or not even listed) with
 	 * different e820 types(RAM/reserved/..)
 	 */
-		if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
-		    page_is_ram(page_nr))
-			ram_page = 1;
-		else
-			not_rampage = 1;
-
-		if (ram_page == not_rampage)
-			return -1;
-	}
-
-	return ram_page;
+	if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
+		start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
 
+	if (start_pfn < end_pfn) {
+		ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
+				&state, pagerange_is_ram_callback);
+	}
+
+	return (ret > 0) ? -1 : (state.ram ? 1 : 0);
 }
 
 /*
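
A note on the ISA clamp the rewrite keeps from the old loop: ISA_END_ADDRESS is 0x100000 (1 MiB) on x86, so with 4 KiB pages the new function bumps any start_pfn below 0x100 up to 0x100, preserving the old behavior of never classifying the legacy ISA region by its e820 type. A tiny self-contained sketch, with the macros redefined locally purely for illustration:

    #include <stdio.h>

    #define ISA_END_ADDRESS 0x100000UL      /* 1 MiB, as on x86 */
    #define PAGE_SHIFT      12              /* 4 KiB pages      */

    /* Mirrors the clamp at the top of the rewritten pat_pagerange_is_ram(). */
    static unsigned long clamp_isa_pfn(unsigned long start_pfn)
    {
            if (start_pfn < (ISA_END_ADDRESS >> PAGE_SHIFT))
                    start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
            return start_pfn;
    }

    int main(void)
    {
            printf("%#lx\n", clamp_isa_pfn(0x80));  /* prints 0x100 */
            printf("%#lx\n", clamp_isa_pfn(0x200)); /* prints 0x200 */
            return 0;
    }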