Commit 9958e810 authored by Yinghai Lu, committed by Ingo Molnar

x86: max_low_pfn_mapped fix, #3

Optimization: try to merge ranges with the same page size in
init_memory_mapping(), to set up the best possible set of linear mappings.

Thus, when GB pages are not available, we can still use 2M pages.
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 965194c1
@@ -763,6 +763,20 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		end_pfn = end>>PAGE_SHIFT;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
 
+	/* try to merge same page size and continuous */
+	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
+		unsigned long old_start;
+		if (mr[i].end != mr[i+1].start ||
+		    mr[i].page_size_mask != mr[i+1].page_size_mask)
+			continue;
+		/* move it */
+		old_start = mr[i].start;
+		memmove(&mr[i], &mr[i+1],
+			(nr_range - 1 - i) * sizeof (struct map_range));
+		mr[i].start = old_start;
+		nr_range--;
+	}
+
 	for (i = 0; i < nr_range; i++)
 		printk(KERN_DEBUG " %010lx - %010lx page %s\n",
 			mr[i].start, mr[i].end,
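
For illustration, below is a minimal standalone userspace sketch of the merging pass. The loop mirrors the hunk above; the reduced struct map_range, the merge_ranges() wrapper, and the sample ranges in main() are hypothetical scaffolding for the example, not part of the kernel change.

/* sketch: merge adjacent, contiguous ranges that share a page size mask */
#include <stdio.h>
#include <string.h>

/* reduced stand-in for the kernel's struct map_range (hypothetical) */
struct map_range {
	unsigned long start;
	unsigned long end;
	unsigned page_size_mask;
};

/* same loop shape as the hunk above; returns the new range count */
static int merge_ranges(struct map_range *mr, int nr_range)
{
	int i;

	for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
		unsigned long old_start;

		if (mr[i].end != mr[i+1].start ||
		    mr[i].page_size_mask != mr[i+1].page_size_mask)
			continue;
		/* fold entry i+1 (and everything after it) down into slot i,
		   keeping the earlier start address */
		old_start = mr[i].start;
		memmove(&mr[i], &mr[i+1],
			(nr_range - 1 - i) * sizeof(struct map_range));
		mr[i].start = old_start;
		nr_range--;
	}
	return nr_range;
}

int main(void)
{
	/* two contiguous 2M-capable ranges followed by a 4k-only tail */
	struct map_range mr[] = {
		{ 0x0000000, 0x0200000, 1 },
		{ 0x0200000, 0x0400000, 1 },
		{ 0x0400000, 0x0401000, 0 },
	};
	int i, nr_range = 3;

	nr_range = merge_ranges(mr, nr_range);	/* first two entries merge */
	for (i = 0; i < nr_range; i++)
		printf(" %010lx - %010lx mask %u\n",
		       mr[i].start, mr[i].end, mr[i].page_size_mask);
	return 0;
}

Keeping the array compact with memmove() means the later printk() loop and the page-table setup walk only nr_range live entries, each covering the largest contiguous span that can use one page size.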