Commit 719272c4 authored by Yinghai Lu, committed by H. Peter Anvin

x86, mm: only call early_ioremap_page_table_range_init() once

On 32-bit, before the patchset that sets up page tables only for RAM,
early_ioremap_page_table_range_init() was called only once.

Now it is called during every init_memory_mapping() when there are
holes under max_low_pfn.

We should call it only once, after all ranges under max_low_pfn have
been mapped, just as before.

That also avoids the risk of running out of pgt_buf in the BRK area.

page_table_range_init() needs to count the pages for the kmap page
tables first, and then use the newly added alloc_low_pages() to get
those pages in sequence. That conforms to the requirement that the
pages be handed out in low-to-high order.
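
To make the ordering point concrete, here is a minimal userspace sketch
of the count-then-batch-allocate pattern; it is not kernel code, and
batch_alloc_pages() and count_kmap_pages() are hypothetical stand-ins
for alloc_low_pages() and page_table_range_init_count(). The moving
cursor plays the role of the new *adr argument threaded through
page_table_kmap_check().

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE	4096UL

/* Stand-in for alloc_low_pages(): one contiguous block, so pages
 * handed out from it are automatically in low-to-high order. */
static void *batch_alloc_pages(unsigned long count)
{
	return count ? calloc(count, PAGE_SIZE) : NULL;
}

/* Dry-run pass: count how many pages the walk below will consume,
 * allocating nothing (cf. page_table_range_init_count()). */
static unsigned long count_kmap_pages(unsigned long start, unsigned long end)
{
	unsigned long vaddr, count = 0;

	for (vaddr = start; vaddr != end; vaddr += PAGE_SIZE)
		count++;	/* the real walk counts only kmap PMD slots */
	return count;
}

int main(void)
{
	unsigned long start = 0, end = 4 * PAGE_SIZE, vaddr;
	unsigned long count = count_kmap_pages(start, end);
	void *base = batch_alloc_pages(count);
	void *adr = base;	/* cursor into the pre-allocated block */

	/* Real pass: consume the pre-allocated pages in sequence
	 * instead of calling the allocator once per page. */
	for (vaddr = start; vaddr != end; vaddr += PAGE_SIZE) {
		void *newpte = adr;		/* newpte = *adr; */
		memset(newpte, 0, PAGE_SIZE);	/* populate the page */
		adr = (void *)((unsigned long)adr + PAGE_SIZE);
	}

	printf("consumed %lu pages in low-to-high order\n", count);
	free(base);
	return 0;
}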
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-30-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent ddd3509d
arch/x86/mm/init.c

@@ -343,14 +343,6 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
 						   mr[i].page_size_mask);
 
-#ifdef CONFIG_X86_32
-	early_ioremap_page_table_range_init();
-
-	load_cr3(swapper_pg_dir);
-#endif
-
-	__flush_tlb_all();
-
 	add_pfn_range_mapped(start >> PAGE_SHIFT, ret >> PAGE_SHIFT);
 
 	return ret >> PAGE_SHIFT;
@@ -447,7 +439,12 @@ void __init init_mem_mapping(void)
 		/* can we preseve max_low_pfn ?*/
 		max_low_pfn = max_pfn;
 	}
+#else
+	early_ioremap_page_table_range_init();
+
+	load_cr3(swapper_pg_dir);
+	__flush_tlb_all();
 #endif
 
 	early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
 }
arch/x86/mm/init_32.c

@@ -135,8 +135,39 @@ pte_t * __init populate_extra_pte(unsigned long vaddr)
 	return one_page_table_init(pmd) + pte_idx;
 }
 
+static unsigned long __init
+page_table_range_init_count(unsigned long start, unsigned long end)
+{
+	unsigned long count = 0;
+#ifdef CONFIG_HIGHMEM
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+	int pgd_idx, pmd_idx;
+	unsigned long vaddr;
+
+	if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
+		return 0;
+
+	vaddr = start;
+	pgd_idx = pgd_index(vaddr);
+	pmd_idx = pmd_index(vaddr);
+	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
+		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
+							pmd_idx++) {
+			if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
+			    (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
+				count++;
+			vaddr += PMD_SIZE;
+		}
+		pmd_idx = 0;
+	}
+#endif
+	return count;
+}
+
 static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
-					   unsigned long vaddr, pte_t *lastpte)
+					   unsigned long vaddr, pte_t *lastpte,
+					   void **adr)
 {
 #ifdef CONFIG_HIGHMEM
 	/*
@@ -150,16 +181,15 @@ static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
 	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
 	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
-	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
-	    && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start
-	    || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) {
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
 		pte_t *newpte;
 		int i;
 
 		BUG_ON(after_bootmem);
-		newpte = alloc_low_page();
+		newpte = *adr;
 		for (i = 0; i < PTRS_PER_PTE; i++)
 			set_pte(newpte + i, pte[i]);
+		*adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);
 
 		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
@@ -193,6 +223,11 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte = NULL;
+	unsigned long count = page_table_range_init_count(start, end);
+	void *adr = NULL;
+
+	if (count)
+		adr = alloc_low_pages(count);
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -205,7 +240,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 							pmd++, pmd_idx++) {
 			pte = page_table_kmap_check(one_page_table_init(pmd),
-						    pmd, vaddr, pte);
+						    pmd, vaddr, pte, &adr);
 
 			vaddr += PMD_SIZE;
 		}