Commit 7a7039ee authored by Chris Metcalf

arch/tile: fix bug in loading kernels larger than 16 MB

Previously we only handled kernels up to a single huge page in size.
Now we create additional PTEs appropriately.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
parent b230ff2d
@@ -557,6 +557,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	address = MEM_SV_INTRPT;
 	pmd = get_pmd(pgtables, address);
+	pfn = 0;  /* code starts at PA 0 */
 	if (ktext_small) {
 		/* Allocate an L2 PTE for the kernel text */
 		int cpu = 0;
@@ -579,10 +580,15 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		}
 		BUG_ON(address != (unsigned long)_stext);
-		pfn = 0;  /* code starts at PA 0 */
-		pte = alloc_pte();
-		for (pte_ofs = 0; address < (unsigned long)_einittext;
-		     pfn++, pte_ofs++, address += PAGE_SIZE) {
+		pte = NULL;
+		for (; address < (unsigned long)_einittext;
+		     pfn++, address += PAGE_SIZE) {
+			pte_ofs = pte_index(address);
+			if (pte_ofs == 0) {
+				if (pte)
+					assign_pte(pmd++, pte);
+				pte = alloc_pte();
+			}
 			if (!ktext_local) {
 				prot = set_remote_cache_cpu(prot, cpu);
 				cpu = cpumask_next(cpu, &ktext_mask);
@@ -591,7 +597,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 			}
 			pte[pte_ofs] = pfn_pte(pfn, prot);
 		}
-		assign_pte(pmd, pte);
+		if (pte)
+			assign_pte(pmd, pte);
 	} else {
 		pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
 		pteval = pte_mkhuge(pteval);
@@ -614,7 +621,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 		else
 			pteval = hv_pte_set_mode(pteval,
 						 HV_PTE_MODE_CACHE_NO_L3);
-		*(pte_t *)pmd = pteval;
+		for (; address < (unsigned long)_einittext;
+		     pfn += PFN_DOWN(HPAGE_SIZE), address += HPAGE_SIZE)
+			*(pte_t *)(pmd++) = pfn_pte(pfn, pteval);
 	}

 	/* Set swapper_pgprot here so it is flushed to memory right away. */
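The pattern in the small-page branch is easier to see outside the kernel. The sketch below is a standalone model in plain C, not the actual tile code: the constants are illustrative (64 KB pages and 256 entries per L2 table, so one table covers a 16 MB huge page), and the helpers are simplified stand-ins for the kernel's alloc_pte(), assign_pte(), pte_index() and pfn_pte(). It shows how the fixed loop starts a new L2 table each time pte_index() wraps to zero, which is what lets the kernel text span more than one huge page.

/* Standalone sketch, not kernel code: models the fixed small-page loop.
 * Constants are illustrative; helpers are simplified stand-ins for the
 * kernel's alloc_pte()/assign_pte()/pte_index()/pfn_pte().
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	(64UL * 1024)			/* illustrative */
#define PTRS_PER_PTE	256				/* entries per L2 table */
#define HPAGE_SIZE	(PAGE_SIZE * PTRS_PER_PTE)	/* 16 MB */

static unsigned long pte_index(unsigned long address)
{
	return (address / PAGE_SIZE) % PTRS_PER_PTE;
}

int main(void)
{
	unsigned long text_size = 40UL * 1024 * 1024;	/* > one huge page */
	unsigned long address, pfn = 0;
	unsigned long *pte = NULL;	/* current L2 table being filled */
	int pmd_slot = 0;		/* next PMD entry to point at a table */

	for (address = 0; address < text_size;
	     pfn++, address += PAGE_SIZE) {
		unsigned long pte_ofs = pte_index(address);

		/* Crossed into a new huge-page-sized region: publish the
		 * finished table and start a fresh one, as the fix does. */
		if (pte_ofs == 0) {
			if (pte) {
				printf("assign_pte: pmd[%d] -> full L2 table\n",
				       pmd_slot++);
				free(pte);
			}
			pte = calloc(PTRS_PER_PTE, sizeof(*pte));
			if (!pte)
				return 1;
		}
		pte[pte_ofs] = pfn;	/* stand-in for pfn_pte(pfn, prot) */
	}
	if (pte) {	/* publish the last (possibly partial) table */
		printf("assign_pte: pmd[%d] -> last L2 table\n", pmd_slot);
		free(pte);
	}
	return 0;
}

With text_size larger than HPAGE_SIZE the sketch prints one assign_pte line per 16 MB chunk; those are the "additional PTEs" the commit message refers to, whereas the old code allocated a single table and a single PMD entry and so could not map text past the first huge page. The huge-page branch of the diff applies the same idea: instead of writing one PMD entry, it loops in HPAGE_SIZE steps and fills one PMD entry per huge page.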