Commit a5f4c561 authored by Stefan Agner, committed by Russell King

ARM: 8415/1: early fixmap support for earlycon

Add early fixmap support, initially to provide a permanent, fixed
mapping for the early console. A temporary, early pte is created,
which is migrated to a permanent mapping in paging_init. This is
also needed because the attributes may change as the memory types
are initialized. The 3MiB fixmap range spans two pte tables, but
currently only one pte table is created for early fixmap support.
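For context, a minimal sketch of how an early console can consume this fixmap slot once CONFIG_FIX_EARLYCON_MEM is enabled; it mirrors what the generic earlycon code in drivers/tty/serial/earlycon.c does, but the helper name and exact shape are illustrative, not part of this patch:

#include <linux/init.h>
#include <linux/io.h>
#include <asm/fixmap.h>

/*
 * Illustrative only: map the page holding the UART registers through the
 * reserved FIX_EARLYCON_MEM_BASE slot.  set_fixmap_io() comes from
 * asm-generic/fixmap.h and uses the FIXMAP_PAGE_IO attributes added by
 * this patch.
 */
static void __iomem * __init early_console_map(unsigned long paddr)
{
	void __iomem *base;

	set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);
	base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);

	/* preserve the register block's offset within the page */
	return base + (paddr & ~PAGE_MASK);
}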

Re-add FIX_KMAP_BEGIN to the index calculation in highmem.c, since
the kmap indices no longer start at zero. This partially reverts
4221e2e6 ("ARM: 8031/1: fixmap: remove FIX_KMAP_BEGIN and
FIX_KMAP_END").
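To make the index shift concrete: __fix_to_virt() in include/asm-generic/fixmap.h hands out one page per index, growing downwards from FIXADDR_TOP, so with FIX_EARLYCON_MEM_BASE occupying index 0 the kmap indices must be offset by FIX_KMAP_BEGIN. A short annotated sketch, with numbers that assume the enum layout introduced by this patch:

/* From include/asm-generic/fixmap.h: one page per index, growing downwards */
#define __fix_to_virt(x)	(FIXADDR_TOP - ((x) << PAGE_SHIFT))

/*
 * With this patch:
 *   FIX_EARLYCON_MEM_BASE              == 0  ->  FIXADDR_TOP
 *   __end_of_permanent_fixed_addresses == 1
 *   FIX_KMAP_BEGIN                     == 1  ->  FIXADDR_TOP - PAGE_SIZE
 *
 * Hence kmap_atomic() has to compute
 *   idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 * rather than starting at 0, or it would stomp on the earlycon slot.
 */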

Cc: Mark Salter <msalter@redhat.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Laura Abbott <lauraa@codeaurora.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Rob Herring <robh@kernel.org>
Signed-off-by: Stefan Agner <stefan@agner.ch>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent efaa6e26
arch/arm/Kconfig
@@ -188,6 +188,9 @@ config ARCH_HAS_ILOG2_U64
 config ARCH_HAS_BANDGAP
 	bool
 
+config FIX_EARLYCON_MEM
+	def_bool y if MMU
+
 config GENERIC_HWEIGHT
 	bool
 	default y
arch/arm/include/asm/fixmap.h
@@ -6,9 +6,13 @@
 #define FIXADDR_TOP		(FIXADDR_END - PAGE_SIZE)
 
 #include <asm/kmap_types.h>
+#include <asm/pgtable.h>
 
 enum fixed_addresses {
-	FIX_KMAP_BEGIN,
+	FIX_EARLYCON_MEM_BASE,
+	__end_of_permanent_fixed_addresses,
+
+	FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
 
 	/* Support writing RO kernel text via kprobes, jump labels, etc. */
@@ -18,7 +22,16 @@ enum fixed_addresses {
 	__end_of_fixed_addresses
 };
 
+#define FIXMAP_PAGE_COMMON	(L_PTE_YOUNG | L_PTE_PRESENT | L_PTE_XN | L_PTE_DIRTY)
+
+#define FIXMAP_PAGE_NORMAL	(FIXMAP_PAGE_COMMON | L_PTE_MT_WRITEBACK)
+
+/* Used by set_fixmap_(io|nocache), both meant for mapping a device */
+#define FIXMAP_PAGE_IO		(FIXMAP_PAGE_COMMON | L_PTE_MT_DEV_SHARED | L_PTE_SHARED)
+#define FIXMAP_PAGE_NOCACHE	FIXMAP_PAGE_IO
+
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
+void __init early_fixmap_init(void);
 
 #include <asm-generic/fixmap.h>
arch/arm/kernel/setup.c
@@ -37,6 +37,7 @@
 #include <asm/cpu.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
+#include <asm/fixmap.h>
 #include <asm/procinfo.h>
 #include <asm/psci.h>
 #include <asm/sections.h>
@@ -954,6 +955,9 @@ void __init setup_arch(char **cmdline_p)
 	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 	*cmdline_p = cmd_line;
 
+	if (IS_ENABLED(CONFIG_FIX_EARLYCON_MEM))
+		early_fixmap_init();
+
 	parse_early_param();
 
 #ifdef CONFIG_MMU
arch/arm/mm/highmem.c
@@ -79,7 +79,7 @@ void *kmap_atomic(struct page *page)
 
 	type = kmap_atomic_idx_push();
 
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -106,7 +106,7 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = type + KM_TYPE_NR * smp_processor_id();
+		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
@@ -138,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = type + KM_TYPE_NR * smp_processor_id();
+	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
arch/arm/mm/mmu.c
@@ -357,6 +357,47 @@ const struct mem_type *get_mem_type(unsigned int type)
 }
 EXPORT_SYMBOL(get_mem_type);
 
+static pte_t *(*pte_offset_fixmap)(pmd_t *dir, unsigned long addr);
+
+static pte_t bm_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+	__aligned(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE) __initdata;
+
+static pte_t * __init pte_offset_early_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return &bm_pte[pte_index(addr)];
+}
+
+static pte_t *pte_offset_late_fixmap(pmd_t *dir, unsigned long addr)
+{
+	return pte_offset_kernel(dir, addr);
+}
+
+static inline pmd_t * __init fixmap_pmd(unsigned long addr)
+{
+	pgd_t *pgd = pgd_offset_k(addr);
+	pud_t *pud = pud_offset(pgd, addr);
+	pmd_t *pmd = pmd_offset(pud, addr);
+
+	return pmd;
+}
+
+void __init early_fixmap_init(void)
+{
+	pmd_t *pmd;
+
+	/*
+	 * The early fixmap range spans multiple pmds, for which
+	 * we are not prepared:
	 */
+	BUILD_BUG_ON((__fix_to_virt(__end_of_permanent_fixed_addresses) >> PMD_SHIFT)
+		     != FIXADDR_TOP >> PMD_SHIFT);
+
+	pmd = fixmap_pmd(FIXADDR_TOP);
+	pmd_populate_kernel(&init_mm, pmd, bm_pte);
+
+	pte_offset_fixmap = pte_offset_early_fixmap;
+}
+
 /*
  * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
  * As a result, this can only be called with preemption disabled, as under
@@ -365,7 +406,7 @@ EXPORT_SYMBOL(get_mem_type);
 void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
-	pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+	pte_t *pte = pte_offset_fixmap(pmd_off_k(vaddr), vaddr);
 
 	/* Make sure fixmap region does not exceed available allocation. */
 	BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
@@ -855,7 +896,7 @@ static void __init create_mapping(struct map_desc *md)
 	}
 
 	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
-	    md->virtual >= PAGE_OFFSET &&
+	    md->virtual >= PAGE_OFFSET && md->virtual < FIXADDR_START &&
 	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
 		pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
 			(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
@@ -1213,10 +1254,10 @@ void __init arm_mm_memblock_reserve(void)
 
 /*
  * Set up the device mappings.  Since we clear out the page tables for all
- * mappings above VMALLOC_START, we will remove any debug device mappings.
- * This means you have to be careful how you debug this function, or any
- * called function.  This means you can't use any function or debugging
- * method which may touch any device, otherwise the kernel _will_ crash.
+ * mappings above VMALLOC_START, except early fixmap, we might remove debug
+ * device mappings.  This means earlycon can be used to debug this function
+ * Any other function or debugging method which may touch any device _will_
+ * crash the kernel.
  */
 static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
@@ -1231,7 +1272,10 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 
 	early_trap_init(vectors);
 
-	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
+	/*
+	 * Clear page table except top pmd used by early fixmaps
+	 */
+	for (addr = VMALLOC_START; addr < (FIXADDR_TOP & PMD_MASK); addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
 
 	/*
@@ -1483,6 +1527,35 @@ void __init early_paging_init(const struct machine_desc *mdesc)
 
 #endif
 
+static void __init early_fixmap_shutdown(void)
+{
+	int i;
+	unsigned long va = fix_to_virt(__end_of_permanent_fixed_addresses - 1);
+
+	pte_offset_fixmap = pte_offset_late_fixmap;
+	pmd_clear(fixmap_pmd(va));
+	local_flush_tlb_kernel_page(va);
+
+	for (i = 0; i < __end_of_permanent_fixed_addresses; i++) {
+		pte_t *pte;
+		struct map_desc map;
+
+		map.virtual = fix_to_virt(i);
+		pte = pte_offset_early_fixmap(pmd_off_k(map.virtual), map.virtual);
+
+		/* Only i/o device mappings are supported ATM */
+		if (pte_none(*pte) ||
+		    (pte_val(*pte) & L_PTE_MT_MASK) != L_PTE_MT_DEV_SHARED)
+			continue;
+
+		map.pfn = pte_pfn(*pte);
+		map.type = MT_DEVICE;
+		map.length = PAGE_SIZE;
+		create_mapping(&map);
+	}
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -1495,6 +1568,7 @@ void __init paging_init(const struct machine_desc *mdesc)
 	prepare_page_table();
 	map_lowmem();
 	dma_contiguous_remap();
+	early_fixmap_shutdown();
 	devicemaps_init(mdesc);
 	kmap_init();
 	tcm_init();
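With CONFIG_FIX_EARLYCON_MEM now selectable on ARM, an early console can be requested on the kernel command line before paging_init() completes, for example with plain "earlycon" (the console description is taken from the DT chosen/stdout-path) or with an explicit form such as "earlycon=uart8250,mmio32,<addr>"; the exact option string depends on the UART driver, and the address is a placeholder rather than anything mandated by this patch.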