Commit fe122b89 authored by Björn Töpel, committed by Palmer Dabbelt

riscv: mm: Change attribute from __init to __meminit for page functions

Prepare for memory hotplugging support by changing from __init to
__meminit for the page table functions that are used by the upcoming
architecture specific callbacks.

Changing the attribute from __init to __meminit prevents the functions
from being discarded after init. The __meminit attribute keeps the
functions in the kernel text after init, but only if memory hotplug is
enabled for the build.
Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
Link: https://lore.kernel.org/r/20240605114100.315918-4-bjorn@kernel.org
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 66673099
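For context, the effect of the annotation can be sketched as follows. This is a simplified illustration of the semantics described in the commit message, not the literal kernel definitions (those live in include/linux/init.h and the linker scripts):

/*
 * Simplified sketch, not the real kernel macros: with memory hotplug
 * enabled, __meminit code/data is kept in the kernel image after boot;
 * without it, it is treated like __init and discarded after init.
 */
#ifdef CONFIG_MEMORY_HOTPLUG
# define __meminit			/* kept in regular text, usable at runtime */
# define __meminitdata			/* kept in regular data */
#else
# define __meminit	__init		/* boot-time only, freed after init */
# define __meminitdata	__initdata
#endif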
@@ -31,8 +31,8 @@ typedef struct {
 #define cntx2asid(cntx)		((cntx) & SATP_ASID_MASK)
 #define cntx2version(cntx)	((cntx) & ~SATP_ASID_MASK)
 
-void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
-			       phys_addr_t sz, pgprot_t prot);
+void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+				  pgprot_t prot);
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_MMU_H */
@@ -165,7 +165,7 @@ struct pt_alloc_ops {
 #endif
 };
 
-extern struct pt_alloc_ops pt_ops __initdata;
+extern struct pt_alloc_ops pt_ops __meminitdata;
 
 #ifdef CONFIG_MMU
 /* Number of PGD entries that a user-mode program can use */
@@ -296,7 +296,7 @@ static void __init setup_bootmem(void)
 }
 
 #ifdef CONFIG_MMU
-struct pt_alloc_ops pt_ops __initdata;
+struct pt_alloc_ops pt_ops __meminitdata;
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
@@ -358,7 +358,7 @@ static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
 	return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
 }
 
-static inline pte_t *__init get_pte_virt_late(phys_addr_t pa)
+static inline pte_t *__meminit get_pte_virt_late(phys_addr_t pa)
 {
 	return (pte_t *) __va(pa);
 }
@@ -377,7 +377,7 @@ static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t __init alloc_pte_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pte_late(uintptr_t va)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
@@ -385,9 +385,8 @@ static phys_addr_t __init alloc_pte_late(uintptr_t va)
 	return __pa((pte_t *)ptdesc_address(ptdesc));
 }
 
-static void __init create_pte_mapping(pte_t *ptep,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	uintptr_t pte_idx = pte_index(va);
@@ -441,7 +440,7 @@ static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
 	return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
 }
 
-static pmd_t *__init get_pmd_virt_late(phys_addr_t pa)
+static pmd_t *__meminit get_pmd_virt_late(phys_addr_t pa)
 {
 	return (pmd_t *) __va(pa);
 }
@@ -458,7 +457,7 @@ static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t __init alloc_pmd_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)
 {
 	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
@@ -466,9 +465,9 @@ static phys_addr_t __init alloc_pmd_late(uintptr_t va)
 	return __pa((pmd_t *)ptdesc_address(ptdesc));
 }
 
-static void __init create_pmd_mapping(pmd_t *pmdp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pmd_mapping(pmd_t *pmdp,
+					 uintptr_t va, phys_addr_t pa,
+					 phys_addr_t sz, pgprot_t prot)
 {
 	pte_t *ptep;
 	phys_addr_t pte_phys;
@@ -504,7 +503,7 @@ static pud_t *__init get_pud_virt_fixmap(phys_addr_t pa)
 	return (pud_t *)set_fixmap_offset(FIX_PUD, pa);
 }
 
-static pud_t *__init get_pud_virt_late(phys_addr_t pa)
+static pud_t *__meminit get_pud_virt_late(phys_addr_t pa)
 {
 	return (pud_t *)__va(pa);
 }
@@ -522,7 +521,7 @@ static phys_addr_t __init alloc_pud_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_pud_late(uintptr_t va)
+static phys_addr_t __meminit alloc_pud_late(uintptr_t va)
 {
 	unsigned long vaddr;
@@ -542,7 +541,7 @@ static p4d_t *__init get_p4d_virt_fixmap(phys_addr_t pa)
 	return (p4d_t *)set_fixmap_offset(FIX_P4D, pa);
 }
 
-static p4d_t *__init get_p4d_virt_late(phys_addr_t pa)
+static p4d_t *__meminit get_p4d_virt_late(phys_addr_t pa)
 {
 	return (p4d_t *)__va(pa);
 }
@@ -560,7 +559,7 @@ static phys_addr_t __init alloc_p4d_fixmap(uintptr_t va)
 	return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static phys_addr_t alloc_p4d_late(uintptr_t va)
+static phys_addr_t __meminit alloc_p4d_late(uintptr_t va)
 {
 	unsigned long vaddr;
@@ -569,9 +568,8 @@ static phys_addr_t alloc_p4d_late(uintptr_t va)
 	return __pa(vaddr);
 }
 
-static void __init create_pud_mapping(pud_t *pudp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_pud_mapping(pud_t *pudp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	pmd_t *nextp;
 	phys_addr_t next_phys;
@@ -596,9 +594,8 @@ static void __init create_pud_mapping(pud_t *pudp,
 	create_pmd_mapping(nextp, va, pa, sz, prot);
 }
 
-static void __init create_p4d_mapping(p4d_t *p4dp,
-				      uintptr_t va, phys_addr_t pa,
-				      phys_addr_t sz, pgprot_t prot)
+static void __meminit create_p4d_mapping(p4d_t *p4dp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+					 pgprot_t prot)
 {
 	pud_t *nextp;
 	phys_addr_t next_phys;
@@ -654,9 -651,8 @@ static void __init create_p4d_mapping(p4d_t *p4dp,
 #define create_pmd_mapping(__pmdp, __va, __pa, __sz, __prot) do {} while(0)
 #endif /* __PAGETABLE_PMD_FOLDED */
 
-void __init create_pgd_mapping(pgd_t *pgdp,
-			       uintptr_t va, phys_addr_t pa,
-			       phys_addr_t sz, pgprot_t prot)
+void __meminit create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa, phys_addr_t sz,
+				  pgprot_t prot)
 {
 	pgd_next_t *nextp;
 	phys_addr_t next_phys;
@@ -681,8 +677,7 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 	create_pgd_next_mapping(nextp, va, pa, sz, prot);
 }
 
-static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
-				      phys_addr_t size)
+static uintptr_t __meminit best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size)
 {
 	if (debug_pagealloc_enabled())
 		return PAGE_SIZE;
@@ -718,7 +713,7 @@ asmlinkage void __init __copy_data(void)
 #endif
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-static __init pgprot_t pgprot_from_va(uintptr_t va)
+static __meminit pgprot_t pgprot_from_va(uintptr_t va)
 {
 	if (is_va_kernel_text(va))
 		return PAGE_KERNEL_READ_EXEC;
@@ -743,7 +738,7 @@ void mark_rodata_ro(void)
 			  set_memory_ro);
 }
 #else
-static __init pgprot_t pgprot_from_va(uintptr_t va)
+static __meminit pgprot_t pgprot_from_va(uintptr_t va)
 {
 	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
 		return PAGE_KERNEL;
@@ -1235,9 +1230,8 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	pt_ops_set_fixmap();
 }
 
-static void __init create_linear_mapping_range(phys_addr_t start,
-					       phys_addr_t end,
-					       uintptr_t fixed_map_size)
+static void __meminit create_linear_mapping_range(phys_addr_t start, phys_addr_t end,
+						  uintptr_t fixed_map_size)
 {
 	phys_addr_t pa;
 	uintptr_t va, map_size;
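Purely as an illustration of where this leads (not part of this commit): the architecture-specific hotplug callbacks added later in the series can then call these now-__meminit helpers at runtime. The sketch below assumes the generic arch_add_memory()/__add_pages() signatures and the three-argument create_linear_mapping_range() as it stands after this patch; the body is a guess for illustration, not the actual follow-up patch:

/* Hypothetical sketch only -- not the code added by the later patches. */
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	/*
	 * Extend the kernel's linear map over the hot-added range; this
	 * relies on create_linear_mapping_range() surviving past init.
	 */
	create_linear_mapping_range(start, start + size, 0);

	/* Hand the pages over to the core memory hotplug code. */
	return __add_pages(nid, PFN_DOWN(start), PFN_DOWN(size), params);
}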