Commit 1bb31cc7 authored by Will Deacon

Merge branch 'for-next/mm' into for-next/core

* for-next/mm:
  arm64: mm: always map fixmap at page granularity
  arm64: mm: move fixmap code to its own file
  arm64: add FIXADDR_TOT_{START,SIZE}
  Revert "Revert "arm64: dma: Drop cache invalidation from arch_dma_prep_coherent()""
  arm: uaccess: Remove memcpy_page_flushcache()
  mm,kfence: decouple kfence from page granularity mapping judgement
parents 81444b77 414c109b
@@ -17,6 +17,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <linux/math.h>
 #include <linux/sizes.h>
 #include <asm/boot.h>
 #include <asm/page.h>
@@ -36,17 +37,13 @@ enum fixed_addresses {
 	FIX_HOLE,

 	/*
-	 * Reserve a virtual window for the FDT that is 2 MB larger than the
-	 * maximum supported size, and put it at the top of the fixmap region.
-	 * The additional space ensures that any FDT that does not exceed
-	 * MAX_FDT_SIZE can be mapped regardless of whether it crosses any
-	 * 2 MB alignment boundaries.
-	 *
-	 * Keep this at the top so it remains 2 MB aligned.
+	 * Reserve a virtual window for the FDT that is a page bigger than the
+	 * maximum supported size. The additional space ensures that any FDT
+	 * that does not exceed MAX_FDT_SIZE can be mapped regardless of
+	 * whether it crosses any page boundary.
 	 */
-#define FIX_FDT_SIZE		(MAX_FDT_SIZE + SZ_2M)
 	FIX_FDT_END,
-	FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+	FIX_FDT = FIX_FDT_END + DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1,

 	FIX_EARLYCON_MEM_BASE,
 	FIX_TEXT_POKE0,
@@ -95,12 +92,15 @@
 	__end_of_fixed_addresses
 };

 #define FIXADDR_SIZE		(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
+#define FIXADDR_TOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_TOT_START	(FIXADDR_TOP - FIXADDR_TOT_SIZE)

 #define FIXMAP_PAGE_IO		__pgprot(PROT_DEVICE_nGnRE)

 void __init early_fixmap_init(void);
+void __init fixmap_copy(pgd_t *pgdir);

 #define __early_set_fixmap __set_fixmap
...
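The new FIX_FDT expression above reserves one fixmap slot per page of MAX_FDT_SIZE, plus one spare slot, so a maximally-sized FDT still fits when it starts partway into a page. A standalone sketch of the arithmetic (the PAGE_SIZE and MAX_FDT_SIZE values are assumptions for illustration, matching 4 KiB pages and the 2 MiB limit in arm64's asm/boot.h):

```c
/* Standalone illustration of the FIX_FDT slot arithmetic; values assumed. */
#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define MAX_FDT_SIZE	(2UL * 1024 * 1024)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* One slot per page of the FDT, plus one spare page. */
	unsigned long slots = DIV_ROUND_UP(MAX_FDT_SIZE, PAGE_SIZE) + 1;

	/*
	 * Worst case: the FDT starts near the end of a page, so a blob of
	 * MAX_FDT_SIZE bytes touches one extra page.
	 */
	unsigned long offset = PAGE_SIZE - 8;	/* MIN_FDT_ALIGN is 8 */
	unsigned long touched = DIV_ROUND_UP(offset + MAX_FDT_SIZE, PAGE_SIZE);

	assert(touched <= slots);
	printf("%lu slots cover at most %lu touched pages\n", slots, touched);
	return 0;
}
```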
@@ -59,8 +59,11 @@
 #define EARLY_KASLR	(0)
 #endif

+#define SPAN_NR_ENTRIES(vstart, vend, shift) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)
+
 #define EARLY_ENTRIES(vstart, vend, shift, add) \
-	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + add)
+	(SPAN_NR_ENTRIES(vstart, vend, shift) + (add))

 #define EARLY_PGDS(vstart, vend, add)	(EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT, add))
...
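SPAN_NR_ENTRIES() counts how many granule-sized entries (each 1 << shift bytes) a [vstart, vend) range touches; EARLY_ENTRIES() is now expressed in terms of it, and the new fixmap.c reuses it for NR_BM_PTE_TABLES. A quick standalone check with illustrative addresses (shift 21 corresponds to PMD_SHIFT with 4 KiB pages):

```c
/* Standalone check of SPAN_NR_ENTRIES; addresses are illustrative only. */
#include <assert.h>

#define SPAN_NR_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

int main(void)
{
	/* A 2 MiB-aligned 4 MiB range spans exactly two 2 MiB (PMD) entries. */
	unsigned long vstart = 0x200000UL, vend = vstart + 0x400000UL;
	assert(SPAN_NR_ENTRIES(vstart, vend, 21) == 2);

	/* Slide the same range by one page and it straddles three entries. */
	assert(SPAN_NR_ENTRIES(vstart + 0x1000, vend + 0x1000, 21) == 3);
	return 0;
}
```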
@@ -19,4 +19,14 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	return true;
 }

+#ifdef CONFIG_KFENCE
+extern bool kfence_early_init;
+static inline bool arm64_kfence_can_set_direct_map(void)
+{
+	return !kfence_early_init;
+}
+#else /* CONFIG_KFENCE */
+static inline bool arm64_kfence_can_set_direct_map(void) { return false; }
+#endif /* CONFIG_KFENCE */
+
 #endif /* __ASM_KFENCE_H */
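arm64_kfence_can_set_direct_map() only returns true when the KFENCE pool was not handed out early, since a late-initialized pool comes from the page allocator and needs page-granular linear-map entries. The series drives kfence_early_init from the kfence.sample_interval boot parameter; a sketch of that wiring (the exact file and variable attributes are assumptions here):

```c
/*
 * Sketch of the kfence_early_init wiring; placement and attributes are
 * assumptions, but the early_param() pattern is standard kernel practice.
 */
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/kernel.h>

bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;

/* early_param() runs long before paging_init(), so this is safe to read there. */
static int __init parse_kfence_early_init(char *arg)
{
	int val;

	/* kfence.sample_interval=0 disables KFENCE, so no early pool either. */
	if (get_option(&arg, &val))
		kfence_early_init = !!val;
	return 0;
}
early_param("kfence.sample_interval", parse_kfence_early_init);
```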
@@ -65,6 +65,8 @@ extern void paging_init(void);
 extern void bootmem_init(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
 extern void init_mem_pgprot(void);
+extern void create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
+				   phys_addr_t size, pgprot_t prot);
 extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
 			       unsigned long virt, phys_addr_t size,
 			       pgprot_t prot, bool page_mappings_only);
...
@@ -449,8 +449,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 extern __must_check long strnlen_user(const char __user *str, long n);

 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
-struct page;
-void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
 extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

 static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
...
@@ -19,12 +19,6 @@ void memcpy_flushcache(void *dst, const void *src, size_t cnt)
 }
 EXPORT_SYMBOL_GPL(memcpy_flushcache);

-void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
-			    size_t len)
-{
-	memcpy_flushcache(to, page_address(page) + offset, len);
-}
-
 unsigned long __copy_user_flushcache(void *to, const void __user *from,
 				     unsigned long n)
 {
...
@@ -2,7 +2,7 @@
 obj-y				:= dma-mapping.o extable.o fault.o init.o \
 				   cache.o copypage.o flush.o \
 				   ioremap.o mmap.o pgd.o mmu.o \
-				   context.o proc.o pageattr.o
+				   context.o proc.o pageattr.o fixmap.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PTDUMP_CORE)	+= ptdump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
...
@@ -36,22 +36,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 {
 	unsigned long start = (unsigned long)page_address(page);

-	/*
-	 * The architecture only requires a clean to the PoC here in order to
-	 * meet the requirements of the DMA API. However, some vendors (i.e.
-	 * Qualcomm) abuse the DMA API for transferring buffers from the
-	 * non-secure to the secure world, resetting the system if a non-secure
-	 * access shows up after the buffer has been transferred:
-	 *
-	 * https://lore.kernel.org/r/20221114110329.68413-1-manivannan.sadhasivam@linaro.org
-	 *
-	 * Using clean+invalidate appears to make this issue less likely, but
-	 * the drivers themselves still need fixing as the CPU could issue a
-	 * speculative read from the buffer via the linear mapping irrespective
-	 * of the cache maintenance we use. Once the drivers are fixed, we can
-	 * relax this to a clean operation.
-	 */
-	dcache_clean_inval_poc(start, start + size);
+	dcache_clean_poc(start, start + size);
 }

 #ifdef CONFIG_IOMMU_DMA
...
// SPDX-License-Identifier: GPL-2.0-only
/*
* Fixmap manipulation code
*/
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#define NR_BM_PTE_TABLES \
SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PMD_SHIFT)
#define NR_BM_PMD_TABLES \
SPAN_NR_ENTRIES(FIXADDR_TOT_START, FIXADDR_TOP, PUD_SHIFT)
static_assert(NR_BM_PMD_TABLES == 1);
#define __BM_TABLE_IDX(addr, shift) \
(((addr) >> (shift)) - (FIXADDR_TOT_START >> (shift)))
#define BM_PTE_TABLE_IDX(addr) __BM_TABLE_IDX(addr, PMD_SHIFT)
static pte_t bm_pte[NR_BM_PTE_TABLES][PTRS_PER_PTE] __page_aligned_bss;
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused;
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused;
static inline pte_t *fixmap_pte(unsigned long addr)
{
return &bm_pte[BM_PTE_TABLE_IDX(addr)][pte_index(addr)];
}
static void __init early_fixmap_init_pte(pmd_t *pmdp, unsigned long addr)
{
pmd_t pmd = READ_ONCE(*pmdp);
pte_t *ptep;
if (pmd_none(pmd)) {
ptep = bm_pte[BM_PTE_TABLE_IDX(addr)];
__pmd_populate(pmdp, __pa_symbol(ptep), PMD_TYPE_TABLE);
}
}
static void __init early_fixmap_init_pmd(pud_t *pudp, unsigned long addr,
unsigned long end)
{
unsigned long next;
pud_t pud = READ_ONCE(*pudp);
pmd_t *pmdp;
if (pud_none(pud))
__pud_populate(pudp, __pa_symbol(bm_pmd), PUD_TYPE_TABLE);
pmdp = pmd_offset_kimg(pudp, addr);
do {
next = pmd_addr_end(addr, end);
early_fixmap_init_pte(pmdp, addr);
} while (pmdp++, addr = next, addr != end);
}
static void __init early_fixmap_init_pud(p4d_t *p4dp, unsigned long addr,
unsigned long end)
{
p4d_t p4d = READ_ONCE(*p4dp);
pud_t *pudp;
if (CONFIG_PGTABLE_LEVELS > 3 && !p4d_none(p4d) &&
p4d_page_paddr(p4d) != __pa_symbol(bm_pud)) {
/*
* We only end up here if the kernel mapping and the fixmap
* share the top level pgd entry, which should only happen on
* 16k/4 levels configurations.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
}
if (p4d_none(p4d))
__p4d_populate(p4dp, __pa_symbol(bm_pud), P4D_TYPE_TABLE);
pudp = pud_offset_kimg(p4dp, addr);
early_fixmap_init_pmd(pudp, addr, end);
}
/*
* The p*d_populate functions call virt_to_phys implicitly so they can't be used
* directly on kernel symbols (bm_p*d). This function is called too early to use
* lm_alias so __p*d_populate functions must be used to populate with the
* physical address from __pa_symbol.
*/
void __init early_fixmap_init(void)
{
unsigned long addr = FIXADDR_TOT_START;
unsigned long end = FIXADDR_TOP;
pgd_t *pgdp = pgd_offset_k(addr);
p4d_t *p4dp = p4d_offset(pgdp, addr);
early_fixmap_init_pud(p4dp, addr, end);
}
/*
* Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we
* ever need to use IPIs for TLB broadcasting, then we're in trouble here.
*/
void __set_fixmap(enum fixed_addresses idx,
phys_addr_t phys, pgprot_t flags)
{
unsigned long addr = __fix_to_virt(idx);
pte_t *ptep;
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
ptep = fixmap_pte(addr);
if (pgprot_val(flags)) {
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
} else {
pte_clear(&init_mm, addr, ptep);
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
}
}
void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot)
{
const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
phys_addr_t dt_phys_base;
int offset;
void *dt_virt;
/*
* Check whether the physical FDT address is set and meets the minimum
* alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
* at least 8 bytes so that we can always access the magic and size
* fields of the FDT header after mapping the first chunk, double check
* here if that is indeed the case.
*/
BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
return NULL;
dt_phys_base = round_down(dt_phys, PAGE_SIZE);
offset = dt_phys % PAGE_SIZE;
dt_virt = (void *)dt_virt_base + offset;
/* map the first chunk so we can read the size from the header */
create_mapping_noalloc(dt_phys_base, dt_virt_base, PAGE_SIZE, prot);
if (fdt_magic(dt_virt) != FDT_MAGIC)
return NULL;
*size = fdt_totalsize(dt_virt);
if (*size > MAX_FDT_SIZE)
return NULL;
if (offset + *size > PAGE_SIZE) {
create_mapping_noalloc(dt_phys_base, dt_virt_base,
offset + *size, prot);
}
return dt_virt;
}
/*
* Copy the fixmap region into a new pgdir.
*/
void __init fixmap_copy(pgd_t *pgdir)
{
if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdir, FIXADDR_TOT_START)))) {
/*
* The fixmap falls in a separate pgd to the kernel, and doesn't
* live in the carveout for the swapper_pg_dir. We can simply
* re-use the existing dir for the fixmap.
*/
set_pgd(pgd_offset_pgd(pgdir, FIXADDR_TOT_START),
READ_ONCE(*pgd_offset_k(FIXADDR_TOT_START)));
} else if (CONFIG_PGTABLE_LEVELS > 3) {
pgd_t *bm_pgdp;
p4d_t *bm_p4dp;
pud_t *bm_pudp;
/*
* The fixmap shares its top level pgd entry with the kernel
* mapping. This can really only occur when we are running
* with 16k/4 levels, so we can simply reuse the pud level
* entry instead.
*/
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
bm_pgdp = pgd_offset_pgd(pgdir, FIXADDR_TOT_START);
bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_TOT_START);
bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_TOT_START);
pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd));
pud_clear_fixmap();
} else {
BUG();
}
}
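fixmap_remap_fdt() above maps a single page first so it can read the FDT header, then widens the mapping to fdt_totalsize() only once the magic and size check out. A hedged sketch of how an early-boot caller consumes it, modeled on arm64's setup path (the helper shown is illustrative and not part of this diff):

```c
/*
 * Illustrative early-boot caller, modeled on arm64's setup_machine_fdt();
 * not part of this diff.
 */
static void __init map_fdt_and_scan(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);

	/* NULL covers a missing, misaligned, corrupt or oversized blob. */
	if (!dt_virt || !early_init_dt_scan(dt_virt))
		panic("Invalid device tree blob at physical address %pa\n",
		      &dt_phys);

	/* The blob is now readable through the fixmap window. */
}
```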
...
@@ -11,6 +11,7 @@
 #include <asm/cacheflush.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
+#include <asm/kfence.h>

 struct page_change_data {
 	pgprot_t set_mask;
@@ -22,12 +23,14 @@ bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);

 bool can_set_direct_map(void)
 {
 	/*
-	 * rodata_full, DEBUG_PAGEALLOC and KFENCE require linear map to be
+	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
 	 * mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
+	 *
+	 * KFENCE pool requires page-granular mapping if initialized late.
 	 */
 	return (rodata_enabled && rodata_full) || debug_pagealloc_enabled() ||
-		IS_ENABLED(CONFIG_KFENCE);
+		arm64_kfence_can_set_direct_map();
 }

 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
...
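can_set_direct_map() gates every helper that flips permissions on linear-map pages, so with this change KFENCE only forces page-granular mappings when its pool is initialized late. For context, a simplified sketch of one such caller, based on the set_direct_map_invalid_noflush() pattern in this same file (details abridged):

```c
/*
 * Sketch of a caller guarding on can_set_direct_map(); based on
 * arch/arm64/mm/pageattr.c, simplified for illustration.
 */
int set_direct_map_invalid_noflush(struct page *page)
{
	struct page_change_data data = {
		.set_mask = __pgprot(0),
		.clear_mask = __pgprot(PTE_VALID),
	};

	/* Block mappings in the linear map cannot be split: do nothing. */
	if (!can_set_direct_map())
		return 0;

	return apply_to_page_range(&init_mm,
				   (unsigned long)page_address(page),
				   PAGE_SIZE, change_page_range, &data);
}
```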
@@ -45,7 +45,7 @@ static struct addr_marker address_markers[] = {
 	{ MODULES_END,			"Modules end" },
 	{ VMALLOC_START,		"vmalloc() area" },
 	{ VMALLOC_END,			"vmalloc() end" },
-	{ FIXADDR_START,		"Fixmap start" },
+	{ FIXADDR_TOT_START,		"Fixmap start" },
 	{ FIXADDR_TOP,			"Fixmap end" },
 	{ PCI_IO_START,			"PCI I/O start" },
 	{ PCI_IO_END,			"PCI I/O end" },
...
@@ -814,6 +814,10 @@ void __init kfence_alloc_pool(void)
 	if (!kfence_sample_interval)
 		return;

+	/* if the pool has already been initialized by arch, skip the below. */
+	if (__kfence_pool)
+		return;
+
 	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

 	if (!__kfence_pool)
...
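The early return on a non-NULL __kfence_pool is what lets an architecture pre-allocate the pool from memblock while the linear map is still being built, keeping the rest of the linear map in large block mappings. A hedged sketch of the arch-side counterpart (the function name and call site are assumptions; on arm64 the series does this around map_mem()):

```c
/*
 * Sketch of an arch-side early pool allocation; helper name and call
 * site are assumptions, but the memblock pattern matches how this
 * series pre-populates __kfence_pool.
 */
static void __init arm64_kfence_alloc_pool(void)
{
	phys_addr_t kfence_pool;

	if (!kfence_early_init)
		return;

	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
	if (!kfence_pool) {
		pr_err("failed to allocate kfence pool\n");
		kfence_early_init = false;
		return;
	}

	/*
	 * The region must still be mapped at page granularity when the
	 * linear map is created; publishing the virtual address here is
	 * what makes kfence_alloc_pool() skip its own memblock_alloc().
	 */
	__kfence_pool = phys_to_virt(kfence_pool);
}
```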