Commit f04de6d8 authored by Huacai Chen

LoongArch: Add ARCH_HAS_SET_DIRECT_MAP support

Add set_direct_map_*() functions for setting the direct map alias for
a page to its default permissions and to an invalid state that cannot
be cached in a TLB (see commit d253ca0c ("x86/mm/cpa: Add
set_direct_map_*() functions")), and add a similar implementation for
LoongArch.

This fixes the KFENCE warnings during hibernation:

 ==================================================================
 BUG: KFENCE: invalid read in swsusp_save+0x368/0x4d8

 Invalid read at 0x00000000f7b89a3c:
  swsusp_save+0x368/0x4d8
  hibernation_snapshot+0x3f0/0x4e0
  hibernate+0x20c/0x440
  state_store+0x128/0x140
  kernfs_fop_write_iter+0x160/0x260
  vfs_write+0x2c0/0x520
  ksys_write+0x74/0x160
  do_syscall+0xb0/0x160

 CPU: 0 UID: 0 PID: 812 Comm: bash Tainted: G    B              6.11.0-rc1+ #1566
 Tainted: [B]=BAD_PAGE
 Hardware name: Loongson-LS3A5000-7A1000-1w-CRB, BIOS vUDK2018-LoongArch-V2.0.0 10/21/2022
 ==================================================================

Note: We can only set permissions for KVRANGE/XKVRANGE kernel addresses;
DMW-mapped addresses below vm_map_base have no page table entries, so
the new helpers leave them untouched and kernel_page_present() reports
them as always present.
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent e86935f7
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -26,6 +26,7 @@ config LOONGARCH
 	select ARCH_HAS_PTE_DEVMAP
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SET_MEMORY
+	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_INLINE_READ_LOCK if !PREEMPTION
 	select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION
--- a/arch/loongarch/include/asm/set_memory.h
+++ b/arch/loongarch/include/asm/set_memory.h
@@ -14,4 +14,8 @@ int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 
+bool kernel_page_present(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+int set_direct_map_invalid_noflush(struct page *page);
+
 #endif /* _ASM_LOONGARCH_SET_MEMORY_H */
--- a/arch/loongarch/mm/pageattr.c
+++ b/arch/loongarch/mm/pageattr.c
@@ -156,3 +156,63 @@ int set_memory_rw(unsigned long addr, int numpages)
 
 	return __set_memory(addr, numpages, __pgprot(_PAGE_WRITE | _PAGE_DIRTY), __pgprot(0));
 }
+
+bool kernel_page_present(struct page *page)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (addr < vm_map_base)
+		return true;
+
+	pgd = pgd_offset_k(addr);
+	if (pgd_none(pgdp_get(pgd)))
+		return false;
+	if (pgd_leaf(pgdp_get(pgd)))
+		return true;
+
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(p4dp_get(p4d)))
+		return false;
+	if (p4d_leaf(p4dp_get(p4d)))
+		return true;
+
+	pud = pud_offset(p4d, addr);
+	if (pud_none(pudp_get(pud)))
+		return false;
+	if (pud_leaf(pudp_get(pud)))
+		return true;
+
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(pmdp_get(pmd)))
+		return false;
+	if (pmd_leaf(pmdp_get(pmd)))
+		return true;
+
+	pte = pte_offset_kernel(pmd, addr);
+	return pte_present(ptep_get(pte));
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (addr < vm_map_base)
+		return 0;
+
+	return __set_memory(addr, 1, PAGE_KERNEL, __pgprot(0));
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (addr < vm_map_base)
+		return 0;
+
+	return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
+}
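
For illustration only (not part of this patch): the *_noflush suffix
means the caller owns TLB maintenance, so a hypothetical user of the
new helpers on a page-table-mapped (XKVRANGE) page would look roughly
like the sketch below; the function name is made up for this example.

  /* Hypothetical example. For a DMW-mapped page (addr < vm_map_base)
   * both calls are no-ops and kernel_page_present() stays true. */
  static void demo_direct_map_toggle(struct page *page)
  {
  	unsigned long addr = (unsigned long)page_address(page);
  
  	set_direct_map_invalid_noflush(page);	/* clears _PAGE_PRESENT | _PAGE_VALID */
  	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);	/* caller flushes */
  	WARN_ON(kernel_page_present(page));
  
  	set_direct_map_default_noflush(page);	/* restores PAGE_KERNEL */
  	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
  	WARN_ON(!kernel_page_present(page));
  }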