Commit cf6d429e authored by Linus Torvalds

Merge tag 'loongarch-fixes-6.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

Pull LoongArch fixes from Huacai Chen:
 "Enable general EFI poweroff method to make poweroff usable on
  hardwares which lack ACPI S5, use accessors to page table entries
  instead of direct dereference to avoid potential problems, and two
  trivial kvm cleanups"

* tag 'loongarch-fixes-6.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: KVM: Remove undefined a6 argument comment for kvm_hypercall()
  LoongArch: KVM: Remove unnecessary definition of KVM_PRIVATE_MEM_SLOTS
  LoongArch: Use accessors to page table entries instead of direct dereference
  LoongArch: Enable general EFI poweroff method
parents 660e4b18 494b0792
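
Most of the diff below replaces direct dereferences of page table entries (*ptep, *pmdp, ...) with accessors such as ptep_get() and pmdp_get(), which wrap READ_ONCE(), and replaces direct stores with WRITE_ONCE()-based setters. The following is a minimal user-space sketch of that pattern, not the kernel code: pte_t, READ_ONCE(), WRITE_ONCE(), ptep_get() and set_pte() here are simplified stand-ins for the LoongArch definitions touched in the patches. The point of the accessor is that each read of a live entry becomes one well-defined load the compiler cannot tear, repeat, or optimize away.

#include <stdio.h>

/* Simplified stand-ins for the kernel's pte_t, READ_ONCE() and WRITE_ONCE(). */
typedef struct { unsigned long pte; } pte_t;

#define READ_ONCE(x)     (*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile __typeof__(x) *)&(x) = (v))

/* Accessor: one untorn load of the entry, instead of a plain *ptep. */
static inline pte_t ptep_get(pte_t *ptep)
{
        return (pte_t){ READ_ONCE(ptep->pte) };
}

/* Setter: one untorn store, instead of a plain *ptep = val. */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
        WRITE_ONCE(ptep->pte, pteval.pte);
}

int main(void)
{
        pte_t entry = { 0 };

        set_pte(&entry, (pte_t){ 0x1234UL });
        pte_t pte = ptep_get(&entry);   /* rather than: pte_t pte = *(&entry); */

        printf("pte value: %#lx\n", pte.pte);
        return 0;
}

The real LoongArch definitions appear in the pgtable.h hunks further down: ptep_get()/pmdp_get() as READ_ONCE() wrappers, and set_p4d()/set_pud()/set_pmd()/set_pte() as WRITE_ONCE() wrappers.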
@@ -34,7 +34,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
                                             unsigned long addr, pte_t *ptep)
 {
         pte_t clear;
-        pte_t pte = *ptep;
+        pte_t pte = ptep_get(ptep);
         pte_val(clear) = (unsigned long)invalid_pte_table;
         set_pte_at(mm, addr, ptep, clear);
@@ -65,7 +65,7 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                              pte_t *ptep, pte_t pte,
                                              int dirty)
 {
-        int changed = !pte_same(*ptep, pte);
+        int changed = !pte_same(ptep_get(ptep), pte);
         if (changed) {
                 set_pte_at(vma->vm_mm, addr, ptep, pte);
@@ -53,13 +53,13 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
         pte_t *pte = virt_to_kpte(addr);
-        if (WARN_ON(!pte) || pte_none(*pte))
+        if (WARN_ON(!pte) || pte_none(ptep_get(pte)))
                 return false;
         if (protect)
-                set_pte(pte, __pte(pte_val(*pte) & ~(_PAGE_VALID | _PAGE_PRESENT)));
+                set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~(_PAGE_VALID | _PAGE_PRESENT)));
         else
-                set_pte(pte, __pte(pte_val(*pte) | (_PAGE_VALID | _PAGE_PRESENT)));
+                set_pte(pte, __pte(pte_val(ptep_get(pte)) | (_PAGE_VALID | _PAGE_PRESENT)));
         preempt_disable();
         local_flush_tlb_one(addr);
@@ -26,8 +26,6 @@
 #define KVM_MAX_VCPUS                   256
 #define KVM_MAX_CPUCFG_REGS             21
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS           0
 #define KVM_HALT_POLL_NS_DEFAULT        500000
 #define KVM_REQ_TLB_FLUSH_GPA           KVM_ARCH_REQ(0)
@@ -39,9 +39,9 @@ struct kvm_steal_time {
  * Hypercall interface for KVM hypervisor
  *
  * a0: function identifier
- * a1-a6: args
+ * a1-a5: args
  * Return value will be placed in a0.
- * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6.
+ * Up to 5 arguments are passed in a1, a2, a3, a4, a5.
  */
 static __always_inline long kvm_hypercall0(u64 fid)
 {
@@ -106,6 +106,9 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define KFENCE_AREA_START       (VMEMMAP_END + 1)
 #define KFENCE_AREA_END         (KFENCE_AREA_START + KFENCE_AREA_SIZE - 1)
+#define ptep_get(ptep) READ_ONCE(*(ptep))
+#define pmdp_get(pmdp) READ_ONCE(*(pmdp))
 #define pte_ERROR(e) \
         pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
 #ifndef __PAGETABLE_PMD_FOLDED
@@ -147,11 +150,6 @@ static inline int p4d_present(p4d_t p4d)
         return p4d_val(p4d) != (unsigned long)invalid_pud_table;
 }
-static inline void p4d_clear(p4d_t *p4dp)
-{
-        p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
-}
 static inline pud_t *p4d_pgtable(p4d_t p4d)
 {
         return (pud_t *)p4d_val(p4d);
@@ -159,7 +157,12 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
 static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
 {
-        *p4d = p4dval;
+        WRITE_ONCE(*p4d, p4dval);
+}
+static inline void p4d_clear(p4d_t *p4dp)
+{
+        set_p4d(p4dp, __p4d((unsigned long)invalid_pud_table));
 }
 #define p4d_phys(p4d)           PHYSADDR(p4d_val(p4d))
@@ -193,17 +196,20 @@ static inline int pud_present(pud_t pud)
         return pud_val(pud) != (unsigned long)invalid_pmd_table;
 }
-static inline void pud_clear(pud_t *pudp)
-{
-        pud_val(*pudp) = ((unsigned long)invalid_pmd_table);
-}
 static inline pmd_t *pud_pgtable(pud_t pud)
 {
         return (pmd_t *)pud_val(pud);
 }
-#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
+static inline void set_pud(pud_t *pud, pud_t pudval)
+{
+        WRITE_ONCE(*pud, pudval);
+}
+static inline void pud_clear(pud_t *pudp)
+{
+        set_pud(pudp, __pud((unsigned long)invalid_pmd_table));
+}
 #define pud_phys(pud)           PHYSADDR(pud_val(pud))
 #define pud_page(pud)           (pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
@@ -231,12 +237,15 @@ static inline int pmd_present(pmd_t pmd)
         return pmd_val(pmd) != (unsigned long)invalid_pte_table;
 }
-static inline void pmd_clear(pmd_t *pmdp)
-{
-        pmd_val(*pmdp) = ((unsigned long)invalid_pte_table);
-}
-#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)
+static inline void set_pmd(pmd_t *pmd, pmd_t pmdval)
+{
+        WRITE_ONCE(*pmd, pmdval);
+}
+static inline void pmd_clear(pmd_t *pmdp)
+{
+        set_pmd(pmdp, __pmd((unsigned long)invalid_pte_table));
+}
 #define pmd_phys(pmd)           PHYSADDR(pmd_val(pmd))
@@ -314,7 +323,8 @@ extern void paging_init(void);
 static inline void set_pte(pte_t *ptep, pte_t pteval)
 {
-        *ptep = pteval;
+        WRITE_ONCE(*ptep, pteval);
         if (pte_val(pteval) & _PAGE_GLOBAL) {
                 pte_t *buddy = ptep_buddy(ptep);
                 /*
@@ -341,8 +351,8 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
                 : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
                 : [global] "r" (page_global));
 #else /* !CONFIG_SMP */
-                if (pte_none(*buddy))
-                        pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+                if (pte_none(ptep_get(buddy)))
+                        WRITE_ONCE(*buddy, __pte(pte_val(ptep_get(buddy)) | _PAGE_GLOBAL));
 #endif /* CONFIG_SMP */
         }
 }
@@ -350,7 +360,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
         /* Preserve global status for the pair */
-        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
+        if (pte_val(ptep_get(ptep_buddy(ptep))) & _PAGE_GLOBAL)
                 set_pte(ptep, __pte(_PAGE_GLOBAL));
         else
                 set_pte(ptep, __pte(0));
@@ -603,7 +613,7 @@ static inline pmd_t pmd_mkinvalid(pmd_t pmd)
 static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
                                             unsigned long address, pmd_t *pmdp)
 {
-        pmd_t old = *pmdp;
+        pmd_t old = pmdp_get(pmdp);
         pmd_clear(pmdp);
@@ -66,6 +66,12 @@ void __init efi_runtime_init(void)
         set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 }
+bool efi_poweroff_required(void)
+{
+        return efi_enabled(EFI_RUNTIME_SERVICES) &&
+               (acpi_gbl_reduced_hardware || acpi_no_s5);
+}
 unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
 #if defined(CONFIG_SYSFB) || defined(CONFIG_EFI_EARLYCON)
@@ -714,19 +714,19 @@ static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn,
          * value) and then p*d_offset() walks into the target huge page instead
          * of the old page table (sees the new value).
          */
-        pgd = READ_ONCE(*pgd_offset(kvm->mm, hva));
+        pgd = pgdp_get(pgd_offset(kvm->mm, hva));
         if (pgd_none(pgd))
                 goto out;
-        p4d = READ_ONCE(*p4d_offset(&pgd, hva));
+        p4d = p4dp_get(p4d_offset(&pgd, hva));
         if (p4d_none(p4d) || !p4d_present(p4d))
                 goto out;
-        pud = READ_ONCE(*pud_offset(&p4d, hva));
+        pud = pudp_get(pud_offset(&p4d, hva));
         if (pud_none(pud) || !pud_present(pud))
                 goto out;
-        pmd = READ_ONCE(*pmd_offset(&pud, hva));
+        pmd = pmdp_get(pmd_offset(&pud, hva));
         if (pmd_none(pmd) || !pmd_present(pmd))
                 goto out;
@@ -39,11 +39,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
         pmd_t *pmd = NULL;
         pgd = pgd_offset(mm, addr);
-        if (pgd_present(*pgd)) {
+        if (pgd_present(pgdp_get(pgd))) {
                 p4d = p4d_offset(pgd, addr);
-                if (p4d_present(*p4d)) {
+                if (p4d_present(p4dp_get(p4d))) {
                         pud = pud_offset(p4d, addr);
-                        if (pud_present(*pud))
+                        if (pud_present(pudp_get(pud)))
                                 pmd = pmd_offset(pud, addr);
                 }
         }
@@ -141,7 +141,7 @@ void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
 int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
                                 unsigned long addr, unsigned long next)
 {
-        int huge = pmd_val(*pmd) & _PAGE_HUGE;
+        int huge = pmd_val(pmdp_get(pmd)) & _PAGE_HUGE;
         if (huge)
                 vmemmap_verify((pte_t *)pmd, node, addr, next);
@@ -173,7 +173,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
         pud_t *pud;
         pmd_t *pmd;
-        if (p4d_none(*p4d)) {
+        if (p4d_none(p4dp_get(p4d))) {
                 pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                 if (!pud)
                         panic("%s: Failed to allocate memory\n", __func__);
@@ -184,7 +184,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
         }
         pud = pud_offset(p4d, addr);
-        if (pud_none(*pud)) {
+        if (pud_none(pudp_get(pud))) {
                 pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
                 if (!pmd)
                         panic("%s: Failed to allocate memory\n", __func__);
@@ -195,7 +195,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr)
         }
         pmd = pmd_offset(pud, addr);
-        if (!pmd_present(*pmd)) {
+        if (!pmd_present(pmdp_get(pmd))) {
                 pte_t *pte;
                 pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
@@ -216,7 +216,7 @@ void __init __set_fixmap(enum fixed_addresses idx,
         BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
         ptep = populate_kernel_pte(addr);
-        if (!pte_none(*ptep)) {
+        if (!pte_none(ptep_get(ptep))) {
                 pte_ERROR(*ptep);
                 return;
         }
@@ -105,7 +105,7 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
 {
-        if (__pmd_none(early, READ_ONCE(*pmdp))) {
+        if (__pmd_none(early, pmdp_get(pmdp))) {
                 phys_addr_t pte_phys = early ?
                                 __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
                 if (!early)
@@ -118,7 +118,7 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
 static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
 {
-        if (__pud_none(early, READ_ONCE(*pudp))) {
+        if (__pud_none(early, pudp_get(pudp))) {
                 phys_addr_t pmd_phys = early ?
                                 __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
                 if (!early)
@@ -131,7 +131,7 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
 static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
 {
-        if (__p4d_none(early, READ_ONCE(*p4dp))) {
+        if (__p4d_none(early, p4dp_get(p4dp))) {
                 phys_addr_t pud_phys = early ?
                                 __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
                 if (!early)
@@ -154,7 +154,7 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
                                       : kasan_alloc_zeroed_page(node);
                 next = addr + PAGE_SIZE;
                 set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
-        } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+        } while (ptep++, addr = next, addr != end && __pte_none(early, ptep_get(ptep)));
 }
 static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
@@ -166,7 +166,7 @@ static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
         do {
                 next = pmd_addr_end(addr, end);
                 kasan_pte_populate(pmdp, addr, next, node, early);
-        } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+        } while (pmdp++, addr = next, addr != end && __pmd_none(early, pmdp_get(pmdp)));
 }
 static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
@@ -128,7 +128,7 @@ pmd_t mk_pmd(struct page *page, pgprot_t prot)
 void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                 pmd_t *pmdp, pmd_t pmd)
 {
-        *pmdp = pmd;
+        WRITE_ONCE(*pmdp, pmd);
         flush_tlb_all();
 }