Commit cd16ed33 authored by Linus Torvalds

Merge tag 'riscv-for-linus-5.8-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull more RISC-V updates from Palmer Dabbelt:

 - Kconfig select statements are now sorted alphanumerically

 - first-level interrupts are now handled via a full irqchip driver

 - CPU hotplug is fixed

 - vDSO calls now use the common vDSO infrastructure
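For context, the userspace side this speeds up looks like the sketch below. It is illustrative only (it assumes a 64-bit RISC-V system and a libc that dispatches through the vDSO); with the common vDSO in place, both calls normally resolve to __vdso_clock_gettime()/__vdso_gettimeofday() and return without trapping into the kernel:

#include <stdio.h>
#include <time.h>
#include <sys/time.h>

int main(void)
{
	struct timespec ts;
	struct timeval tv;

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* vDSO fast path */
	gettimeofday(&tv, NULL);		/* vDSO fast path */
	printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
	return 0;
}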

* tag 'riscv-for-linus-5.8-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: set the permission of vdso_data to read-only
  riscv: use vDSO common flow to reduce the latency of the time-related functions
  riscv: fix build warning of missing prototypes
  RISC-V: Don't mark init section as non-executable
  RISC-V: Force select RISCV_INTC for CONFIG_RISCV
  RISC-V: Remove do_IRQ() function
  clocksource/drivers/timer-riscv: Use per-CPU timer interrupt
  irqchip: RISC-V per-HART local interrupt controller driver
  RISC-V: Rename and move plic_find_hart_id() to arch directory
  RISC-V: self-contained IPI handling routine
  RISC-V: Sort select statements alphanumerically
parents 55d728b2 01f76386
@@ -12,64 +12,70 @@ config 32BIT
config RISCV
	def_bool y
-	select OF
-	select OF_EARLY_FLATTREE
-	select OF_IRQ
+	select ARCH_CLOCKSOURCE_INIT
	select ARCH_HAS_BINFMT_FLAT
+	select ARCH_HAS_DEBUG_VIRTUAL if MMU
	select ARCH_HAS_DEBUG_WX
+	select ARCH_HAS_GCOV_PROFILE_ALL
+	select ARCH_HAS_GIGANTIC_PAGE
+	select ARCH_HAS_MMIOWB
+	select ARCH_HAS_PTE_SPECIAL
+	select ARCH_HAS_SET_DIRECT_MAP
+	select ARCH_HAS_SET_MEMORY
+	select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
	select ARCH_WANT_FRAME_POINTERS
+	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
	select CLONE_BACKWARDS
	select COMMON_CLK
+	select EDAC_SUPPORT
+	select GENERIC_ARCH_TOPOLOGY if SMP
+	select GENERIC_ATOMIC64 if !64BIT
	select GENERIC_CLOCKEVENTS
+	select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+	select GENERIC_IOREMAP
+	select GENERIC_IRQ_MULTI_HANDLER
	select GENERIC_IRQ_SHOW
	select GENERIC_PCI_IOMAP
+	select GENERIC_PTDUMP if MMU
	select GENERIC_SCHED_CLOCK
+	select GENERIC_SMP_IDLE_THREAD
	select GENERIC_STRNCPY_FROM_USER if MMU
	select GENERIC_STRNLEN_USER if MMU
-	select GENERIC_SMP_IDLE_THREAD
-	select GENERIC_ATOMIC64 if !64BIT
-	select GENERIC_IOREMAP
-	select GENERIC_PTDUMP if MMU
+	select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+	select HANDLE_DOMAIN_IRQ
	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_ARCH_KASAN if MMU && 64BIT
+	select HAVE_ARCH_KGDB
+	select HAVE_ARCH_KGDB_QXFER_PKT
+	select HAVE_ARCH_MMAP_RND_BITS if MMU
	select HAVE_ARCH_SECCOMP_FILTER
+	select HAVE_ARCH_TRACEHOOK
	select HAVE_ASM_MODVERSIONS
+	select HAVE_COPY_THREAD_TLS
	select HAVE_DMA_CONTIGUOUS if MMU
+	select HAVE_EBPF_JIT if MMU
	select HAVE_FUTEX_CMPXCHG if FUTEX
+	select HAVE_GENERIC_VDSO if MMU && 64BIT
+	select HAVE_PCI
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_SYSCALL_TRACEPOINTS
	select IRQ_DOMAIN
-	select SPARSE_IRQ
-	select SYSCTL_EXCEPTION_TRACE
-	select HAVE_ARCH_TRACEHOOK
-	select HAVE_PCI
	select MODULES_USE_ELF_RELA if MODULES
	select MODULE_SECTIONS if MODULES
-	select THREAD_INFO_IN_TASK
+	select OF
+	select OF_EARLY_FLATTREE
+	select OF_IRQ
	select PCI_DOMAINS_GENERIC if PCI
	select PCI_MSI if PCI
+	select RISCV_INTC
	select RISCV_TIMER
-	select GENERIC_IRQ_MULTI_HANDLER
-	select GENERIC_ARCH_TOPOLOGY if SMP
-	select ARCH_HAS_PTE_SPECIAL
-	select ARCH_HAS_MMIOWB
-	select ARCH_HAS_DEBUG_VIRTUAL if MMU
-	select HAVE_EBPF_JIT if MMU
-	select EDAC_SUPPORT
-	select ARCH_HAS_GIGANTIC_PAGE
-	select ARCH_HAS_SET_DIRECT_MAP
-	select ARCH_HAS_SET_MEMORY
-	select ARCH_HAS_STRICT_KERNEL_RWX if MMU
-	select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
	select SPARSEMEM_STATIC if 32BIT
-	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
-	select HAVE_ARCH_MMAP_RND_BITS if MMU
-	select ARCH_HAS_GCOV_PROFILE_ALL
-	select HAVE_COPY_THREAD_TLS
-	select HAVE_ARCH_KASAN if MMU && 64BIT
-	select HAVE_ARCH_KGDB
-	select HAVE_ARCH_KGDB_QXFER_PKT
+	select SPARSE_IRQ
+	select SYSCTL_EXCEPTION_TRACE
+	select THREAD_INFO_IN_TASK

config ARCH_MMAP_RND_BITS_MIN
	default 18 if 64BIT

@@ -196,11 +202,11 @@ config ARCH_RV64I
	bool "RV64I"
	select 64BIT
	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
-	select HAVE_FUNCTION_TRACER
-	select HAVE_FUNCTION_GRAPH_TRACER
-	select HAVE_FTRACE_MCOUNT_RECORD
	select HAVE_DYNAMIC_FTRACE if MMU
	select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+	select HAVE_FTRACE_MCOUNT_RECORD
+	select HAVE_FUNCTION_GRAPH_TRACER
+	select HAVE_FUNCTION_TRACER
	select SWIOTLB if MMU
endchoice
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_CLOCKSOURCE_H
#define _ASM_CLOCKSOURCE_H
#include <asm/vdso/clocksource.h>
#endif
@@ -10,11 +10,6 @@
#include <linux/interrupt.h>
#include <linux/linkage.h>

-#define NR_IRQS 0
-
-void riscv_timer_interrupt(void);
-void riscv_software_interrupt(void);
-
#include <asm-generic/irq.h>

#endif /* _ASM_RISCV_IRQ_H */
@@ -8,6 +8,8 @@
#include <linux/const.h>

+#include <vdso/processor.h>
+
#include <asm/ptrace.h>

/*
@@ -58,16 +60,6 @@ static inline void release_thread(struct task_struct *dead_task)

extern unsigned long get_wchan(struct task_struct *p);

-static inline void cpu_relax(void)
-{
-#ifdef __riscv_muldiv
-	int dummy;
-	/* In lieu of a halt instruction, induce a long-latency stall. */
-	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
-#endif
-	barrier();
-}
-
static inline void wait_for_interrupt(void)
{
	__asm__ __volatile__ ("wfi");
@@ -75,6 +67,7 @@ static inline void wait_for_interrupt(void)
struct device_node;
int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
extern void riscv_fill_hwcap(void);
...
@@ -28,6 +28,9 @@ void show_ipi_stats(struct seq_file *p, int prec);
/* SMP initialization hook for setup_arch */
void __init setup_smp(void);

+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
/* Hook for the generic smp_call_function_many() routine. */
void arch_send_call_function_ipi_mask(struct cpumask *mask);
...
@@ -10,8 +10,10 @@
#include <linux/types.h>

+#ifndef GENERIC_TIME_VSYSCALL
struct vdso_data {
};
+#endif

/*
 * The VDSO symbols are mapped into Linux so we can just use regular symbol
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSOCLOCKSOURCE_H
#define __ASM_VDSOCLOCKSOURCE_H
#define VDSO_ARCH_CLOCKMODES \
VDSO_CLOCKMODE_ARCHTIMER
#endif
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H
#ifndef __ASSEMBLY__
#include <asm/unistd.h>
#include <asm/csr.h>
#include <uapi/linux/time.h>
#define VDSO_HAS_CLOCK_GETRES 1
static __always_inline
int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
struct timezone *_tz)
{
register struct __kernel_old_timeval *tv asm("a0") = _tv;
register struct timezone *tz asm("a1") = _tz;
register long ret asm("a0");
register long nr asm("a7") = __NR_gettimeofday;
asm volatile ("ecall\n"
: "=r" (ret)
: "r"(tv), "r"(tz), "r"(nr)
: "memory");
return ret;
}
static __always_inline
long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
register clockid_t clkid asm("a0") = _clkid;
register struct __kernel_timespec *ts asm("a1") = _ts;
register long ret asm("a0");
register long nr asm("a7") = __NR_clock_gettime;
asm volatile ("ecall\n"
: "=r" (ret)
: "r"(clkid), "r"(ts), "r"(nr)
: "memory");
return ret;
}
static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
register clockid_t clkid asm("a0") = _clkid;
register struct __kernel_timespec *ts asm("a1") = _ts;
register long ret asm("a0");
register long nr asm("a7") = __NR_clock_getres;
asm volatile ("ecall\n"
: "=r" (ret)
: "r"(clkid), "r"(ts), "r"(nr)
: "memory");
return ret;
}
static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
{
	/*
	 * The purpose of csr_read(CSR_TIME) is to trap the system into
	 * M-mode to obtain the value of CSR_TIME. Hence, unlike on other
	 * architectures, no fence instructions surround the csr_read().
	 */
return csr_read(CSR_TIME);
}
static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
{
return _vdso_data;
}
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
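All three fallbacks above encode the same RISC-V Linux syscall convention: arguments in a0/a1, the syscall number in a7, an ecall, and the result back in a0. A minimal sketch of that pattern in isolation (a hypothetical helper, not something this commit adds):

static __always_inline long riscv_syscall2(long nr, long arg0, long arg1)
{
	register long a0 asm("a0") = arg0;	/* first argument, also holds the return value */
	register long a1 asm("a1") = arg1;	/* second argument */
	register long a7 asm("a7") = nr;	/* syscall number */

	asm volatile ("ecall" : "+r" (a0) : "r" (a1), "r" (a7) : "memory");

	return a0;
}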
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_VDSO_PROCESSOR_H
#define __ASM_VDSO_PROCESSOR_H
#ifndef __ASSEMBLY__
static inline void cpu_relax(void)
{
#ifdef __riscv_muldiv
int dummy;
/* In lieu of a halt instruction, induce a long-latency stall. */
__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
#endif
barrier();
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_VDSO_PROCESSOR_H */
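One detail worth spelling out for the stall above: RISC-V integer division by zero does not trap (the result is architecturally defined as all bits set), so the div is safe here; it is merely a long-latency instruction that backs the hart off. That is also why cpu_relax() now lives under asm/vdso/, where it may run in user-mapped vDSO context: wfi is a privileged instruction and would not be usable there.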
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_VDSO_VSYSCALL_H
#define __ASM_VDSO_VSYSCALL_H
#ifndef __ASSEMBLY__
#include <linux/timekeeper_internal.h>
#include <vdso/datapage.h>
extern struct vdso_data *vdso_data;
/*
* Update the vDSO data page to keep in sync with kernel timekeeping.
*/
static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
{
return vdso_data;
}
#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
/* The asm-generic header needs to be included after the definitions above */
#include <asm-generic/vdso/vsyscall.h>
#endif /* !__ASSEMBLY__ */
#endif /* __ASM_VDSO_VSYSCALL_H */
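A rough sketch of how this hook is consumed, for orientation (simplified shape, not the literal generic code): on every timekeeping update the core calls update_vsyscall(), and the asm-generic vsyscall.h implementation writes through the pointer returned by __arch_get_k_vdso_data():

void update_vsyscall(struct timekeeper *tk)
{
	struct vdso_data *vdata = __arch_get_k_vdso_data();

	/* ...open the seqcount, fill vdata[] from tk, close the seqcount... */
}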
@@ -44,6 +44,22 @@ int riscv_of_processor_hartid(struct device_node *node)
	return hart;
}

+/*
+ * Find the hart ID of the CPU DT node under which the given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the hart ID from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+	for (; node; node = node->parent) {
+		if (of_device_is_compatible(node, "riscv"))
+			return riscv_of_processor_hartid(node);
+	}
+
+	return -1;
+}
+
#ifdef CONFIG_PROC_FS

static void print_isa(struct seq_file *f, const char *isa)
...
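riscv_of_parent_hartid() and the INTC/PLIC probing further down rely on the standard device-tree shape in which each hart's local interrupt controller ("riscv,cpu-intc") sits beneath its CPU node. An illustrative fragment (placeholder values, not taken from this commit):

cpus {
	#address-cells = <1>;
	#size-cells = <0>;

	cpu@0 {
		compatible = "riscv";
		device_type = "cpu";
		reg = <0>;
		riscv,isa = "rv64imafdc";

		cpu0_intc: interrupt-controller {
			compatible = "riscv,cpu-intc";
			interrupt-controller;
			#interrupt-cells = <1>;
		};
	};
};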
@@ -106,7 +106,9 @@ _save_context:

	/* Handle interrupts */
	move a0, sp /* pt_regs */
-	tail do_IRQ
+	la a1, handle_arch_irq
+	REG_L a1, (a1)
+	jr a1
1:
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
...
@@ -7,7 +7,6 @@

#include <linux/interrupt.h>
#include <linux/irqchip.h>
-#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <asm/smp.h>

@@ -17,37 +16,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
	return 0;
}

-asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-
-	irq_enter();
-	switch (regs->cause & ~CAUSE_IRQ_FLAG) {
-	case RV_IRQ_TIMER:
-		riscv_timer_interrupt();
-		break;
-#ifdef CONFIG_SMP
-	case RV_IRQ_SOFT:
-		/*
-		 * We only use software interrupts to pass IPIs, so if a non-SMP
-		 * system gets one, then we don't know what to do.
-		 */
-		riscv_software_interrupt();
-		break;
-#endif
-	case RV_IRQ_EXT:
-		handle_arch_irq(regs);
-		break;
-	default:
-		pr_alert("unexpected interrupt cause 0x%lx", regs->cause);
-		BUG();
-	}
-	irq_exit();
-
-	set_irq_regs(old_regs);
-}
-
void __init init_IRQ(void)
{
	irqchip_init();
+	if (!handle_arch_irq)
+		panic("No interrupt controller found.");
}
@@ -11,6 +11,7 @@
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
+#include <asm/patch.h>

struct patch_insn {
	void *addr;
...
@@ -123,11 +123,14 @@ static inline void clear_ipi(void)
	clint_clear_ipi(cpuid_to_hartid_map(smp_processor_id()));
}

-void riscv_software_interrupt(void)
+void handle_IPI(struct pt_regs *regs)
{
+	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
	unsigned long *stats = ipi_data[smp_processor_id()].stats;

+	irq_enter();
+
	clear_ipi();

	while (true) {
@@ -138,7 +141,7 @@ void riscv_software_interrupt(void)
		ops = xchg(pending_ipis, 0);
		if (ops == 0)
-			return;
+			goto done;

		if (ops & (1 << IPI_RESCHEDULE)) {
			stats[IPI_RESCHEDULE]++;
@@ -160,6 +163,10 @@
		/* Order data access and bit testing. */
		mb();
	}
+
+done:
+	irq_exit();
+	set_irq_regs(old_regs);
}

static const char * const ipi_names[] = {
...
@@ -26,3 +26,12 @@ void __init time_init(void)
	lpj_fine = riscv_timebase / HZ;
	timer_probe();
}
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+	cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+	cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
@@ -183,6 +183,4 @@ void trap_init(void)
	csr_write(CSR_SCRATCH, 0);
	/* Set the exception vector address */
	csr_write(CSR_TVEC, &handle_exception);
-	/* Enable interrupts */
-	csr_write(CSR_IE, IE_SIE);
}
@@ -11,8 +11,12 @@
#include <linux/slab.h>
#include <linux/binfmts.h>
#include <linux/err.h>
+#include <asm/page.h>
+#ifdef GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
#include <asm/vdso.h>
+#endif

extern char vdso_start[], vdso_end[];

@@ -26,7 +30,7 @@ static union {
	struct vdso_data data;
	u8 page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
-static struct vdso_data *vdso_data = &vdso_data_store.data;
+struct vdso_data *vdso_data = &vdso_data_store.data;

static int __init vdso_init(void)
{
@@ -75,13 +79,22 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
	 */
	mm->context.vdso = (void *)vdso_base;

-	ret = install_special_mapping(mm, vdso_base, vdso_len,
+	ret =
+		install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
		(VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
		vdso_pagelist);

-	if (unlikely(ret))
+	if (unlikely(ret)) {
		mm->context.vdso = NULL;
+		goto end;
+	}
+
+	vdso_base += (vdso_pages << PAGE_SHIFT);
+	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+		(VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+	if (unlikely(ret))
+		mm->context.vdso = NULL;
end:
	mmap_write_unlock(mm);
	return ret;
@@ -91,5 +104,8 @@ const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
		return "[vdso]";
+	if (vma->vm_mm && (vma->vm_start ==
+			   (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+		return "[vdso_data]";
	return NULL;
}
# SPDX-License-Identifier: GPL-2.0-only
# Copied from arch/tile/kernel/vdso/Makefile
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile

# Symbols present in the vdso
vdso-syms  = rt_sigreturn
ifdef CONFIG_64BIT
-vdso-syms += gettimeofday
-vdso-syms += clock_gettime
-vdso-syms += clock_getres
+vdso-syms += vgettimeofday
endif
vdso-syms += getcpu
vdso-syms += flush_icache

@@ -14,6 +16,10 @@ vdso-syms += flush_icache
# Files to link into the vdso
obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o

+ifneq ($(c-gettimeofday-y),)
+	CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y)
+endif
+
# Build rules
targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-dummy.o
obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
...
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 SiFive
*/
#include <linux/linkage.h>
#include <asm/unistd.h>
.text
/* int __vdso_clock_getres(clockid_t clock_id, struct timespec *res); */
ENTRY(__vdso_clock_getres)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_clock_getres
ecall
ret
.cfi_endproc
ENDPROC(__vdso_clock_getres)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 SiFive
*/
#include <linux/linkage.h>
#include <asm/unistd.h>
.text
/* int __vdso_clock_gettime(clockid_t clock_id, struct timespec *tp); */
ENTRY(__vdso_clock_gettime)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_clock_gettime
ecall
ret
.cfi_endproc
ENDPROC(__vdso_clock_gettime)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2017 SiFive
*/
#include <linux/linkage.h>
#include <asm/unistd.h>
.text
/* int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz); */
ENTRY(__vdso_gettimeofday)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_gettimeofday
ecall
ret
.cfi_endproc
ENDPROC(__vdso_gettimeofday)
@@ -2,11 +2,13 @@
/*
 * Copyright (C) 2012 Regents of the University of California
 */
+#include <asm/page.h>

OUTPUT_ARCH(riscv)

SECTIONS
{
+	PROVIDE(_vdso_data = . + PAGE_SIZE);
	. = SIZEOF_HEADERS;

	.hash : { *(.hash) } :text
...
// SPDX-License-Identifier: GPL-2.0
/*
* Copied from arch/arm64/kernel/vdso/vgettimeofday.c
*
* Copyright (C) 2018 ARM Ltd.
* Copyright (C) 2020 SiFive
*/
#include <linux/time.h>
#include <linux/types.h>
int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
return __cvdso_clock_gettime(clock, ts);
}
int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
return __cvdso_gettimeofday(tv, tz);
}
int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
{
return __cvdso_clock_getres(clock_id, res);
}
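Note that this file never declares the __cvdso_*() helpers it calls: the CFLAGS_vgettimeofday.o += -include $(c-gettimeofday-y) rule added to the vDSO Makefile above textually includes the generic lib/vdso/gettimeofday.c implementation into this translation unit, which is the standard pattern for the common vDSO.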
@@ -480,17 +480,6 @@ static void __init setup_vm_final(void)
	csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
	local_flush_tlb_all();
}
-
-void free_initmem(void)
-{
-	unsigned long init_begin = (unsigned long)__init_begin;
-	unsigned long init_end = (unsigned long)__init_end;
-
-	/* Make the region non-executable. */
-	set_memory_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
-	free_initmem_default(POISON_FREE_INITMEM);
-}
-
#else
asmlinkage void __init setup_vm(uintptr_t dtb_pa)
{
...
@@ -12,8 +12,11 @@
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/interrupt.h>
+#include <linux/of_irq.h>
#include <asm/smp.h>
#include <asm/sbi.h>

@@ -39,6 +42,7 @@ static int riscv_clock_next_event(unsigned long delta,
	return 0;
}

+static unsigned int riscv_clock_event_irq;
static DEFINE_PER_CPU(struct clock_event_device, riscv_clock_event) = {
	.name = "riscv_timer_clockevent",
	.features = CLOCK_EVT_FEAT_ONESHOT,
@@ -74,30 +78,36 @@ static int riscv_timer_starting_cpu(unsigned int cpu)
	struct clock_event_device *ce = per_cpu_ptr(&riscv_clock_event, cpu);

	ce->cpumask = cpumask_of(cpu);
+	ce->irq = riscv_clock_event_irq;
	clockevents_config_and_register(ce, riscv_timebase, 100, 0x7fffffff);

-	csr_set(CSR_IE, IE_TIE);
+	enable_percpu_irq(riscv_clock_event_irq,
+			  irq_get_trigger_type(riscv_clock_event_irq));
	return 0;
}

static int riscv_timer_dying_cpu(unsigned int cpu)
{
-	csr_clear(CSR_IE, IE_TIE);
+	disable_percpu_irq(riscv_clock_event_irq);
	return 0;
}

/* called directly from the low-level interrupt handler */
-void riscv_timer_interrupt(void)
+static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evdev = this_cpu_ptr(&riscv_clock_event);

	csr_clear(CSR_IE, IE_TIE);
	evdev->event_handler(evdev);
+
+	return IRQ_HANDLED;
}

static int __init riscv_timer_init_dt(struct device_node *n)
{
	int cpuid, hartid, error;
+	struct device_node *child;
+	struct irq_domain *domain;

	hartid = riscv_of_processor_hartid(n);
	if (hartid < 0) {
@@ -115,6 +125,25 @@ static int __init riscv_timer_init_dt(struct device_node *n)
	if (cpuid != smp_processor_id())
		return 0;

+	domain = NULL;
+	child = of_get_compatible_child(n, "riscv,cpu-intc");
+	if (!child) {
+		pr_err("Failed to find INTC node [%pOF]\n", n);
+		return -ENODEV;
+	}
+	domain = irq_find_host(child);
+	of_node_put(child);
+	if (!domain) {
+		pr_err("Failed to find IRQ domain for node [%pOF]\n", n);
+		return -ENODEV;
+	}
+
+	riscv_clock_event_irq = irq_create_mapping(domain, RV_IRQ_TIMER);
+	if (!riscv_clock_event_irq) {
+		pr_err("Failed to map timer interrupt for node [%pOF]\n", n);
+		return -ENODEV;
+	}
+
	pr_info("%s: Registering clocksource cpuid [%d] hartid [%d]\n",
	       __func__, cpuid, hartid);
	error = clocksource_register_hz(&riscv_clocksource, riscv_timebase);
@@ -126,6 +155,14 @@ static int __init riscv_timer_init_dt(struct device_node *n)

	sched_clock_register(riscv_sched_clock, 64, riscv_timebase);

+	error = request_percpu_irq(riscv_clock_event_irq,
+				   riscv_timer_interrupt,
+				   "riscv-timer", &riscv_clock_event);
+	if (error) {
+		pr_err("registering percpu irq failed [%d]\n", error);
+		return error;
+	}
+
	error = cpuhp_setup_state(CPUHP_AP_RISCV_TIMER_STARTING,
			 "clockevents/riscv/timer:starting",
			 riscv_timer_starting_cpu, riscv_timer_dying_cpu);
...
@@ -493,6 +493,19 @@ config TI_SCI_INTA_IRQCHIP
	  If you wish to use interrupt aggregator irq resources managed by the
	  TI System Controller, say Y here. Otherwise, say N.

+config RISCV_INTC
+	bool "RISC-V Local Interrupt Controller"
+	depends on RISCV
+	default y
+	help
+	   This enables support for the per-HART local interrupt controller
+	   found in standard RISC-V systems. The per-HART local interrupt
+	   controller handles timer interrupts, software interrupts, and
+	   hardware interrupts. Without a per-HART local interrupt controller,
+	   a RISC-V system will be unable to handle any interrupts.
+
+	   If you don't know what to do here, say Y.
+
config SIFIVE_PLIC
	bool "SiFive Platform-Level Interrupt Controller"
	depends on RISCV
...
@@ -98,6 +98,7 @@ obj-$(CONFIG_NDS32) += irq-ativic32.o
obj-$(CONFIG_QCOM_PDC)			+= qcom-pdc.o
obj-$(CONFIG_CSKY_MPINTC)		+= irq-csky-mpintc.o
obj-$(CONFIG_CSKY_APB_INTC)		+= irq-csky-apb-intc.o
+obj-$(CONFIG_RISCV_INTC)		+= irq-riscv-intc.o
obj-$(CONFIG_SIFIVE_PLIC)		+= irq-sifive-plic.o
obj-$(CONFIG_IMX_IRQSTEER)		+= irq-imx-irqsteer.o
obj-$(CONFIG_IMX_INTMUX)		+= irq-imx-intmux.o
...
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2012 Regents of the University of California
* Copyright (C) 2017-2018 SiFive
* Copyright (C) 2020 Western Digital Corporation or its affiliates.
*/
#define pr_fmt(fmt) "riscv-intc: " fmt
#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/smp.h>
static struct irq_domain *intc_domain;
static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
{
unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG;
if (unlikely(cause >= BITS_PER_LONG))
panic("unexpected interrupt cause");
switch (cause) {
#ifdef CONFIG_SMP
case RV_IRQ_SOFT:
/*
* We only use software interrupts to pass IPIs, so if a
* non-SMP system gets one, then we don't know what to do.
*/
handle_IPI(regs);
break;
#endif
default:
handle_domain_irq(intc_domain, cause, regs);
break;
}
}
/*
* On RISC-V systems local interrupts are masked or unmasked by writing
* the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written
* on the local hart, these functions can only be called on the hart that
* corresponds to the IRQ chip.
*/
static void riscv_intc_irq_mask(struct irq_data *d)
{
csr_clear(CSR_IE, BIT(d->hwirq));
}
static void riscv_intc_irq_unmask(struct irq_data *d)
{
csr_set(CSR_IE, BIT(d->hwirq));
}
static int riscv_intc_cpu_starting(unsigned int cpu)
{
csr_set(CSR_IE, BIT(RV_IRQ_SOFT));
return 0;
}
static int riscv_intc_cpu_dying(unsigned int cpu)
{
csr_clear(CSR_IE, BIT(RV_IRQ_SOFT));
return 0;
}
static struct irq_chip riscv_intc_chip = {
.name = "RISC-V INTC",
.irq_mask = riscv_intc_irq_mask,
.irq_unmask = riscv_intc_irq_unmask,
};
static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hwirq, &riscv_intc_chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
return 0;
}
static const struct irq_domain_ops riscv_intc_domain_ops = {
.map = riscv_intc_domain_map,
.xlate = irq_domain_xlate_onecell,
};
static int __init riscv_intc_init(struct device_node *node,
struct device_node *parent)
{
int rc, hartid;
hartid = riscv_of_parent_hartid(node);
if (hartid < 0) {
pr_warn("unable to fine hart id for %pOF\n", node);
return 0;
}
/*
* The DT will have one INTC DT node under each CPU (or HART)
* DT node so riscv_intc_init() function will be called once
* for each INTC DT node. We only need to do INTC initialization
* for the INTC DT node belonging to boot CPU (or boot HART).
*/
if (riscv_hartid_to_cpuid(hartid) != smp_processor_id())
return 0;
intc_domain = irq_domain_add_linear(node, BITS_PER_LONG,
&riscv_intc_domain_ops, NULL);
if (!intc_domain) {
pr_err("unable to add IRQ domain\n");
return -ENXIO;
}
rc = set_handle_irq(&riscv_intc_irq);
if (rc) {
pr_err("failed to set irq handler\n");
return rc;
}
cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_STARTING,
"irqchip/riscv/intc:starting",
riscv_intc_cpu_starting,
riscv_intc_cpu_dying);
pr_info("%d local interrupts mapped\n", BITS_PER_LONG);
return 0;
}
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc", riscv_intc_init);
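For orientation, a hedged sketch of how a driver consumes this irqchip, mirroring what the timer driver above does for RV_IRQ_TIMER (hypothetical demo_* names, not part of the commit):

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/percpu-defs.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/* runs on the CPU whose local interrupt fired */
	return IRQ_HANDLED;
}

static DEFINE_PER_CPU(int, demo_dev);

static int demo_request_local_irq(struct device_node *intc_node, int hwirq)
{
	struct irq_domain *domain = irq_find_host(intc_node);
	unsigned int irq;

	if (!domain)
		return -ENODEV;

	irq = irq_create_mapping(domain, hwirq);
	if (!irq)
		return -ENODEV;

	return request_percpu_irq(irq, demo_handler, "demo", &demo_dev);
}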
@@ -9,6 +9,7 @@
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>

@@ -76,6 +77,7 @@ struct plic_handler {
	void __iomem		*enable_base;
	struct plic_priv	*priv;
};
+static int plic_parent_irq;
static bool plic_cpuhp_setup_done;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

@@ -219,15 +221,17 @@ static const struct irq_domain_ops plic_irqdomain_ops = {
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
-static void plic_handle_irq(struct pt_regs *regs)
+static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

-	csr_clear(CSR_IE, IE_EIE);
+	chained_irq_enter(chip, desc);
+
	while ((hwirq = readl(claim))) {
		int irq = irq_find_mapping(handler->priv->irqdomain, hwirq);

@@ -237,21 +241,8 @@
		else
			generic_handle_irq(irq);
	}
-	csr_set(CSR_IE, IE_EIE);
-}
-
-/*
- * Walk up the DT tree until we find an active RISC-V core (HART) node and
- * extract the cpuid from it.
- */
-static int plic_find_hart_id(struct device_node *node)
-{
-	for (; node; node = node->parent) {
-		if (of_device_is_compatible(node, "riscv"))
-			return riscv_of_processor_hartid(node);
-	}
-
-	return -1;
+
+	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
@@ -262,10 +253,8 @@ static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
static int plic_dying_cpu(unsigned int cpu)
{
-	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-
-	csr_clear(CSR_IE, IE_EIE);
-	plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
+	if (plic_parent_irq)
+		disable_percpu_irq(plic_parent_irq);

	return 0;
}

@@ -274,7 +263,11 @@ static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

-	csr_set(CSR_IE, IE_EIE);
+	if (plic_parent_irq)
+		enable_percpu_irq(plic_parent_irq,
+				  irq_get_trigger_type(plic_parent_irq));
+	else
+		pr_warn("cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
@@ -330,7 +323,7 @@ static int __init plic_init(struct device_node *node,
		if (parent.args[0] != RV_IRQ_EXT)
			continue;

-		hartid = plic_find_hart_id(parent.np);
+		hartid = riscv_of_parent_hartid(parent.np);
		if (hartid < 0) {
			pr_warn("failed to parse hart ID for context %d.\n", i);
			continue;
@@ -342,6 +335,14 @@
			continue;
		}

+		/* Find parent domain and register chained handler */
+		if (!plic_parent_irq && irq_find_host(parent.np)) {
+			plic_parent_irq = irq_of_parse_and_map(node, i);
+			if (plic_parent_irq)
+				irq_set_chained_handler(plic_parent_irq,
+							plic_handle_irq);
+		}
+
		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
@@ -382,7 +383,6 @@ static int __init plic_init(struct device_node *node,
	pr_info("%pOFP: mapped %d interrupts with %d handlers for"
		" %d contexts.\n", node, nr_irqs, nr_handlers, nr_contexts);

-	set_handle_irq(plic_handle_irq);
	return 0;

out_iounmap:
...
@@ -102,6 +102,7 @@ enum cpuhp_state {
	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
	CPUHP_AP_IRQ_BCM2836_STARTING,
	CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+	CPUHP_AP_IRQ_RISCV_STARTING,
	CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
	CPUHP_AP_ARM_MVEBU_COHERENCY,
	CPUHP_AP_MICROCODE_LOADER,
...