Commit 46859ac8 authored by Huacai Chen

LoongArch: Add multi-processor (SMP) support

LoongArch-based processors have 4, 8 or 16 cores per package. This patch
adds multi-processor (SMP) support for LoongArch.
Reviewed-by: WANG Xuerui <git@xen0n.name>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
parent c6b99bed
@@ -64,6 +64,7 @@ config LOONGARCH
select GENERIC_LIB_UCMPDI2
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
select GPIOLIB
select HAVE_ARCH_AUDITSYSCALL
@@ -92,7 +93,7 @@ config LOONGARCH
select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_TIF_NOHZ
-select HAVE_VIRT_CPU_ACCOUNTING_GEN
+select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
select IRQ_FORCED_THREADING
select IRQ_LOONGARCH_CPU
select MODULES_USE_ELF_RELA if MODULES
@@ -297,6 +298,43 @@ config EFI
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
config SMP
bool "Multi-Processing support"
help
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
If you say N here, the kernel will run on uni- and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on many, but not all,
uniprocessor machines. On a uniprocessor machine, the kernel
will run faster if you say N here.
See also the SMP-HOWTO available at <http://www.tldp.org/docs.html#howto>.
If you don't know what to do here, say N.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP
select GENERIC_IRQ_MIGRATION
help
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu.
(Note: power management support will enable this option
automatically on SMP systems. )
Say N if you want to disable CPU hotplug.
config NR_CPUS
int "Maximum number of CPUs (2-256)"
range 2 256
depends on SMP
default "64"
help
This allows you to specify the maximum number of CPUs which this
kernel will support.
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 14 64 if PAGE_SIZE_64KB
......
@@ -162,6 +162,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), : "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter) "+" GCC_OFF_SMALL_ASM() (v->counter)
: "I" (-i)); : "I" (-i));
@@ -174,6 +175,7 @@ static inline int arch_atomic_sub_if_positive(int i, atomic_t *v)
" sc.w %1, %2 \n"
" beq $zero, %1, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), : "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter) "+" GCC_OFF_SMALL_ASM() (v->counter)
: "r" (i)); : "r" (i));
@@ -323,6 +325,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), : "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter) "+" GCC_OFF_SMALL_ASM() (v->counter)
: "I" (-i)); : "I" (-i));
@@ -335,6 +338,7 @@ static inline long arch_atomic64_sub_if_positive(long i, atomic64_t *v)
" sc.d %1, %2 \n"
" beq %1, $zero, 1b \n"
"2: \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), : "=&r" (result), "=&r" (temp),
"+" GCC_OFF_SMALL_ASM() (v->counter) "+" GCC_OFF_SMALL_ASM() (v->counter)
: "r" (i)); : "r" (i));
......
@@ -18,6 +18,19 @@
#define mb() fast_mb()
#define iob() fast_iob()
#define __smp_mb() __asm__ __volatile__("dbar 0" : : : "memory")
#define __smp_rmb() __asm__ __volatile__("dbar 0" : : : "memory")
#define __smp_wmb() __asm__ __volatile__("dbar 0" : : : "memory")
#ifdef CONFIG_SMP
#define __WEAK_LLSC_MB " dbar 0 \n"
#else
#define __WEAK_LLSC_MB " \n"
#endif
#define __smp_mb__before_atomic() barrier()
#define __smp_mb__after_atomic() barrier()
/**
* array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise
* @index: array element index
@@ -46,6 +59,101 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
return mask;
}
#define __smp_load_acquire(p) \
({ \
union { typeof(*p) __val; char __c[1]; } __u; \
unsigned long __tmp = 0; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
*(__u8 *)__u.__c = *(volatile __u8 *)p; \
__smp_mb(); \
break; \
case 2: \
*(__u16 *)__u.__c = *(volatile __u16 *)p; \
__smp_mb(); \
break; \
case 4: \
__asm__ __volatile__( \
"amor_db.w %[val], %[tmp], %[mem] \n" \
: [val] "=&r" (*(__u32 *)__u.__c) \
: [mem] "ZB" (*(u32 *) p), [tmp] "r" (__tmp) \
: "memory"); \
break; \
case 8: \
__asm__ __volatile__( \
"amor_db.d %[val], %[tmp], %[mem] \n" \
: [val] "=&r" (*(__u64 *)__u.__c) \
: [mem] "ZB" (*(u64 *) p), [tmp] "r" (__tmp) \
: "memory"); \
break; \
} \
(typeof(*p))__u.__val; \
})
#define __smp_store_release(p, v) \
do { \
union { typeof(*p) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(*p)) (v) }; \
unsigned long __tmp; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
__smp_mb(); \
*(volatile __u8 *)p = *(__u8 *)__u.__c; \
break; \
case 2: \
__smp_mb(); \
*(volatile __u16 *)p = *(__u16 *)__u.__c; \
break; \
case 4: \
__asm__ __volatile__( \
"amswap_db.w %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u32 *)p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u32 *)__u.__c) \
: ); \
break; \
case 8: \
__asm__ __volatile__( \
"amswap_db.d %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u64 *)p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u64 *)__u.__c) \
: ); \
break; \
} \
} while (0)
#define __smp_store_mb(p, v) \
do { \
union { typeof(p) __val; char __c[1]; } __u = \
{ .__val = (__force typeof(p)) (v) }; \
unsigned long __tmp; \
switch (sizeof(p)) { \
case 1: \
*(volatile __u8 *)&p = *(__u8 *)__u.__c; \
__smp_mb(); \
break; \
case 2: \
*(volatile __u16 *)&p = *(__u16 *)__u.__c; \
__smp_mb(); \
break; \
case 4: \
__asm__ __volatile__( \
"amswap_db.w %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u32 *)&p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u32 *)__u.__c) \
: ); \
break; \
case 8: \
__asm__ __volatile__( \
"amswap_db.d %[tmp], %[val], %[mem] \n" \
: [mem] "+ZB" (*(u64 *)&p), [tmp] "=&r" (__tmp) \
: [val] "r" (*(__u64 *)__u.__c) \
: ); \
break; \
} \
} while (0)
#include <asm-generic/barrier.h>
#endif /* __ASM_BARRIER_H */
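Context, not part of the patch: the macros above give the usual one-way ordering guarantee, i.e. every store made before an __smp_store_release() is visible to a CPU whose __smp_load_acquire() observes that store; note the 32- and 64-bit cases use amor_db/amswap_db (atomic accesses with the barrier hint) rather than a plain access plus dbar 0. A minimal userspace sketch of the same publish/consume pattern, written with the GCC/Clang __atomic builtins instead of the kernel macros; the payload/ready names are made up for illustration.

/* Illustrative only: userspace analogue of smp_store_release()/smp_load_acquire(). */
#include <pthread.h>
#include <stdio.h>

static int payload;   /* plain data published by the writer */
static int ready;     /* flag written with release, read with acquire */

static void *writer(void *arg)
{
	payload = 42;                                   /* ordinary store */
	__atomic_store_n(&ready, 1, __ATOMIC_RELEASE);  /* publish: orders the store above before the flag */
	return NULL;
}

static void *reader(void *arg)
{
	while (!__atomic_load_n(&ready, __ATOMIC_ACQUIRE))
		;                                       /* spin until the flag is observed */
	printf("payload = %d\n", payload);              /* guaranteed to print 42 */
	return NULL;
}

int main(void)
{
	pthread_t w, r;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}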
@@ -59,6 +59,7 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
" " st " $t0, %1 \n" \
" beq $zero, $t0, 1b \n" \
"2: \n" \
__WEAK_LLSC_MB \
: "=&r" (__ret), "=ZB"(*m) \ : "=&r" (__ret), "=ZB"(*m) \
: "ZB"(*m), "Jr" (old), "Jr" (new) \ : "ZB"(*m), "Jr" (old), "Jr" (new) \
: "t0", "memory"); \ : "t0", "memory"); \
......
@@ -86,6 +86,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
"2: sc.w $t0, %2 \n"
" beq $zero, $t0, 1b \n"
"3: \n"
__WEAK_LLSC_MB
" .section .fixup,\"ax\" \n" " .section .fixup,\"ax\" \n"
"4: li.d %0, %6 \n" "4: li.d %0, %6 \n"
" b 3b \n" " b 3b \n"
......
@@ -21,4 +21,6 @@ typedef struct {
DECLARE_PER_CPU_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT
#endif /* _ASM_HARDIRQ_H */
@@ -125,6 +125,8 @@ extern struct irq_domain *pch_lpc_domain;
extern struct irq_domain *pch_msi_domain[MAX_IO_PICS];
extern struct irq_domain *pch_pic_domain[MAX_IO_PICS];
extern irqreturn_t loongson3_ipi_interrupt(int irq, void *dev);
#include <asm-generic/irq.h>
#endif /* _ASM_IRQ_H */
@@ -5,6 +5,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
#include <asm/cmpxchg.h>
/* Use r21 for fast access */
register unsigned long __my_cpu_offset __asm__("$r21");
@@ -15,6 +17,198 @@ static inline void set_my_cpu_offset(unsigned long off)
}
#define __my_cpu_offset __my_cpu_offset
#define PERCPU_OP(op, asm_op, c_op) \
static inline unsigned long __percpu_##op(void *ptr, \
unsigned long val, int size) \
{ \
unsigned long ret; \
\
switch (size) { \
case 4: \
__asm__ __volatile__( \
"am"#asm_op".w" " %[ret], %[val], %[ptr] \n" \
: [ret] "=&r" (ret), [ptr] "+ZB"(*(u32 *)ptr) \
: [val] "r" (val)); \
break; \
case 8: \
__asm__ __volatile__( \
"am"#asm_op".d" " %[ret], %[val], %[ptr] \n" \
: [ret] "=&r" (ret), [ptr] "+ZB"(*(u64 *)ptr) \
: [val] "r" (val)); \
break; \
default: \
ret = 0; \
BUILD_BUG(); \
} \
\
return ret c_op val; \
}
PERCPU_OP(add, add, +)
PERCPU_OP(and, and, &)
PERCPU_OP(or, or, |)
#undef PERCPU_OP
static inline unsigned long __percpu_read(void *ptr, int size)
{
unsigned long ret;
switch (size) {
case 1:
__asm__ __volatile__ ("ldx.b %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
case 2:
__asm__ __volatile__ ("ldx.h %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
case 4:
__asm__ __volatile__ ("ldx.w %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
case 8:
__asm__ __volatile__ ("ldx.d %[ret], $r21, %[ptr] \n"
: [ret] "=&r"(ret)
: [ptr] "r"(ptr)
: "memory");
break;
default:
ret = 0;
BUILD_BUG();
}
return ret;
}
static inline void __percpu_write(void *ptr, unsigned long val, int size)
{
switch (size) {
case 1:
__asm__ __volatile__("stx.b %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
case 2:
__asm__ __volatile__("stx.h %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
case 4:
__asm__ __volatile__("stx.w %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
case 8:
__asm__ __volatile__("stx.d %[val], $r21, %[ptr] \n"
:
: [val] "r" (val), [ptr] "r" (ptr)
: "memory");
break;
default:
BUILD_BUG();
}
}
static inline unsigned long __percpu_xchg(void *ptr, unsigned long val,
int size)
{
switch (size) {
case 4:
return __xchg_asm("amswap.w", (volatile u32 *)ptr, (u32)val);
case 8:
return __xchg_asm("amswap.d", (volatile u64 *)ptr, (u64)val);
default:
BUILD_BUG();
}
return 0;
}
/* this_cpu_cmpxchg */
#define _protect_cmpxchg_local(pcp, o, n) \
({ \
typeof(*raw_cpu_ptr(&(pcp))) __ret; \
preempt_disable_notrace(); \
__ret = cmpxchg_local(raw_cpu_ptr(&(pcp)), o, n); \
preempt_enable_notrace(); \
__ret; \
})
#define _percpu_read(pcp) \
({ \
typeof(pcp) __retval; \
__retval = (typeof(pcp))__percpu_read(&(pcp), sizeof(pcp)); \
__retval; \
})
#define _percpu_write(pcp, val) \
do { \
__percpu_write(&(pcp), (unsigned long)(val), sizeof(pcp)); \
} while (0) \
#define _pcp_protect(operation, pcp, val) \
({ \
typeof(pcp) __retval; \
preempt_disable_notrace(); \
__retval = (typeof(pcp))operation(raw_cpu_ptr(&(pcp)), \
(val), sizeof(pcp)); \
preempt_enable_notrace(); \
__retval; \
})
#define _percpu_add(pcp, val) \
_pcp_protect(__percpu_add, pcp, val)
#define _percpu_add_return(pcp, val) _percpu_add(pcp, val)
#define _percpu_and(pcp, val) \
_pcp_protect(__percpu_and, pcp, val)
#define _percpu_or(pcp, val) \
_pcp_protect(__percpu_or, pcp, val)
#define _percpu_xchg(pcp, val) ((typeof(pcp)) \
_pcp_protect(__percpu_xchg, pcp, (unsigned long)(val)))
#define this_cpu_add_4(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_8(pcp, val) _percpu_add(pcp, val)
#define this_cpu_add_return_4(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_add_return_8(pcp, val) _percpu_add_return(pcp, val)
#define this_cpu_and_4(pcp, val) _percpu_and(pcp, val)
#define this_cpu_and_8(pcp, val) _percpu_and(pcp, val)
#define this_cpu_or_4(pcp, val) _percpu_or(pcp, val)
#define this_cpu_or_8(pcp, val) _percpu_or(pcp, val)
#define this_cpu_read_1(pcp) _percpu_read(pcp)
#define this_cpu_read_2(pcp) _percpu_read(pcp)
#define this_cpu_read_4(pcp) _percpu_read(pcp)
#define this_cpu_read_8(pcp) _percpu_read(pcp)
#define this_cpu_write_1(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_2(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_4(pcp, val) _percpu_write(pcp, val)
#define this_cpu_write_8(pcp, val) _percpu_write(pcp, val)
#define this_cpu_xchg_4(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_xchg_8(pcp, val) _percpu_xchg(pcp, val)
#define this_cpu_cmpxchg_4(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#define this_cpu_cmpxchg_8(ptr, o, n) _protect_cmpxchg_local(ptr, o, n)
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
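Context, not part of the patch: generic code reaches these accessors through the this_cpu_*() wrappers, which are preemption-safe by contract; on LoongArch, reads and writes become ldx/stx relative to the $r21 per-CPU base, and the 4-/8-byte add/and/or/xchg cases use a single am*.w/d atomic. A minimal sketch of how they are typically exercised, assuming a hypothetical demo_hits counter in a throwaway module.

/* Illustrative only: a hypothetical per-CPU counter exercising the accessors above. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);   /* hypothetical counter, not part of the patch */

static int __init percpu_demo_init(void)
{
	int cpu;

	/* this_cpu_inc() ends up in this_cpu_add_8() above, i.e. a single
	 * amadd.d on this CPU's slot; no explicit locking is required. */
	this_cpu_inc(demo_hits);

	for_each_possible_cpu(cpu)
		pr_info("cpu%d: %lu hits\n", cpu, per_cpu(demo_hits, cpu));

	return 0;
}

static void __exit percpu_demo_exit(void) { }

module_init(percpu_demo_init);
module_exit(percpu_demo_exit);
MODULE_LICENSE("GPL");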
@@ -279,8 +279,29 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
#ifdef CONFIG_SMP
/*
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;
__asm__ __volatile__ (
"1:" __LL "%[tmp], %[buddy] \n"
" bnez %[tmp], 2f \n"
" or %[tmp], %[tmp], %[global] \n"
__SC "%[tmp], %[buddy] \n"
" beqz %[tmp], 1b \n"
" nop \n"
"2: \n"
__WEAK_LLSC_MB
: [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
#endif /* CONFIG_SMP */
}
}
......
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Author: Huacai Chen <chenhuacai@loongson.cn>
* Copyright (C) 2020-2022 Loongson Technology Corporation Limited
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
void loongson3_smp_setup(void);
void loongson3_prepare_cpus(unsigned int max_cpus);
void loongson3_boot_secondary(int cpu, struct task_struct *idle);
void loongson3_init_secondary(void);
void loongson3_smp_finish(void);
void loongson3_send_ipi_single(int cpu, unsigned int action);
void loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action);
#ifdef CONFIG_HOTPLUG_CPU
int loongson3_cpu_disable(void);
void loongson3_cpu_die(unsigned int cpu);
#endif
#ifdef CONFIG_SMP
static inline void plat_smp_setup(void)
{
loongson3_smp_setup();
}
#else /* !CONFIG_SMP */
static inline void plat_smp_setup(void) { }
#endif /* !CONFIG_SMP */
extern int smp_num_siblings;
extern int num_processors;
extern int disabled_cpus;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
extern cpumask_t cpu_foreign_map[];
static inline int raw_smp_processor_id(void)
{
#if defined(__VDSO__)
extern int vdso_smp_processor_id(void)
__compiletime_error("VDSO should not call smp_processor_id()");
return vdso_smp_processor_id();
#else
return current_thread_info()->cpu;
#endif
}
#define raw_smp_processor_id raw_smp_processor_id
/* Map from cpu id to sequential logical cpu number. This will only
* not be idempotent when cpus failed to come on-line. */
extern int __cpu_number_map[NR_CPUS];
#define cpu_number_map(cpu) __cpu_number_map[cpu]
/* The reverse map from sequential logical cpu number to cpu id. */
extern int __cpu_logical_map[NR_CPUS];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
#define cpu_physical_id(cpu) cpu_logical_map(cpu)
#define SMP_BOOT_CPU 0x1
#define SMP_RESCHEDULE 0x2
#define SMP_CALL_FUNCTION 0x4
struct secondary_data {
unsigned long stack;
unsigned long thread_info;
};
extern struct secondary_data cpuboot_data;
extern asmlinkage void smpboot_entry(void);
extern void calculate_cpu_foreign_map(void);
/*
* Generate IPI list text
*/
extern void show_ipi_list(struct seq_file *p, int prec);
/*
* This function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
* anything. Worst case is that we lose a reschedule ...
*/
static inline void smp_send_reschedule(int cpu)
{
loongson3_send_ipi_single(cpu, SMP_RESCHEDULE);
}
static inline void arch_send_call_function_single_ipi(int cpu)
{
loongson3_send_ipi_single(cpu, SMP_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
loongson3_send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
#ifdef CONFIG_HOTPLUG_CPU
static inline int __cpu_disable(void)
{
return loongson3_cpu_disable();
}
static inline void __cpu_die(unsigned int cpu)
{
loongson3_cpu_die(cpu);
}
extern void play_dead(void);
#endif
#endif /* __ASM_SMP_H */
@@ -78,6 +78,10 @@
*/
.macro get_saved_sp docfi=0
la.abs t1, kernelsp
#ifdef CONFIG_SMP
csrrd t0, PERCPU_BASE_KS
LONG_ADD t1, t1, t0
#endif
move t0, sp
.if \docfi
.cfi_register sp, t0
@@ -87,6 +91,9 @@
.macro set_saved_sp stackp temp temp2
la.abs \temp, kernelsp
#ifdef CONFIG_SMP
LONG_ADD \temp, \temp, u0
#endif
LONG_S \stackp, \temp, 0
.endm
......
@@ -25,6 +25,17 @@ extern void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void local_flush_tlb_one(unsigned long vaddr);
#ifdef CONFIG_SMP
extern void flush_tlb_all(void);
extern void flush_tlb_mm(struct mm_struct *);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long, unsigned long);
extern void flush_tlb_kernel_range(unsigned long, unsigned long);
extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void flush_tlb_one(unsigned long vaddr);
#else /* CONFIG_SMP */
#define flush_tlb_all() local_flush_tlb_all()
#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, end)
@@ -32,4 +43,6 @@ extern void local_flush_tlb_one(unsigned long vaddr);
#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
#define flush_tlb_one(vaddr) local_flush_tlb_one(vaddr)
#endif /* CONFIG_SMP */
#endif /* __ASM_TLBFLUSH_H */
@@ -7,7 +7,12 @@
#include <linux/smp.h>
-#define cpu_logical_map(cpu) 0
+#ifdef CONFIG_SMP
#define topology_physical_package_id(cpu) (cpu_data[cpu].package)
#define topology_core_id(cpu) (cpu_data[cpu].core)
#define topology_core_cpumask(cpu) (&cpu_core_map[cpu])
#define topology_sibling_cpumask(cpu) (&cpu_sibling_map[cpu])
#endif
#include <asm-generic/topology.h>
......
@@ -18,4 +18,6 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SMP) += smp.o
CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
@@ -137,8 +137,44 @@ void __init acpi_boot_table_init(void)
}
}
static int set_processor_mask(u32 id, u32 flags)
{
int cpu, cpuid = id;
if (num_processors >= nr_cpu_ids) {
pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
" processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
return -ENODEV;
}
if (cpuid == loongson_sysconf.boot_cpu_id)
cpu = 0;
else
cpu = cpumask_next_zero(-1, cpu_present_mask);
if (flags & ACPI_MADT_ENABLED) {
num_processors++;
set_cpu_possible(cpu, true);
set_cpu_present(cpu, true);
__cpu_number_map[cpuid] = cpu;
__cpu_logical_map[cpu] = cpuid;
} else
disabled_cpus++;
return cpu;
}
static void __init acpi_process_madt(void) static void __init acpi_process_madt(void)
{ {
int i;
for (i = 0; i < NR_CPUS; i++) {
__cpu_number_map[i] = -1;
__cpu_logical_map[i] = -1;
}
loongson_sysconf.nr_cpus = num_processors;
}
@@ -167,3 +203,36 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
memblock_reserve(addr, size);
}
#ifdef CONFIG_ACPI_HOTPLUG_CPU
#include <acpi/processor.h>
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
int cpu;
cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
if (cpu < 0) {
pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
return cpu;
}
*pcpu = cpu;
return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);
int acpi_unmap_cpu(int cpu)
{
set_cpu_present(cpu, false);
num_processors--;
pr_info("cpu%d hot remove!\n", cpu);
return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
@@ -252,3 +252,13 @@ void output_signal_defines(void)
DEFINE(_SIGXFSZ, SIGXFSZ);
BLANK();
}
#ifdef CONFIG_SMP
void output_smpboot_defines(void)
{
COMMENT("Linux smp cpu boot offsets.");
OFFSET(CPU_BOOT_STACK, secondary_data, stack);
OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
BLANK();
}
#endif
@@ -65,4 +65,34 @@ SYM_CODE_START(kernel_entry) # kernel entry point
SYM_CODE_END(kernel_entry)
#ifdef CONFIG_SMP
/*
* SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and tp registers.
*/
SYM_CODE_START(smpboot_entry)
li.d t0, CSR_DMW0_INIT # UC, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN0
li.d t0, CSR_DMW1_INIT # CA, PLV0
csrwr t0, LOONGARCH_CSR_DMWIN1
li.w t0, 0xb0 # PLV=0, IE=0, PG=1
csrwr t0, LOONGARCH_CSR_CRMD
li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
csrwr t0, LOONGARCH_CSR_PRMD
li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
csrwr t0, LOONGARCH_CSR_EUEN
la.abs t0, cpuboot_data
ld.d sp, t0, CPU_BOOT_STACK
ld.d tp, t0, CPU_BOOT_TINFO
la.abs t0, 0f
jirl zero, t0, 0
0:
bl start_secondary
SYM_CODE_END(smpboot_entry)
#endif /* CONFIG_SMP */
SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
@@ -47,13 +47,17 @@ asmlinkage void spurious_interrupt(void)
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_SMP
show_ipi_list(p, prec);
#endif
seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
return 0;
}
void __init init_IRQ(void)
{
-int i;
+int i, r, ipi_irq;
static int ipi_dummy_dev;
unsigned int order = get_order(IRQ_STACK_SIZE);
struct page *page;
@@ -61,6 +65,13 @@ void __init init_IRQ(void)
clear_csr_estat(ESTATF_IP);
irqchip_init();
#ifdef CONFIG_SMP
ipi_irq = EXCCODE_IPI - EXCCODE_INT_START;
irq_set_percpu_devid(ipi_irq);
r = request_percpu_irq(ipi_irq, loongson3_ipi_interrupt, "IPI", &ipi_dummy_dev);
if (r < 0)
panic("IPI IRQ request failed\n");
#endif
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
......
@@ -35,6 +35,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned int fp_version = cpu_data[n].fpu_vers;
struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
#ifdef CONFIG_SMP
if (!cpu_online(n))
return 0;
#endif
/*
* For the first processor also print the system type
*/
......
@@ -53,6 +53,13 @@
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
play_dead();
}
#endif
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
......
@@ -65,16 +65,28 @@ EXPORT_SYMBOL(pm_power_off);
void machine_halt(void)
{
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
default_halt();
}
void machine_power_off(void)
{
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
pm_power_off();
}
void machine_restart(char *command)
{
#ifdef CONFIG_SMP
preempt_disable();
smp_send_stop();
#endif
do_kernel_restart(command);
pm_restart();
}
......
@@ -38,6 +38,7 @@
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/time.h>
#define SMBIOS_BIOSSIZE_OFFSET 0x09
@@ -322,6 +323,29 @@ static int __init reserve_memblock_reserved_regions(void)
}
arch_initcall(reserve_memblock_reserved_regions);
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
int i, possible;
possible = num_processors + disabled_cpus;
if (possible > nr_cpu_ids)
possible = nr_cpu_ids;
pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible, max((possible - num_processors), 0));
for (i = 0; i < possible; i++)
set_cpu_possible(i, true);
for (; i < NR_CPUS; i++)
set_cpu_possible(i, false);
nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
@@ -336,6 +360,8 @@ void __init setup_arch(char **cmdline_p)
arch_mem_init(cmdline_p);
resource_init();
plat_smp_setup();
prefill_possible_map();
paging_init();
}
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/percpu.h>
-static struct cpu cpu_device;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
#ifdef CONFIG_HOTPLUG_CPU
int arch_register_cpu(int cpu)
{
int ret;
struct cpu *c = &per_cpu(cpu_devices, cpu);
c->hotpluggable = 1;
ret = register_cpu(c, cpu);
if (ret < 0)
pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
return ret;
}
EXPORT_SYMBOL(arch_register_cpu);
void arch_unregister_cpu(int cpu)
{
struct cpu *c = &per_cpu(cpu_devices, cpu);
c->hotpluggable = 0;
unregister_cpu(c);
}
EXPORT_SYMBOL(arch_unregister_cpu);
#endif
static int __init topology_init(void)
{
-return register_cpu(&cpu_device, 0);
+int i, ret;
for_each_present_cpu(i) {
struct cpu *c = &per_cpu(cpu_devices, i);
c->hotpluggable = !!i;
ret = register_cpu(c, i);
if (ret < 0)
pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
}
return 0;
} }
subsys_initcall(topology_init);
@@ -72,6 +72,10 @@ SECTIONS
EXIT_DATA
}
#ifdef CONFIG_SMP
PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
#endif
.init.bss : {
*(.init.bss)
}
......
@@ -88,7 +88,14 @@ vmalloc_done_load:
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
#ifdef CONFIG_SMP
smp_pgtable_change_load:
#endif
#ifdef CONFIG_SMP
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
tlbsrch
srli.d ra, t0, _PAGE_PRESENT_SHIFT
@@ -96,7 +103,12 @@ vmalloc_done_load:
beq ra, $r0, nopage_tlb_load
ori t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beq t0, $r0, smp_pgtable_change_load
#else
st.d t0, t1, 0
#endif
ori t1, t1, 8
xori t1, t1, 8
ld.d t0, t1, 0
@@ -120,14 +132,24 @@ vmalloc_load:
* spots a huge page.
*/
tlb_huge_update_load:
#ifdef CONFIG_SMP
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
srli.d ra, t0, _PAGE_PRESENT_SHIFT
andi ra, ra, 1
beq ra, $r0, nopage_tlb_load
tlbsrch
ori t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beq t0, $r0, tlb_huge_update_load
ld.d t0, t1, 0
#else
st.d t0, t1, 0
#endif
addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
addi.d ra, t1, 0
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
@@ -173,6 +195,7 @@ tlb_huge_update_load:
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
nopage_tlb_load:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_0
jirl $r0, t0, 0
@@ -229,7 +252,14 @@ vmalloc_done_store:
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
#ifdef CONFIG_SMP
smp_pgtable_change_store:
#endif
#ifdef CONFIG_SMP
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
tlbsrch
srli.d ra, t0, _PAGE_PRESENT_SHIFT
@@ -238,7 +268,12 @@ vmalloc_done_store:
bne ra, $r0, nopage_tlb_store
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beq t0, $r0, smp_pgtable_change_store
#else
st.d t0, t1, 0
#endif
ori t1, t1, 8
xori t1, t1, 8
@@ -263,7 +298,11 @@ vmalloc_store:
* spots a huge page.
*/
tlb_huge_update_store:
#ifdef CONFIG_SMP
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
srli.d ra, t0, _PAGE_PRESENT_SHIFT
andi ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
xori ra, ra, ((_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT)
@@ -272,7 +311,13 @@ tlb_huge_update_store:
tlbsrch
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beq t0, $r0, tlb_huge_update_store
ld.d t0, t1, 0
#else
st.d t0, t1, 0
#endif
addu16i.d t1, $r0, -(CSR_TLBIDX_EHINV >> 16)
addi.d ra, t1, 0
csrxchg ra, t1, LOONGARCH_CSR_TLBIDX
@@ -318,6 +363,7 @@ tlb_huge_update_store:
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
nopage_tlb_store:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1
jirl $r0, t0, 0
@@ -373,7 +419,14 @@ vmalloc_done_modify:
slli.d t0, t0, _PTE_T_LOG2
add.d t1, ra, t0
#ifdef CONFIG_SMP
smp_pgtable_change_modify:
#endif
#ifdef CONFIG_SMP
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
tlbsrch
srli.d ra, t0, _PAGE_WRITE_SHIFT
@@ -381,7 +434,12 @@ vmalloc_done_modify:
beq ra, $r0, nopage_tlb_modify
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beq t0, $r0, smp_pgtable_change_modify
#else
st.d t0, t1, 0
#endif
ori t1, t1, 8
xori t1, t1, 8
ld.d t0, t1, 0
@@ -405,7 +463,11 @@ vmalloc_modify:
* build_tlbchange_handler_head spots a huge page.
*/
tlb_huge_update_modify:
#ifdef CONFIG_SMP
ll.d t0, t1, 0
#else
ld.d t0, t1, 0
#endif
srli.d ra, t0, _PAGE_WRITE_SHIFT
andi ra, ra, 1
@@ -414,7 +476,13 @@ tlb_huge_update_modify:
tlbsrch
ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
sc.d t0, t1, 0
beq t0, $r0, tlb_huge_update_modify
ld.d t0, t1, 0
#else
st.d t0, t1, 0
#endif
/*
* A huge PTE describes an area the size of the
* configured huge page size. This is twice the
@@ -454,6 +522,7 @@ tlb_huge_update_modify:
csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
nopage_tlb_modify:
dbar 0
csrrd ra, EXCEPTION_KS2
la.abs t0, tlb_do_page_fault_1
jirl $r0, t0, 0
......
@@ -130,6 +130,7 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_PREPARE,
CPUHP_MIPS_SOC_PREPARE,
CPUHP_LOONGARCH_SOC_PREPARE,
CPUHP_BP_PREPARE_DYN,
CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
......