Commit 9ee3b3f4 authored by Linus Torvalds

Merge tag 'csky-for-linus-4.21' of git://github.com/c-sky/csky-linux

Pull arch/csky updates from Guo Ren:
 "Here are three main features (cpu_hotplug, basic ftrace, basic perf)
  and some bugfixes:

  Features:
   - Add CPU-hotplug support for SMP
   - Add ftrace with function trace and function graph trace
   - Add Perf support
   - Add EM_CSKY_OLD 39
   - Optimize kernel panic print
   - Remove syscall_exit_work

  Bugfixes:
   - fix abiv2 mmap(... O_SYNC) failure
   - fix gdb coredump error
   - remove vdsp implement for kernel
   - fix qemu failure to bootup sometimes
   - fix ftrace call-graph panic
   - fix device tree node reference leak
   - remove meaningless header-y
   - fix save hi,lo,dspcr regs in switch_stack
   - remove unused members in processor.h"

* tag 'csky-for-linus-4.21' of git://github.com/c-sky/csky-linux:
  csky: Add perf support for C-SKY
  csky: Add EM_CSKY_OLD 39
  clocksource/drivers/c-sky: fixup ftrace call-graph panic
  csky: ftrace call graph supported.
  csky: basic ftrace supported
  csky: remove unused members in processor.h
  csky: optimize kernel panic print.
  csky: stacktrace supported.
  csky: CPU-hotplug supported for SMP
  clocksource/drivers/c-sky: fixup qemu fail to bootup sometimes.
  csky: fixup save hi,lo,dspcr regs in switch_stack.
  csky: remove syscall_exit_work
  csky: fixup remove vdsp implement for kernel.
  csky: bugfix gdb coredump error.
  csky: fixup abiv2 mmap(... O_SYNC) failed.
  csky: define syscall_get_arch()
  elf-em.h: add EM_CSKY
  csky: remove meaningless header-y
  csky: Don't leak device tree node reference
parents a6598110 f50fd2d8
......@@ -28,10 +28,13 @@ config CSKY
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
select HAVE_ARCH_TRACEHOOK
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZO
select HAVE_KERNEL_LZMA
select HAVE_PERF_EVENTS
select HAVE_C_RECORDMCOUNT
select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
......@@ -40,7 +43,7 @@ config CSKY
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select PERF_USE_VMALLOC if CPU_CK610
select RTC_LIB
select TIMER_OF
select USB_ARCH_HAS_EHCI
......@@ -93,6 +96,9 @@ config MMU
config RWSEM_GENERIC_SPINLOCK
def_bool y
config STACKTRACE_SUPPORT
def_bool y
config TIME_LOW_RES
def_bool y
......@@ -144,6 +150,19 @@ config CPU_CK860
select CPU_HAS_FPUV2
endchoice
choice
prompt "C-SKY PMU type"
depends on PERF_EVENTS
depends on CPU_CK807 || CPU_CK810 || CPU_CK860
config CPU_PMU_NONE
bool "None"
config CSKY_PMU_V1
bool "Performance Monitoring Unit Ver.1"
endchoice
choice
prompt "Power Manager Instruction (wait/doze/stop)"
default CPU_PM_NONE
......@@ -197,6 +216,15 @@ config RAM_BASE
hex "DRAM start addr (the same with memory-section in dts)"
default 0x0
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
select GENERIC_IRQ_MIGRATION
depends on SMP
help
Say Y here to allow turning CPUs off and on. CPUs can be
controlled through /sys/devices/system/cpu/cpu1/hotplug/target.
Say N if you want to disable CPU hotplug.
endmenu
source "kernel/Kconfig.hz"
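
The CSKY_PMU_V1 option above plugs the new csky PMU driver into the generic perf_events core, so counters are consumed through the ordinary perf_event_open() interface rather than any csky-specific API. A minimal sketch of such a consumer follows; which hardware events PMU v1 actually exposes is not spelled out in this diff, so PERF_COUNT_HW_CPU_CYCLES here is only an illustrative choice.

/* Sketch only: count CPU cycles for the current thread through the
 * generic perf_event_open() syscall.  Event availability on CSKY_PMU_V1
 * is an assumption, not something stated in this diff. */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        long long count = 0;
        volatile int i;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.disabled = 1;

        /* pid = 0, cpu = -1: this thread, any CPU */
        fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        for (i = 0; i < 1000000; i++)
                ;                       /* something to count */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("cycles: %lld\n", count);
        close(fd);
        return 0;
}
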
......@@ -47,6 +47,10 @@ ifeq ($(CSKYABI),abiv2)
KBUILD_CFLAGS += -mno-stack-size
endif
ifdef CONFIG_STACKTRACE
KBUILD_CFLAGS += -mbacktrace
endif
abidirs := $(patsubst %,arch/csky/%/,$(CSKYABI))
KBUILD_CFLAGS += $(patsubst %,-I$(srctree)/%inc,$(abidirs))
......
......@@ -26,6 +26,7 @@
#define _PAGE_CACHE (3<<9)
#define _PAGE_UNCACHE (2<<9)
#define _PAGE_SO _PAGE_UNCACHE
#define _CACHE_MASK (7<<9)
......
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_CSKY_PTRACE_H
#define __ABI_CSKY_PTRACE_H
struct switch_stack {
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
unsigned long r15;
};
#endif /* __ABI_CSKY_PTRACE_H */
......@@ -8,3 +8,4 @@ obj-y += strcmp.o
obj-y += strcpy.o
obj-y += strlen.o
obj-y += strksyms.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
......@@ -57,6 +57,8 @@
stw lr, (sp, 60)
mflo lr
stw lr, (sp, 64)
mfcr lr, cr14
stw lr, (sp, 68)
#endif
subi sp, 80
.endm
......@@ -77,6 +79,8 @@
mthi a0
ldw a0, (sp, 144)
mtlo a0
ldw a0, (sp, 148)
mtcr a0, cr14
#endif
ldw a0, (sp, 24)
......@@ -93,9 +97,9 @@
.endm
.macro SAVE_SWITCH_STACK
subi sp, 64
subi sp, 64
stm r4-r11, (sp)
stw r15, (sp, 32)
stw lr, (sp, 32)
stw r16, (sp, 36)
stw r17, (sp, 40)
stw r26, (sp, 44)
......@@ -103,11 +107,29 @@
stw r28, (sp, 52)
stw r29, (sp, 56)
stw r30, (sp, 60)
#ifdef CONFIG_CPU_HAS_HILO
subi sp, 16
mfhi lr
stw lr, (sp, 0)
mflo lr
stw lr, (sp, 4)
mfcr lr, cr14
stw lr, (sp, 8)
#endif
.endm
.macro RESTORE_SWITCH_STACK
#ifdef CONFIG_CPU_HAS_HILO
ldw lr, (sp, 0)
mthi lr
ldw lr, (sp, 4)
mtlo lr
ldw lr, (sp, 8)
mtcr lr, cr14
addi sp, 16
#endif
ldm r4-r11, (sp)
ldw r15, (sp, 32)
ldw lr, (sp, 32)
ldw r16, (sp, 36)
ldw r17, (sp, 40)
ldw r26, (sp, 44)
......
......@@ -32,6 +32,6 @@
#define _CACHE_MASK _PAGE_CACHE
#define _CACHE_CACHED (_PAGE_VALID | _PAGE_CACHE | _PAGE_BUF)
#define _CACHE_UNCACHED (_PAGE_VALID | _PAGE_SO)
#define _CACHE_UNCACHED (_PAGE_VALID)
#endif /* __ASM_CSKY_PGTABLE_BITS_H */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ABI_CSKY_PTRACE_H
#define __ABI_CSKY_PTRACE_H
struct switch_stack {
#ifdef CONFIG_CPU_HAS_HILO
unsigned long rhi;
unsigned long rlo;
unsigned long cr14;
unsigned long pad;
#endif
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r15;
unsigned long r16;
unsigned long r17;
unsigned long r26;
unsigned long r27;
unsigned long r28;
unsigned long r29;
unsigned long r30;
};
#endif /* __ABI_CSKY_PTRACE_H */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/linkage.h>
#include <asm/ftrace.h>
/*
* csky-gcc with -pg will put the following asm after prologue:
* push r15
* jsri _mcount
*
* stack layout after mcount_enter in _mcount():
*
* current sp => 0:+-------+
* | a0-a3 | -> must save all argument regs
* +16:+-------+
* | lr | -> _mcount lr (instrumented function's pc)
* +20:+-------+
* | fp=r8 | -> instrumented function fp
* +24:+-------+
* | plr | -> instrumented function lr (parent's pc)
* +-------+
*/
.macro mcount_enter
subi sp, 24
stw a0, (sp, 0)
stw a1, (sp, 4)
stw a2, (sp, 8)
stw a3, (sp, 12)
stw lr, (sp, 16)
stw r8, (sp, 20)
.endm
.macro mcount_exit
ldw a0, (sp, 0)
ldw a1, (sp, 4)
ldw a2, (sp, 8)
ldw a3, (sp, 12)
ldw t1, (sp, 16)
ldw r8, (sp, 20)
ldw lr, (sp, 24)
addi sp, 28
jmp t1
.endm
.macro save_return_regs
subi sp, 16
stw a0, (sp, 0)
stw a1, (sp, 4)
stw a2, (sp, 8)
stw a3, (sp, 12)
.endm
.macro restore_return_regs
mov lr, a0
ldw a0, (sp, 0)
ldw a1, (sp, 4)
ldw a2, (sp, 8)
ldw a3, (sp, 12)
addi sp, 16
.endm
ENTRY(ftrace_stub)
jmp lr
END(ftrace_stub)
ENTRY(_mcount)
mcount_enter
/* r26 is link register, only used with jsri translation */
lrw r26, ftrace_trace_function
ldw r26, (r26, 0)
lrw a1, ftrace_stub
cmpne r26, a1
bf skip_ftrace
mov a0, lr
subi a0, MCOUNT_INSN_SIZE
ldw a1, (sp, 24)
jsr r26
#ifndef CONFIG_FUNCTION_GRAPH_TRACER
skip_ftrace:
mcount_exit
#else
skip_ftrace:
lrw a0, ftrace_graph_return
ldw a0, (a0, 0)
lrw a1, ftrace_stub
cmpne a0, a1
bt ftrace_graph_caller
lrw a0, ftrace_graph_entry
ldw a0, (a0, 0)
lrw a1, ftrace_graph_entry_stub
cmpne a0, a1
bt ftrace_graph_caller
mcount_exit
#endif
END(_mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
mov a0, sp
addi a0, 24
ldw a1, (sp, 16)
subi a1, MCOUNT_INSN_SIZE
mov a2, r8
lrw r26, prepare_ftrace_return
jsr r26
mcount_exit
END(ftrace_graph_caller)
ENTRY(return_to_handler)
save_return_regs
mov a0, r8
jsri ftrace_return_to_handler
restore_return_regs
jmp lr
END(return_to_handler)
#endif
......@@ -27,13 +27,7 @@ ENTRY(memcpy)
LABLE_ALIGN
.L_len_larger_16bytes:
#if defined(__CSKY_VDSPV2__)
vldx.8 vr0, (r1), r19
PRE_BNEZAD (r18)
addi r1, 16
vstx.8 vr0, (r0), r19
addi r0, 16
#elif defined(__CK860__)
#if defined(__CK860__)
ldw r3, (r1, 0)
stw r3, (r0, 0)
ldw r3, (r1, 4)
......
......@@ -7,7 +7,8 @@
#include <asm/ptrace.h>
#include <abi/regdef.h>
#define ELF_ARCH 252
#define ELF_ARCH EM_CSKY
#define EM_CSKY_OLD 39
/* CSKY Relocations */
#define R_CSKY_NONE 0
......@@ -31,14 +32,20 @@ typedef unsigned long elf_greg_t;
typedef struct user_fp elf_fpregset_t;
#define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
/*
* In gdb/bfd elf32-csky.c, csky_elf_grok_prstatus() uses a fixed size of
* elf_prstatus: 148 bytes for abiv1 and 220 bytes for abiv2. That size is
* enough for a coredump, so the full sizeof(struct pt_regs) is not needed.
*/
#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 2)
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
#define elf_check_arch(x) ((x)->e_machine == ELF_ARCH)
#define elf_check_arch(x) (((x)->e_machine == ELF_ARCH) || \
((x)->e_machine == EM_CSKY_OLD))
/*
* These are used to set parameters in the core dumps.
......
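
The ELF_NGREG change above leans on the 148/220-byte figures quoted in the comment; they can be cross-checked with a little arithmetic. The sketch below is only a back-of-envelope verification under assumed layouts (72 bytes of elf_prstatus ahead of pr_reg, a 4-byte pr_fpvalid after it, and 20/38 longs in the abiv1/abiv2 pt_regs as suggested by the uapi header later in this series); it is not generated from a real build.

/* Back-of-envelope check of the 148/220 core-dump sizes quoted above.
 * All constants are assumptions, as described in the lead-in. */
#include <assert.h>

int main(void)
{
        const int prstatus_head = 72;   /* bytes before pr_reg (assumed) */
        const int prstatus_tail = 4;    /* pr_fpvalid (assumed) */
        const int abiv1_regs = 20;      /* longs in abiv1 pt_regs (assumed) */
        const int abiv2_regs = 38;      /* longs in abiv2 pt_regs (assumed) */

        /* ELF_NGREG drops the last two longs of pt_regs */
        assert(prstatus_head + (abiv1_regs - 2) * 4 + prstatus_tail == 148);
        assert(prstatus_head + (abiv2_regs - 2) * 4 + prstatus_tail == 220);
        return 0;
}
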
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_FTRACE_H
#define __ASM_CSKY_FTRACE_H
#define MCOUNT_INSN_SIZE 4
#define HAVE_FUNCTION_GRAPH_FP_TEST
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#endif /* __ASM_CSKY_FTRACE_H */
/* SPDX-License-Identifier: GPL-2.0 */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#ifndef __ASM_CSKY_PERF_EVENT_H
#define __ASM_CSKY_PERF_EVENT_H
#endif /* __ASM_CSKY_PERF_EVENT_H */
......@@ -11,19 +11,13 @@
#include <asm/cache.h>
#include <abi/reg_ops.h>
#include <abi/regdef.h>
#include <abi/switch_context.h>
#ifdef CONFIG_CPU_HAS_FPU
#include <abi/fpu.h>
#endif
struct cpuinfo_csky {
unsigned long udelay_val;
unsigned long asid_cache;
/*
* Capability and feature descriptor structure for CSKY CPU
*/
unsigned long options;
unsigned int processor_id[4];
unsigned int fpu_id;
} __aligned(SMP_CACHE_BYTES);
extern struct cpuinfo_csky cpu_data[];
......@@ -49,13 +43,6 @@ extern struct cpuinfo_csky cpu_data[];
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long sr; /* saved status register */
unsigned long esp0; /* points to SR of stack frame */
unsigned long hi;
unsigned long lo;
/* Other stuff associated with the thread. */
unsigned long address; /* Last user fault */
unsigned long error_code;
/* FPU regs */
struct user_fp __aligned(16) user_fp;
......
......@@ -21,6 +21,10 @@ void __init set_send_ipi(void (*func)(const struct cpumask *mask), int irq);
#define raw_smp_processor_id() (current_thread_info()->cpu)
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
#endif /* CONFIG_SMP */
#endif /* __ASM_CSKY_SMP_H */
......@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/err.h>
#include <abi/regdef.h>
#include <uapi/linux/audit.h>
static inline int
syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
......@@ -68,4 +69,10 @@ syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
memcpy(&regs->a1 + i * sizeof(regs->a1), args, n * sizeof(regs->a0));
}
static inline int
syscall_get_arch(void)
{
return AUDIT_ARCH_CSKY;
}
#endif /* __ASM_SYSCALL_H */
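
syscall_get_arch() above is what the audit and seccomp cores report for csky tasks, and it pairs with the AUDIT_ARCH_CSKY definition added at the end of this series. As a hedged illustration of why the constant matters, a classic seccomp-BPF filter can pin itself to csky syscalls; this assumes userspace kernel headers that already carry AUDIT_ARCH_CSKY and SECCOMP_RET_KILL_PROCESS.

/* Sketch: a classic seccomp-BPF filter that only admits csky syscalls,
 * keyed on the arch value that syscall_get_arch() reports.  Header
 * availability is an assumption, as noted above. */
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <stddef.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        struct sock_filter csky_only[] = {
                /* load seccomp_data.arch */
                BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
                         offsetof(struct seccomp_data, arch)),
                /* kill anything that is not a csky syscall */
                BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_CSKY, 1, 0),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL_PROCESS),
                BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
        };
        struct sock_fprog prog = {
                .len = sizeof(csky_only) / sizeof(csky_only[0]),
                .filter = csky_only,
        };

        if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) ||
            prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog))
                perror("seccomp");
        return 0;
}
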
......@@ -10,6 +10,7 @@
#include <asm/types.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <abi/switch_context.h>
struct thread_info {
struct task_struct *task;
......@@ -36,6 +37,9 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
#define thread_saved_fp(tsk) \
((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8))
static inline struct thread_info *current_thread_info(void)
{
unsigned long sp;
......
include include/uapi/asm-generic/Kbuild.asm
header-y += cachectl.h
generic-y += auxvec.h
generic-y += param.h
generic-y += bpf_perf_event.h
......
......@@ -36,7 +36,7 @@ struct pt_regs {
unsigned long rhi;
unsigned long rlo;
unsigned long pad; /* reserved */
unsigned long dcsr;
#endif
};
......@@ -48,43 +48,6 @@ struct user_fp {
unsigned long reserved;
};
/*
* Switch stack for switch_to after push pt_regs.
*
* ABI_CSKYV2: r4 ~ r11, r15 ~ r17, r26 ~ r30;
* ABI_CSKYV1: r8 ~ r14, r15;
*/
struct switch_stack {
#if defined(__CSKYABIV2__)
unsigned long r4;
unsigned long r5;
unsigned long r6;
unsigned long r7;
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
#else
unsigned long r8;
unsigned long r9;
unsigned long r10;
unsigned long r11;
unsigned long r12;
unsigned long r13;
unsigned long r14;
#endif
unsigned long r15;
#if defined(__CSKYABIV2__)
unsigned long r16;
unsigned long r17;
unsigned long r26;
unsigned long r27;
unsigned long r28;
unsigned long r29;
unsigned long r30;
#endif
};
#ifdef __KERNEL__
#define PS_S 0x80000000 /* Supervisor Mode */
......
......@@ -6,3 +6,10 @@ obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_CSKY_PMU_V1) += perf_event.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
endif
......@@ -20,12 +20,9 @@ int main(void)
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr));
DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr));
DEFINE(THREAD_DSPHI, offsetof(struct thread_struct, hi));
DEFINE(THREAD_DSPLO, offsetof(struct thread_struct, lo));
/* offsets into the thread_info struct */
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
......
......@@ -7,60 +7,39 @@ int kstack_depth_to_print = 48;
void show_trace(unsigned long *stack)
{
unsigned long *endstack;
unsigned long *stack_end;
unsigned long *stack_start;
unsigned long *fp;
unsigned long addr;
int i;
pr_info("Call Trace:\n");
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
pr_cont("\n ");
addr = (unsigned long) stack & THREAD_MASK;
stack_start = (unsigned long *) addr;
stack_end = (unsigned long *) (addr + THREAD_SIZE);
fp = stack;
pr_info("\nCall Trace:");
while (fp > stack_start && fp < stack_end) {
#ifdef CONFIG_STACKTRACE
addr = fp[1];
fp = (unsigned long *) fp[0];
#else
addr = *fp++;
#endif
pr_cont(" [<%08lx>] %pS\n", addr, (void *)addr);
i++;
}
if (__kernel_text_address(addr))
pr_cont("\n[<%08lx>] %pS", addr, (void *)addr);
}
pr_cont("\n");
}
void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *p;
unsigned long *endstack;
int i;
if (!stack) {
if (task)
stack = (unsigned long *)task->thread.esp0;
stack = (unsigned long *)thread_saved_fp(task);
else
stack = (unsigned long *)&stack;
}
endstack = (unsigned long *)
(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
pr_info("Stack from %08lx:", (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
break;
if (i % 8 == 0)
pr_cont("\n ");
pr_cont(" %08lx", *p++);
}
pr_cont("\n");
show_trace(stack);
}
......@@ -122,16 +122,6 @@ ENTRY(csky_systemcall)
psrset ee, ie
/* Stack frame for syscall, origin call set_esp0 */
mov r12, sp
bmaski r11, 13
andn r12, r11
bgeni r11, 9
addi r11, 32
addu r12, r11
st sp, (r12, 0)
lrw r11, __NR_syscalls
cmphs syscallid, r11 /* Check nr of syscall */
bt ret_from_exception
......@@ -183,18 +173,10 @@ ENTRY(csky_systemcall)
#endif
stw a0, (sp, LSAVE_A0) /* Save return value */
movi a0, 1 /* leave system call */
mov a1, sp /* sp = pt_regs pointer */
jbsr syscall_trace
syscall_exit_work:
ld syscallid, (sp, LSAVE_PSR)
btsti syscallid, 31
bt 2f
jmpi resume_userspace
2: RESTORE_ALL
movi a0, 1 /* leave system call */
mov a1, sp /* right now, sp --> pt_regs */
jbsr syscall_trace
br ret_from_exception
ENTRY(ret_from_kernel_thread)
jbsr schedule_tail
......@@ -238,8 +220,6 @@ resume_userspace:
1: RESTORE_ALL
exit_work:
mov a0, sp /* Stack address is arg[0] */
jbsr set_esp0 /* Call C level */
btsti r8, TIF_NEED_RESCHED
bt work_resched
/* If thread_info->flag is empty, RESTORE_ALL */
......@@ -354,34 +334,12 @@ ENTRY(__switch_to)
stw sp, (a3, THREAD_KSP)
#ifdef CONFIG_CPU_HAS_HILO
lrw r10, THREAD_DSPHI
add r10, a3
mfhi r6
mflo r7
stw r6, (r10, 0) /* THREAD_DSPHI */
stw r7, (r10, 4) /* THREAD_DSPLO */
mfcr r6, cr14
stw r6, (r10, 8) /* THREAD_DSPCSR */
#endif
/* Set up next process to run */
lrw a3, TASK_THREAD
addu a3, a1
ldw sp, (a3, THREAD_KSP) /* Set next kernel sp */
#ifdef CONFIG_CPU_HAS_HILO
lrw r10, THREAD_DSPHI
add r10, a3
ldw r6, (r10, 8) /* THREAD_DSPCSR */
mtcr r6, cr14
ldw r6, (r10, 0) /* THREAD_DSPHI */
ldw r7, (r10, 4) /* THREAD_DSPLO */
mthi r6
mtlo r7
#endif
ldw a2, (a3, THREAD_SR) /* Set next PSR */
mtcr a2, psr
......
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long)&return_to_handler;
unsigned long old;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
if (!function_graph_enter(old, self_addr,
*(unsigned long *)frame_pointer, parent)) {
/*
* For a csky-gcc function that has a sub-call:
* subi sp, sp, 8
* stw r8, (sp, 0)
* mov r8, sp
* st.w r15, (sp, 0x4)
* push r15
* jl _mcount
* We only need to set *parent for resume
*
* For a csky-gcc function that has no sub-call:
* subi sp, sp, 4
* stw r8, (sp, 0)
* mov r8, sp
* push r15
* jl _mcount
* We need to set both *parent and *(frame_pointer + 4) for resume,
* because lr is restored twice.
*/
*parent = return_hooker;
frame_pointer += 4;
if (*(unsigned long *)frame_pointer == old)
*(unsigned long *)frame_pointer = return_hooker;
}
}
#endif
/* _mcount is defined in abi's mcount.S */
extern void _mcount(void);
EXPORT_SYMBOL(_mcount);
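
Nothing csky-specific is needed from userspace once _mcount and prepare_ftrace_return are wired up; the tracers are driven through the generic tracefs files. A small hedged example follows, assuming the usual /sys/kernel/tracing mount point (older setups expose the same file under /sys/kernel/debug/tracing).

/* Sketch: select the function_graph tracer through tracefs.  The mount
 * point is an assumption, as noted above. */
#include <stdio.h>

int main(void)
{
        const char *path = "/sys/kernel/tracing/current_tracer";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror(path);
                return 1;
        }
        fputs("function_graph\n", f);   /* or "function" for plain ftrace */
        return fclose(f);
}
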
This diff is collapsed.
......@@ -93,26 +93,31 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
unsigned long get_wchan(struct task_struct *p)
{
unsigned long esp, pc;
unsigned long stack_page;
unsigned long lr;
unsigned long *fp, *stack_start, *stack_end;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)p;
esp = p->thread.esp0;
stack_start = (unsigned long *)end_of_stack(p);
stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
fp = (unsigned long *) thread_saved_fp(p);
do {
if (esp < stack_page+sizeof(struct task_struct) ||
esp >= 8184+stack_page)
if (fp < stack_start || fp > stack_end)
return 0;
/*FIXME: There's may be error here!*/
pc = ((unsigned long *)esp)[1];
/* FIXME: This depends on the order of these functions. */
if (!in_sched_functions(pc))
return pc;
esp = *(unsigned long *) esp;
#ifdef CONFIG_STACKTRACE
lr = fp[1];
fp = (unsigned long *)fp[0];
#else
lr = *fp++;
#endif
if (!in_sched_functions(lr) &&
__kernel_text_address(lr))
return lr;
} while (count++ < 16);
return 0;
}
EXPORT_SYMBOL(get_wchan);
......
......@@ -50,15 +50,11 @@ static void singlestep_enable(struct task_struct *tsk)
*/
void user_enable_single_step(struct task_struct *child)
{
if (child->thread.esp0 == 0)
return;
singlestep_enable(child);
}
void user_disable_single_step(struct task_struct *child)
{
if (child->thread.esp0 == 0)
return;
singlestep_disable(child);
}
......@@ -95,7 +91,9 @@ static int gpr_set(struct task_struct *target,
return ret;
regs.sr = task_pt_regs(target)->sr;
#ifdef CONFIG_CPU_HAS_HILO
regs.dcsr = task_pt_regs(target)->dcsr;
#endif
task_thread_info(target)->tp_value = regs.tls;
*task_pt_regs(target) = regs;
......@@ -239,6 +237,7 @@ asmlinkage void syscall_trace(int why, struct pt_regs *regs)
regs->regs[SYSTRACE_SAVENUM] = saved_why;
}
extern void show_stack(struct task_struct *task, unsigned long *stack);
void show_regs(struct pt_regs *fp)
{
unsigned long *sp;
......@@ -261,35 +260,37 @@ void show_regs(struct pt_regs *fp)
(int) (((unsigned long) current) + 2 * PAGE_SIZE));
}
pr_info("PC: 0x%08lx\n", (long)fp->pc);
pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc);
pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr);
pr_info("SP: 0x%08lx\n", (long)fp);
pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
pr_info("PSR: 0x%08lx\n", (long)fp->sr);
pr_info("a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n",
fp->a0, fp->a1, fp->a2, fp->a3);
pr_info(" a0: 0x%08lx a1: 0x%08lx a2: 0x%08lx a3: 0x%08lx\n",
fp->a0, fp->a1, fp->a2, fp->a3);
#if defined(__CSKYABIV2__)
pr_info("r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n",
pr_info(" r4: 0x%08lx r5: 0x%08lx r6: 0x%08lx r7: 0x%08lx\n",
fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
pr_info("r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n",
pr_info(" r8: 0x%08lx r9: 0x%08lx r10: 0x%08lx r11: 0x%08lx\n",
fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
pr_info("r12 0x%08lx r13: 0x%08lx r15: 0x%08lx\n",
pr_info("r12: 0x%08lx r13: 0x%08lx r15: 0x%08lx\n",
fp->regs[8], fp->regs[9], fp->lr);
pr_info("r16:0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n",
pr_info("r16: 0x%08lx r17: 0x%08lx r18: 0x%08lx r19: 0x%08lx\n",
fp->exregs[0], fp->exregs[1], fp->exregs[2], fp->exregs[3]);
pr_info("r20 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n",
pr_info("r20: 0x%08lx r21: 0x%08lx r22: 0x%08lx r23: 0x%08lx\n",
fp->exregs[4], fp->exregs[5], fp->exregs[6], fp->exregs[7]);
pr_info("r24 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n",
pr_info("r24: 0x%08lx r25: 0x%08lx r26: 0x%08lx r27: 0x%08lx\n",
fp->exregs[8], fp->exregs[9], fp->exregs[10], fp->exregs[11]);
pr_info("r28 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n",
pr_info("r28: 0x%08lx r29: 0x%08lx r30: 0x%08lx tls: 0x%08lx\n",
fp->exregs[12], fp->exregs[13], fp->exregs[14], fp->tls);
pr_info("hi 0x%08lx lo: 0x%08lx\n",
pr_info(" hi: 0x%08lx lo: 0x%08lx\n",
fp->rhi, fp->rlo);
#else
pr_info("r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n",
pr_info(" r6: 0x%08lx r7: 0x%08lx r8: 0x%08lx r9: 0x%08lx\n",
fp->regs[0], fp->regs[1], fp->regs[2], fp->regs[3]);
pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n",
pr_info("r10: 0x%08lx r11: 0x%08lx r12: 0x%08lx r13: 0x%08lx\n",
fp->regs[4], fp->regs[5], fp->regs[6], fp->regs[7]);
pr_info("r14 0x%08lx r1: 0x%08lx r15: 0x%08lx\n",
pr_info("r14: 0x%08lx r1: 0x%08lx r15: 0x%08lx\n",
fp->regs[8], fp->regs[9], fp->lr);
#endif
......@@ -311,4 +312,7 @@ void show_regs(struct pt_regs *fp)
pr_cont("%08x ", (int) *sp++);
}
pr_cont("\n");
show_stack(NULL, (unsigned long *)fp->regs[4]);
return;
}
......@@ -238,8 +238,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
if (!user_mode(regs))
return;
current->thread.esp0 = (unsigned long)regs;
/*
* If we were from a system call, check for system call restarting...
*/
......
......@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
#include <linux/sched/hotplug.h>
#include <asm/irq.h>
#include <asm/traps.h>
#include <asm/sections.h>
......@@ -112,12 +113,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
}
static void __init enable_smp_ipi(void)
{
enable_percpu_irq(ipi_irq, 0);
}
static int ipi_dummy_dev;
void __init setup_smp_ipi(void)
{
int rc;
......@@ -130,7 +127,7 @@ void __init setup_smp_ipi(void)
if (rc)
panic("%s IRQ request failed\n", __func__);
enable_smp_ipi();
enable_percpu_irq(ipi_irq, 0);
}
void __init setup_smp(void)
......@@ -138,7 +135,7 @@ void __init setup_smp(void)
struct device_node *node = NULL;
int cpu;
while ((node = of_find_node_by_type(node, "cpu"))) {
for_each_of_cpu_node(node) {
if (!of_device_is_available(node))
continue;
......@@ -161,12 +158,10 @@ volatile unsigned int secondary_stack;
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
unsigned int tmp;
secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE;
unsigned long mask = 1 << cpu;
secondary_stack = (unsigned int)tidle->stack + THREAD_SIZE - 8;
secondary_hint = mfcr("cr31");
secondary_ccr = mfcr("cr18");
/*
......@@ -176,10 +171,13 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
*/
mtcr("cr17", 0x22);
/* Enable cpu in SMP reset ctrl reg */
tmp = mfcr("cr<29, 0>");
tmp |= 1 << cpu;
mtcr("cr<29, 0>", tmp);
if (mask & mfcr("cr<29, 0>")) {
send_arch_ipi(cpumask_of(cpu));
} else {
/* Enable cpu in SMP reset ctrl reg */
mask |= mfcr("cr<29, 0>");
mtcr("cr<29, 0>", mask);
}
/* Wait for the cpu online */
while (!cpu_online(cpu));
......@@ -219,7 +217,7 @@ void csky_start_secondary(void)
init_fpu();
#endif
enable_smp_ipi();
enable_percpu_irq(ipi_irq, 0);
mmget(mm);
mmgrab(mm);
......@@ -235,3 +233,46 @@ void csky_start_secondary(void)
preempt_disable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
#ifdef CONFIG_HOTPLUG_CPU
int __cpu_disable(void)
{
unsigned int cpu = smp_processor_id();
set_cpu_online(cpu, false);
irq_migrate_all_off_this_cpu();
clear_tasks_mm_cpumask(cpu);
return 0;
}
void __cpu_die(unsigned int cpu)
{
if (!cpu_wait_death(cpu, 5)) {
pr_crit("CPU%u: shutdown failed\n", cpu);
return;
}
pr_notice("CPU%u: shutdown\n", cpu);
}
void arch_cpu_idle_dead(void)
{
idle_task_exit();
cpu_report_death();
while (!secondary_stack)
arch_cpu_idle();
local_irq_disable();
asm volatile(
"mov sp, %0\n"
"mov r8, %0\n"
"jmpi csky_start_secondary"
:
: "r" (secondary_stack));
}
#endif
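
The __cpu_disable()/__cpu_die()/arch_cpu_idle_dead() trio above is driven by the generic hotplug sysfs interface rather than anything csky-specific. A minimal sketch, assuming the standard /sys/devices/system/cpu layout:

/* Sketch: take CPU1 offline and bring it back, which exercises the
 * __cpu_disable()/__cpu_die() and __cpu_up() paths added above.  The
 * sysfs path is the generic cpu-hotplug interface, not csky-specific. */
#include <stdio.h>

static int set_cpu_online(int cpu, int online)
{
        char path[64];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/online", cpu);
        f = fopen(path, "w");
        if (!f)
                return -1;
        fprintf(f, "%d\n", online);
        return fclose(f);
}

int main(void)
{
        if (set_cpu_online(1, 0))
                perror("offline cpu1");
        if (set_cpu_online(1, 1))
                perror("online cpu1");
        return 0;
}
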
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>
void save_stack_trace(struct stack_trace *trace)
{
save_stack_trace_tsk(current, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
unsigned long *fp, *stack_start, *stack_end;
unsigned long addr;
int skip = trace->skip;
int savesched;
int graph_idx = 0;
if (tsk == current) {
asm volatile("mov %0, r8\n":"=r"(fp));
savesched = 1;
} else {
fp = (unsigned long *)thread_saved_fp(tsk);
savesched = 0;
}
addr = (unsigned long) fp & THREAD_MASK;
stack_start = (unsigned long *) addr;
stack_end = (unsigned long *) (addr + THREAD_SIZE);
while (fp > stack_start && fp < stack_end) {
unsigned long lpp, fpp;
fpp = fp[0];
lpp = fp[1];
if (!__kernel_text_address(lpp))
break;
else
lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
if (savesched || !in_sched_functions(lpp)) {
if (skip) {
skip--;
} else {
trace->entries[trace->nr_entries++] = lpp;
if (trace->nr_entries >= trace->max_entries)
break;
}
}
fp = (unsigned long *)fpp;
}
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
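
save_stack_trace_tsk() above, like the reworked show_trace() and get_wchan(), walks the -mbacktrace frame chain in which fp[0] holds the caller's frame pointer and fp[1] the return address. The standalone sketch below mimics that walk over a fabricated in-memory chain; the addresses are made up and nothing here touches a real kernel stack.

/* Illustrative only: follow a hand-built chain of two-word frames the
 * way the csky unwinder does (frame[0] = previous fp, frame[1] = return
 * address).  The "return addresses" are made-up numbers. */
#include <stdio.h>

struct frame {
        struct frame *prev_fp;          /* what fp[0] holds on csky */
        unsigned long return_addr;      /* what fp[1] holds on csky */
};

static void walk(struct frame *fp, struct frame *stack_start,
                 struct frame *stack_end)
{
        /* same bounds check as the kernel walker: stay inside the stack */
        while (fp > stack_start && fp < stack_end) {
                printf("ra = %#lx\n", fp->return_addr);
                fp = fp->prev_fp;       /* hop to the caller's frame */
        }
}

int main(void)
{
        struct frame frames[4];

        frames[0].prev_fp = NULL;        frames[0].return_addr = 0;
        frames[1].prev_fp = &frames[3];  frames[1].return_addr = 0x1004;
        frames[2].prev_fp = &frames[1];  frames[2].return_addr = 0x2008;
        frames[3].prev_fp = &frames[0];  frames[3].return_addr = 0x300c;

        /* innermost frame first, bounded by the fake stack's extent */
        walk(&frames[2], &frames[0], &frames[4]);
        return 0;
}

Compared with the old esp0-based scan, following fp[0]/fp[1] pairs only visits genuine call frames, which is what lets get_wchan() and the stack dumps drop their FIXME-laden heuristics.
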
......@@ -106,7 +106,6 @@ void buserr(struct pt_regs *regs)
pr_err("User mode Bus Error\n");
show_regs(regs);
current->thread.esp0 = (unsigned long) regs;
force_sig_fault(SIGSEGV, 0, (void __user *)regs->pc, current);
}
......@@ -162,8 +161,3 @@ asmlinkage void trap_c(struct pt_regs *regs)
}
send_sig(sig, current, 0);
}
asmlinkage void set_esp0(unsigned long ssp)
{
current->thread.esp0 = ssp;
}
......@@ -172,8 +172,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
if (user_mode(regs)) {
tsk->thread.address = address;
tsk->thread.error_code = write;
force_sig_fault(SIGSEGV, si_code, (void __user *)address, current);
return;
}
......@@ -188,8 +186,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
* terminate things with extreme prejudice.
*/
bust_spinlocks(1);
pr_alert("Unable to %s at vaddr: %08lx, epc: %08lx\n",
__func__, address, regs->pc);
pr_alert("Unable to handle kernel paging request at virtual "
"address 0x%08lx, pc: 0x%08lx\n", address, regs->pc);
die_if_kernel("Oops", regs, write);
out_of_memory:
......@@ -207,6 +205,5 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
if (!user_mode(regs))
goto no_context;
tsk->thread.address = address;
force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address, current);
}
......@@ -30,7 +30,7 @@ void __iomem *ioremap(phys_addr_t addr, size_t size)
vaddr = (unsigned long)area->addr;
prot = __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE |
_PAGE_GLOBAL | _CACHE_UNCACHED);
_PAGE_GLOBAL | _CACHE_UNCACHED | _PAGE_SO);
if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
free_vm_area(area);
......
......@@ -79,11 +79,11 @@ static int csky_mptimer_starting_cpu(unsigned int cpu)
to->clkevt.cpumask = cpumask_of(cpu);
enable_percpu_irq(csky_mptimer_irq, 0);
clockevents_config_and_register(&to->clkevt, timer_of_rate(to),
2, ULONG_MAX);
enable_percpu_irq(csky_mptimer_irq, 0);
return 0;
}
......@@ -97,7 +97,7 @@ static int csky_mptimer_dying_cpu(unsigned int cpu)
/*
* clock source
*/
static u64 sched_clock_read(void)
static u64 notrace sched_clock_read(void)
{
return (u64)mfcr(PTIM_CCVR);
}
......
......@@ -378,6 +378,7 @@ enum {
#define AUDIT_ARCH_ARM (EM_ARM|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_ARMEB (EM_ARM)
#define AUDIT_ARCH_CRIS (EM_CRIS|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_CSKY (EM_CSKY|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_FRV (EM_FRV)
#define AUDIT_ARCH_I386 (EM_386|__AUDIT_ARCH_LE)
#define AUDIT_ARCH_IA64 (EM_IA_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
......
......@@ -44,6 +44,7 @@
#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_RISCV 243 /* RISC-V */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_CSKY 252 /* C-SKY */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
/*
......