Commit 37744fee authored by Arnd Bergmann, committed by Rich Felker

sh: remove sh5 support

sh5 never became a product and has probably never really worked.

Remove it by recursively deleting all associated Kconfig options
and all corresponding files.
Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Rich Felker <dalias@libc.org>
parent d1f56f31
......@@ -54,15 +54,6 @@ config SUPERH
select HAVE_NMI
select NEED_SG_DMA_LENGTH
select ARCH_HAS_GIGANTIC_PAGE
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
gaming console. The SuperH port has a home page at
<http://www.linux-sh.org/>.
config SUPERH32
def_bool "$(ARCH)" = "sh"
select ARCH_32BIT_OFF_T
select GUP_GET_PTE_LOW_HIGH if X2TLB
select HAVE_KPROBES
......@@ -81,19 +72,15 @@ config SUPERH32
select ARCH_HIBERNATION_POSSIBLE if MMU
select SPARSE_IRQ
select HAVE_STACKPROTECTOR
config SUPERH64
def_bool "$(ARCH)" = "sh64"
select HAVE_EXIT_THREAD
select KALLSYMS
help
The SuperH is a RISC processor targeted for use in embedded systems
and consumer electronics; it was also used in the Sega Dreamcast
gaming console. The SuperH port has a home page at
<http://www.linux-sh.org/>.
config GENERIC_BUG
def_bool y
depends on BUG && SUPERH32
config GENERIC_CSUM
def_bool y
depends on SUPERH64
depends on BUG
config GENERIC_HWEIGHT
def_bool y
......@@ -203,12 +190,6 @@ config CPU_SH4AL_DSP
select CPU_SH4A
select CPU_HAS_DSP
config CPU_SH5
bool
select CPU_HAS_FPU
select SYS_SUPPORTS_SH_TMU
select SYS_SUPPORTS_HUGETLBFS if MMU
config CPU_SHX2
bool
......@@ -228,8 +209,6 @@ config CPU_HAS_PMU
default y
bool
if SUPERH32
choice
prompt "Processor sub-type selection"
......@@ -518,27 +497,6 @@ config CPU_SUBTYPE_SH7366
endchoice
endif
if SUPERH64
choice
prompt "Processor sub-type selection"
# SH-5 Processor Support
config CPU_SUBTYPE_SH5_101
bool "Support SH5-101 processor"
select CPU_SH5
config CPU_SUBTYPE_SH5_103
bool "Support SH5-103 processor"
select CPU_SH5
endchoice
endif
source "arch/sh/mm/Kconfig"
source "arch/sh/Kconfig.cpu"
......@@ -592,7 +550,7 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on SUPERH32 && MMU
depends on MMU
select KEXEC_CORE
help
kexec is a system call that implements the ability to shutdown your
......@@ -610,7 +568,7 @@ config KEXEC
config CRASH_DUMP
bool "kernel crash dumps (EXPERIMENTAL)"
depends on SUPERH32 && BROKEN_ON_SMP
depends on BROKEN_ON_SMP
help
Generate crash dump after being started by kexec.
This should be normally only set in special crash dump kernels
......@@ -624,7 +582,7 @@ config CRASH_DUMP
config KEXEC_JUMP
bool "kexec jump (EXPERIMENTAL)"
depends on SUPERH32 && KEXEC && HIBERNATION
depends on KEXEC && HIBERNATION
help
Jump between original kernel and kexeced kernel and invoke
code via KEXEC
......@@ -701,7 +659,7 @@ config HOTPLUG_CPU
config GUSA
def_bool y
depends on !SMP && SUPERH32
depends on !SMP
help
This enables support for gUSA (general UserSpace Atomicity).
This is the default implementation for both UP and non-ll/sc
......
......@@ -13,7 +13,6 @@ config CPU_LITTLE_ENDIAN
config CPU_BIG_ENDIAN
bool "Big Endian"
depends on !CPU_SH5
endchoice
......@@ -27,10 +26,6 @@ config SH_FPU
This option must be set in order to enable the FPU.
config SH64_FPU_DENORM_FLUSH
bool "Flush floating point denorms to zero"
depends on SH_FPU && SUPERH64
config SH_FPU_EMU
def_bool n
prompt "FPU emulation support"
......@@ -77,10 +72,6 @@ config SPECULATIVE_EXECUTION
If unsure, say N.
config SH64_ID2815_WORKAROUND
bool "Include workaround for SH5-101 cut2 silicon defect ID2815"
depends on CPU_SUBTYPE_SH5_101
config CPU_HAS_INTEVT
bool
......
......@@ -5,7 +5,6 @@ config TRACE_IRQFLAGS_SUPPORT
config SH_STANDARD_BIOS
bool "Use LinuxSH standard BIOS"
depends on SUPERH32
help
Say Y here if your target has the gdb-sh-stub
package from www.m17n.org (or any conforming standard LinuxSH BIOS)
......@@ -19,7 +18,7 @@ config SH_STANDARD_BIOS
config STACK_DEBUG
bool "Check for stack overflows"
depends on DEBUG_KERNEL && SUPERH32
depends on DEBUG_KERNEL
help
This option will cause messages to be printed if free stack space
drops below a certain limit. Saying Y here will add overhead to
......@@ -38,7 +37,7 @@ config 4KSTACKS
config IRQSTACKS
bool "Use separate kernel stacks when processing interrupts"
depends on DEBUG_KERNEL && SUPERH32 && BROKEN
depends on DEBUG_KERNEL && BROKEN
help
If you say Y here the kernel will use separate kernel stacks
for handling hard and soft interrupts. This can help avoid
......@@ -46,7 +45,7 @@ config IRQSTACKS
config DUMP_CODE
bool "Show disassembly of nearby code in register dumps"
depends on DEBUG_KERNEL && SUPERH32
depends on DEBUG_KERNEL
default y if DEBUG_BUGVERBOSE
default n
help
......@@ -59,7 +58,6 @@ config DUMP_CODE
config DWARF_UNWINDER
bool "Enable the DWARF unwinder for stacktraces"
select FRAME_POINTER
depends on SUPERH32
default n
help
Enabling this option will make stacktraces more accurate, at
......@@ -77,11 +75,6 @@ config SH_NO_BSS_INIT
For all other cases, say N. If this option seems perplexing, or
you aren't sure, say N.
config SH64_SR_WATCH
bool "Debug: set SR.WATCH to enable hardware watchpoints and trace"
depends on SUPERH64
config MCOUNT
def_bool y
depends on SUPERH32
depends on STACK_DEBUG || FUNCTION_TRACER
......@@ -11,7 +11,7 @@
#
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
CROSS_COMPILE := $(call cc-cross-prefix, sh-linux- sh-linux-gnu- sh-unknown-linux-gnu-)
endif
endif
......@@ -29,12 +29,9 @@ isa-$(CONFIG_CPU_SH3) := sh3
isa-$(CONFIG_CPU_SH4) := sh4
isa-$(CONFIG_CPU_SH4A) := sh4a
isa-$(CONFIG_CPU_SH4AL_DSP) := sh4al
isa-$(CONFIG_CPU_SH5) := shmedia
ifeq ($(CONFIG_SUPERH32),y)
isa-$(CONFIG_SH_DSP) := $(isa-y)-dsp
isa-y := $(isa-y)-up
endif
cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,)
cflags-$(CONFIG_CPU_J2) += $(call cc-option,-mj2,)
......@@ -47,7 +44,6 @@ cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \
cflags-$(CONFIG_CPU_SH4A) += $(call cc-option,-m4a,) \
$(call cc-option,-m4a-nofpu,)
cflags-$(CONFIG_CPU_SH4AL_DSP) += $(call cc-option,-m4al,)
cflags-$(CONFIG_CPU_SH5) := $(call cc-option,-m5-32media-nofpu,)
ifeq ($(cflags-y),)
#
......@@ -88,7 +84,7 @@ OBJCOPYFLAGS := -O binary -R .note -R .note.gnu.build-id -R .comment \
-R .stab -R .stabstr -S
# Give the various platforms the opportunity to set default image types
defaultimage-$(CONFIG_SUPERH32) := zImage
defaultimage-y := zImage
defaultimage-$(CONFIG_SH_SH7785LCR) := uImage
defaultimage-$(CONFIG_SH_RSK) := uImage
defaultimage-$(CONFIG_SH_URQUELL) := uImage
......@@ -107,31 +103,22 @@ KBUILD_IMAGE := $(boot)/$(defaultimage-y)
# Choosing incompatible machines durings configuration will result in
# error messages during linking.
#
ifdef CONFIG_SUPERH32
UTS_MACHINE := sh
BITS := 32
LDFLAGS_vmlinux += -e _stext
else
UTS_MACHINE := sh64
BITS := 64
LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
--defsym phys_stext_shmedia=phys_stext+1 \
-e phys_stext_shmedia
endif
ifdef CONFIG_CPU_LITTLE_ENDIAN
ld-bfd := elf32-$(UTS_MACHINE)-linux
ld-bfd := elf32-sh-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EL
else
ld-bfd := elf32-$(UTS_MACHINE)big-linux
ld-bfd := elf32-shbig-linux
LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
KBUILD_LDFLAGS += -EB
endif
export ld-bfd BITS
export ld-bfd
head-y := arch/sh/kernel/head_$(BITS).o
head-y := arch/sh/kernel/head_32.o
core-y += arch/sh/kernel/ arch/sh/mm/ arch/sh/boards/
core-$(CONFIG_SH_FPU_EMU) += arch/sh/math-emu/
......@@ -185,7 +172,6 @@ cpuincdir-$(CONFIG_CPU_SH2) += cpu-sh2
cpuincdir-$(CONFIG_CPU_SH3) += cpu-sh3
cpuincdir-$(CONFIG_CPU_SH4A) += cpu-sh4a
cpuincdir-$(CONFIG_CPU_SH4) += cpu-sh4
cpuincdir-$(CONFIG_CPU_SH5) += cpu-sh5
cpuincdir-y += cpu-common # Must be last
drivers-y += arch/sh/drivers/
......@@ -206,8 +192,7 @@ ifeq ($(CONFIG_DWARF_UNWINDER),y)
KBUILD_CFLAGS += -fasynchronous-unwind-tables
endif
libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
libs-y := arch/sh/lib/ $(libs-y)
BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
uImage.srec uImage.bin zImage vmlinux.bin vmlinux.srec \
......
......@@ -8,9 +8,9 @@
targets := vmlinux vmlinux.bin vmlinux.bin.gz \
vmlinux.bin.bz2 vmlinux.bin.lzma \
vmlinux.bin.xz vmlinux.bin.lzo \
head_$(BITS).o misc.o piggy.o
head_32.o misc.o piggy.o
OBJECTS = $(obj)/head_$(BITS).o $(obj)/misc.o $(obj)/cache.o
OBJECTS = $(obj)/head_32.o $(obj)/misc.o $(obj)/cache.o
GCOV_PROFILE := n
......@@ -39,15 +39,11 @@ LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
#
# Pull in the necessary libgcc bits from the in-kernel implementation.
#
lib1funcs-$(CONFIG_SUPERH32) := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
lshrsi3.S
lib1funcs-obj := \
lib1funcs-y := ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S lshrsi3.S
lib1funcs-obj := \
$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
lib1funcs-dir := $(srctree)/arch/$(SRCARCH)/lib
ifeq ($(BITS),64)
lib1funcs-dir := $(addsuffix $(BITS), $(lib1funcs-dir))
endif
KBUILD_CFLAGS += -I$(lib1funcs-dir) -DDISABLE_BRANCH_PROFILING
......
......@@ -116,11 +116,7 @@ void ftrace_stub(void)
{
}
#ifdef CONFIG_SUPERH64
#define stackalign 8
#else
#define stackalign 4
#endif
#define STACK_SIZE (4096)
long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE];
......@@ -130,13 +126,9 @@ void decompress_kernel(void)
{
unsigned long output_addr;
#ifdef CONFIG_SUPERH64
output_addr = (CONFIG_MEMORY_START + 0x2000);
#else
output_addr = __pa((unsigned long)&_text+PAGE_SIZE);
#if defined(CONFIG_29BIT)
output_addr |= P2SEG;
#endif
#endif
output = (unsigned char *)output_addr;
......
......@@ -10,7 +10,6 @@ obj-$(CONFIG_CPU_SUBTYPE_SH7763) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7780) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7785) += pci-sh7780.o ops-sh4.o
obj-$(CONFIG_CPU_SUBTYPE_SH7786) += pcie-sh7786.o ops-sh7786.o
obj-$(CONFIG_CPU_SH5) += pci-sh5.o ops-sh5.o
obj-$(CONFIG_SH_DREAMCAST) += ops-dreamcast.o fixups-dreamcast.o \
pci-dreamcast.o
......
// SPDX-License-Identifier: GPL-2.0
/*
* Support functions for the SH5 PCI hardware.
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <asm/io.h>
#include "pci-sh5.h"
static int sh5pci_read(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 *val)
{
SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
switch (size) {
case 1:
*val = (u8)SH5PCI_READ_BYTE(PDR + (where & 3));
break;
case 2:
*val = (u16)SH5PCI_READ_SHORT(PDR + (where & 2));
break;
case 4:
*val = SH5PCI_READ(PDR);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int sh5pci_write(struct pci_bus *bus, unsigned int devfn, int where,
int size, u32 val)
{
SH5PCI_WRITE(PAR, CONFIG_CMD(bus, devfn, where));
switch (size) {
case 1:
SH5PCI_WRITE_BYTE(PDR + (where & 3), (u8)val);
break;
case 2:
SH5PCI_WRITE_SHORT(PDR + (where & 2), (u16)val);
break;
case 4:
SH5PCI_WRITE(PDR, val);
break;
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops sh5_pci_ops = {
.read = sh5pci_read,
.write = sh5pci_write,
};
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*
* Support functions for the SH5 PCI hardware.
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/irq.h>
#include <cpu/irq.h>
#include <asm/io.h>
#include "pci-sh5.h"
unsigned long pcicr_virt;
unsigned long PCI_IO_AREA;
/* Rounds a number UP to the nearest power of two. Used for
* sizing the PCI window.
*/
static u32 __init r2p2(u32 num)
{
int i = 31;
u32 tmp = num;
if (num == 0)
return 0;
do {
if (tmp & (1 << 31))
break;
i--;
tmp <<= 1;
} while (i >= 0);
tmp = 1 << i;
/* If the original number isn't a power of 2, round it up */
if (tmp != num)
tmp <<= 1;
return tmp;
}
static irqreturn_t pcish5_err_irq(int irq, void *dev_id)
{
struct pt_regs *regs = get_irq_regs();
unsigned pci_int, pci_air, pci_cir, pci_aint;
pci_int = SH5PCI_READ(INT);
pci_cir = SH5PCI_READ(CIR);
pci_air = SH5PCI_READ(AIR);
if (pci_int) {
printk("PCI INTERRUPT (at %08llx)!\n", regs->pc);
printk("PCI INT -> 0x%x\n", pci_int & 0xffff);
printk("PCI AIR -> 0x%x\n", pci_air);
printk("PCI CIR -> 0x%x\n", pci_cir);
SH5PCI_WRITE(INT, ~0);
}
pci_aint = SH5PCI_READ(AINT);
if (pci_aint) {
printk("PCI ARB INTERRUPT!\n");
printk("PCI AINT -> 0x%x\n", pci_aint);
printk("PCI AIR -> 0x%x\n", pci_air);
printk("PCI CIR -> 0x%x\n", pci_cir);
SH5PCI_WRITE(AINT, ~0);
}
return IRQ_HANDLED;
}
static irqreturn_t pcish5_serr_irq(int irq, void *dev_id)
{
printk("SERR IRQ\n");
return IRQ_NONE;
}
static struct resource sh5_pci_resources[2];
static struct pci_channel sh5pci_controller = {
.pci_ops = &sh5_pci_ops,
.resources = sh5_pci_resources,
.nr_resources = ARRAY_SIZE(sh5_pci_resources),
.mem_offset = 0x00000000,
.io_offset = 0x00000000,
};
static int __init sh5pci_init(void)
{
unsigned long memStart = __pa(memory_start);
unsigned long memSize = __pa(memory_end) - memStart;
u32 lsr0;
u32 uval;
if (request_irq(IRQ_ERR, pcish5_err_irq,
0, "PCI Error",NULL) < 0) {
printk(KERN_ERR "PCISH5: Cannot hook PCI_PERR interrupt\n");
return -EINVAL;
}
if (request_irq(IRQ_SERR, pcish5_serr_irq,
0, "PCI SERR interrupt", NULL) < 0) {
printk(KERN_ERR "PCISH5: Cannot hook PCI_SERR interrupt\n");
return -EINVAL;
}
pcicr_virt = (unsigned long)ioremap(SH5PCI_ICR_BASE, 1024);
if (!pcicr_virt) {
panic("Unable to remap PCICR\n");
}
PCI_IO_AREA = (unsigned long)ioremap(SH5PCI_IO_BASE, 0x10000);
if (!PCI_IO_AREA) {
panic("Unable to remap PCIIO\n");
}
/* Clear snoop registers */
SH5PCI_WRITE(CSCR0, 0);
SH5PCI_WRITE(CSCR1, 0);
/* Switch off interrupts */
SH5PCI_WRITE(INTM, 0);
SH5PCI_WRITE(AINTM, 0);
SH5PCI_WRITE(PINTM, 0);
/* Set bus active, take it out of reset */
uval = SH5PCI_READ(CR);
/* Set command Register */
SH5PCI_WRITE(CR, uval | CR_LOCK_MASK | CR_CFINT| CR_FTO | CR_PFE |
CR_PFCS | CR_BMAM);
uval=SH5PCI_READ(CR);
/* Allow it to be a master */
/* NB - WE DISABLE I/O ACCESS to stop overlap */
/* set WAIT bit to enable stepping, an attempt to improve stability */
SH5PCI_WRITE_SHORT(CSR_CMD,
PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
PCI_COMMAND_WAIT);
/*
** Set translation mapping memory in order to convert the address
** used for the main bus, to the PCI internal address.
*/
SH5PCI_WRITE(MBR,0x40000000);
/* Always set the max size 512M */
SH5PCI_WRITE(MBMR, PCISH5_MEM_SIZCONV(512*1024*1024));
/*
** I/O addresses are mapped at internal PCI specific address
** as is described into the configuration bridge table.
** These are changed to 0, to allow cards that have legacy
** io such as vga to function correctly. We set the SH5 IOBAR to
** 256K, which is a bit big as we can only have 64K of address space
*/
SH5PCI_WRITE(IOBR,0x0);
/* Set up a 256K window. Totally pointless waste of address space */
SH5PCI_WRITE(IOBMR,0);
/* The SH5 has a HUGE 256K I/O region, which breaks the PCI spec.
* Ideally, we would want to map the I/O region somewhere, but it
* is so big this is not that easy!
*/
SH5PCI_WRITE(CSR_IBAR0,~0);
/* Set memory size value */
memSize = memory_end - memory_start;
/* Now we set up the mbars so the PCI bus can see the memory of
* the machine */
if (memSize < (1024 * 1024)) {
printk(KERN_ERR "PCISH5: Ridiculous memory size of 0x%lx?\n",
memSize);
return -EINVAL;
}
/* Set LSR 0 */
lsr0 = (memSize > (512 * 1024 * 1024)) ? 0x1ff00001 :
((r2p2(memSize) - 0x100000) | 0x1);
SH5PCI_WRITE(LSR0, lsr0);
/* Set MBAR 0 */
SH5PCI_WRITE(CSR_MBAR0, memory_start);
SH5PCI_WRITE(LAR0, memory_start);
SH5PCI_WRITE(CSR_MBAR1,0);
SH5PCI_WRITE(LAR1,0);
SH5PCI_WRITE(LSR1,0);
/* Enable the PCI interrupts on the device */
SH5PCI_WRITE(INTM, ~0);
SH5PCI_WRITE(AINTM, ~0);
SH5PCI_WRITE(PINTM, ~0);
sh5_pci_resources[0].start = PCI_IO_AREA;
sh5_pci_resources[0].end = PCI_IO_AREA + 0x10000;
sh5_pci_resources[1].start = memStart;
sh5_pci_resources[1].end = memStart + memSize;
return register_pci_controller(&sh5pci_controller);
}
arch_initcall(sh5pci_init);
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
*
* Definitions for the SH5 PCI hardware.
*/
#ifndef __PCI_SH5_H
#define __PCI_SH5_H
/* Product ID */
#define PCISH5_PID 0x350d
/* vendor ID */
#define PCISH5_VID 0x1054
/* Configuration types */
#define ST_TYPE0 0x00 /* Configuration cycle type 0 */
#define ST_TYPE1 0x01 /* Configuration cycle type 1 */
/* VCR data */
#define PCISH5_VCR_STATUS 0x00
#define PCISH5_VCR_VERSION 0x08
/*
** ICR register offsets and bits
*/
#define PCISH5_ICR_CR 0x100 /* PCI control register values */
#define CR_PBAM (1<<12)
#define CR_PFCS (1<<11)
#define CR_FTO (1<<10)
#define CR_PFE (1<<9)
#define CR_TBS (1<<8)
#define CR_SPUE (1<<7)
#define CR_BMAM (1<<6)
#define CR_HOST (1<<5)
#define CR_CLKEN (1<<4)
#define CR_SOCS (1<<3)
#define CR_IOCS (1<<2)
#define CR_RSTCTL (1<<1)
#define CR_CFINT (1<<0)
#define CR_LOCK_MASK 0xa5000000
#define PCISH5_ICR_INT 0x114 /* Interrupt registert values */
#define INT_MADIM (1<<2)
#define PCISH5_ICR_LSR0 0X104 /* Local space register values */
#define PCISH5_ICR_LSR1 0X108 /* Local space register values */
#define PCISH5_ICR_LAR0 0x10c /* Local address register values */
#define PCISH5_ICR_LAR1 0x110 /* Local address register values */
#define PCISH5_ICR_INTM 0x118 /* Interrupt mask register values */
#define PCISH5_ICR_AIR 0x11c /* Interrupt error address information register values */
#define PCISH5_ICR_CIR 0x120 /* Interrupt error command information register values */
#define PCISH5_ICR_AINT 0x130 /* Interrupt error arbiter interrupt register values */
#define PCISH5_ICR_AINTM 0x134 /* Interrupt error arbiter interrupt mask register values */
#define PCISH5_ICR_BMIR 0x138 /* Interrupt error info register of bus master values */
#define PCISH5_ICR_PAR 0x1c0 /* Pio address register values */
#define PCISH5_ICR_MBR 0x1c4 /* Memory space bank register values */
#define PCISH5_ICR_IOBR 0x1c8 /* I/O space bank register values */
#define PCISH5_ICR_PINT 0x1cc /* power management interrupt register values */
#define PCISH5_ICR_PINTM 0x1d0 /* power management interrupt mask register values */
#define PCISH5_ICR_MBMR 0x1d8 /* memory space bank mask register values */
#define PCISH5_ICR_IOBMR 0x1dc /* I/O space bank mask register values */
#define PCISH5_ICR_CSCR0 0x210 /* PCI cache snoop control register 0 */
#define PCISH5_ICR_CSCR1 0x214 /* PCI cache snoop control register 1 */
#define PCISH5_ICR_PDR 0x220 /* Pio data register values */
/* These are configs space registers */
#define PCISH5_ICR_CSR_VID 0x000 /* Vendor id */
#define PCISH5_ICR_CSR_DID 0x002 /* Device id */
#define PCISH5_ICR_CSR_CMD 0x004 /* Command register */
#define PCISH5_ICR_CSR_STATUS 0x006 /* Stautus */
#define PCISH5_ICR_CSR_IBAR0 0x010 /* I/O base address register */
#define PCISH5_ICR_CSR_MBAR0 0x014 /* First Memory base address register */
#define PCISH5_ICR_CSR_MBAR1 0x018 /* Second Memory base address register */
/* Base address of registers */
#define SH5PCI_ICR_BASE (PHYS_PCI_BLOCK + 0x00040000)
#define SH5PCI_IO_BASE (PHYS_PCI_BLOCK + 0x00800000)
/* #define SH5PCI_VCR_BASE (P2SEG_PCICB_BLOCK + P2SEG) */
extern unsigned long pcicr_virt;
/* Register selection macro */
#define PCISH5_ICR_REG(x) ( pcicr_virt + (PCISH5_ICR_##x))
/* #define PCISH5_VCR_REG(x) ( SH5PCI_VCR_BASE (PCISH5_VCR_##x)) */
/* Write I/O functions */
#define SH5PCI_WRITE(reg,val) __raw_writel((u32)(val),PCISH5_ICR_REG(reg))
#define SH5PCI_WRITE_SHORT(reg,val) __raw_writew((u16)(val),PCISH5_ICR_REG(reg))
#define SH5PCI_WRITE_BYTE(reg,val) __raw_writeb((u8)(val),PCISH5_ICR_REG(reg))
/* Read I/O functions */
#define SH5PCI_READ(reg) __raw_readl(PCISH5_ICR_REG(reg))
#define SH5PCI_READ_SHORT(reg) __raw_readw(PCISH5_ICR_REG(reg))
#define SH5PCI_READ_BYTE(reg) __raw_readb(PCISH5_ICR_REG(reg))
/* Set PCI config bits */
#define SET_CONFIG_BITS(bus,devfn,where) ((((bus) << 16) | ((devfn) << 8) | ((where) & ~3)) | 0x80000000)
/* Set PCI command register */
#define CONFIG_CMD(bus, devfn, where) SET_CONFIG_BITS(bus->number,devfn,where)
/* Size converters */
#define PCISH5_MEM_SIZCONV(x) (((x / 0x40000) - 1) << 18)
#define PCISH5_IO_SIZCONV(x) (((x / 0x40000) - 1) << 18)
extern struct pci_ops sh5_pci_ops;
#endif /* __PCI_SH5_H */
......@@ -6,7 +6,7 @@
#ifndef __ASM_SH_BARRIER_H
#define __ASM_SH_BARRIER_H
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#if defined(CONFIG_CPU_SH4A)
#include <asm/cache_insns.h>
#endif
......@@ -24,7 +24,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
#if defined(CONFIG_CPU_SH4A)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
......
......@@ -26,7 +26,6 @@
#include <asm-generic/bitops/non-atomic.h>
#endif
#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
unsigned long result;
......@@ -60,31 +59,6 @@ static inline unsigned long __ffs(unsigned long word)
: "t");
return result;
}
#else
static inline unsigned long ffz(unsigned long word)
{
unsigned long result, __d2, __d3;
__asm__("gettr tr0, %2\n\t"
"pta $+32, tr0\n\t"
"andi %1, 1, %3\n\t"
"beq %3, r63, tr0\n\t"
"pta $+4, tr0\n"
"0:\n\t"
"shlri.l %1, 1, %1\n\t"
"addi %0, 1, %0\n\t"
"andi %1, 1, %3\n\t"
"beqi %3, 1, tr0\n"
"1:\n\t"
"ptabs %2, tr0\n\t"
: "=r" (result), "=r" (word), "=r" (__d2), "=r" (__d3)
: "0" (0L), "1" (word));
return result;
}
#include <asm-generic/bitops/__ffs.h>
#endif
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/ffs.h>
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_BL_BIT_H
#define __ASM_SH_BL_BIT_H
#ifdef CONFIG_SUPERH32
# include <asm/bl_bit_32.h>
#else
# include <asm/bl_bit_64.h>
#endif
#endif /* __ASM_SH_BL_BIT_H */
#include <asm/bl_bit_32.h>
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_BL_BIT_64_H
#define __ASM_SH_BL_BIT_64_H
#include <asm/processor.h>
#define SR_BL_LL 0x0000000010000000LL
static inline void set_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
static inline void clear_bl_bit(void)
{
unsigned long long __dummy0, __dummy1 = ~SR_BL_LL;
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
#endif /* __ASM_SH_BL_BIT_64_H */
......@@ -53,10 +53,6 @@ static void __init check_bugs(void)
*p++ = 's';
*p++ = 'p';
break;
case CPU_FAMILY_SH5:
*p++ = '6';
*p++ = '4';
break;
case CPU_FAMILY_UNKNOWN:
/*
* Specifically use CPU_FAMILY_UNKNOWN rather than
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CACHE_INSNS_H
#define __ASM_SH_CACHE_INSNS_H
#ifdef CONFIG_SUPERH32
# include <asm/cache_insns_32.h>
#else
# include <asm/cache_insns_64.h>
#endif
#endif /* __ASM_SH_CACHE_INSNS_H */
#include <asm/cache_insns_32.h>
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_CACHE_INSNS_64_H
#define __ASM_SH_CACHE_INSNS_64_H
#define __icbi(addr) __asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
#define __ocbp(addr) __asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
#define __ocbi(addr) __asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
#define __ocbwb(addr) __asm__ __volatile__ ( "ocbwb %0, 0\n\t" : : "r" (addr))
static inline reg_size_t register_align(void *val)
{
return (unsigned long long)(signed long long)(signed long)val;
}
#endif /* __ASM_SH_CACHE_INSNS_64_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_SUPERH32
# include <asm/checksum_32.h>
#else
# include <asm-generic/checksum.h>
#endif
#include <asm/checksum_32.h>
......@@ -133,28 +133,6 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define ELF_PLATFORM (utsname()->machine)
#ifdef __SH5__
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
_r->regs[8]=0; _r->regs[9]=0; _r->regs[10]=0; _r->regs[11]=0; \
_r->regs[12]=0; _r->regs[13]=0; _r->regs[14]=0; _r->regs[15]=0; \
_r->regs[16]=0; _r->regs[17]=0; _r->regs[18]=0; _r->regs[19]=0; \
_r->regs[20]=0; _r->regs[21]=0; _r->regs[22]=0; _r->regs[23]=0; \
_r->regs[24]=0; _r->regs[25]=0; _r->regs[26]=0; _r->regs[27]=0; \
_r->regs[28]=0; _r->regs[29]=0; _r->regs[30]=0; _r->regs[31]=0; \
_r->regs[32]=0; _r->regs[33]=0; _r->regs[34]=0; _r->regs[35]=0; \
_r->regs[36]=0; _r->regs[37]=0; _r->regs[38]=0; _r->regs[39]=0; \
_r->regs[40]=0; _r->regs[41]=0; _r->regs[42]=0; _r->regs[43]=0; \
_r->regs[44]=0; _r->regs[45]=0; _r->regs[46]=0; _r->regs[47]=0; \
_r->regs[48]=0; _r->regs[49]=0; _r->regs[50]=0; _r->regs[51]=0; \
_r->regs[52]=0; _r->regs[53]=0; _r->regs[54]=0; _r->regs[55]=0; \
_r->regs[56]=0; _r->regs[57]=0; _r->regs[58]=0; _r->regs[59]=0; \
_r->regs[60]=0; _r->regs[61]=0; _r->regs[62]=0; \
_r->tregs[0]=0; _r->tregs[1]=0; _r->tregs[2]=0; _r->tregs[3]=0; \
_r->tregs[4]=0; _r->tregs[5]=0; _r->tregs[6]=0; _r->tregs[7]=0; \
_r->sr = SR_FD | SR_MMU; } while (0)
#else
#define ELF_PLAT_INIT(_r, load_addr) \
do { _r->regs[0]=0; _r->regs[1]=0; _r->regs[2]=0; _r->regs[3]=0; \
_r->regs[4]=0; _r->regs[5]=0; _r->regs[6]=0; _r->regs[7]=0; \
......@@ -182,7 +160,6 @@ do { \
_r->regs[14] = 0; \
_r->sr = SR_FD; \
} while (0)
#endif
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX_32BIT | (current->personality & (~PER_MASK)))
......
......@@ -4,8 +4,4 @@
#include <asm-generic/extable.h>
#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
#define ARCH_HAS_SEARCH_EXTABLE
#endif
#endif
......@@ -83,11 +83,7 @@ extern void __clear_fixmap(enum fixed_addresses idx, pgprot_t flags);
* the start of the fixmap, and leave one page empty
* at the top of mem..
*/
#ifdef CONFIG_SUPERH32
#define FIXADDR_TOP (P4SEG - PAGE_SIZE)
#else
#define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
#endif
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
......
......@@ -115,12 +115,8 @@ static inline void pfx##reads##bwlq(volatile void __iomem *mem, \
__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)
#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif
__BUILD_MEMORY_STRING(__raw_, q, u64)
......
......@@ -66,8 +66,5 @@ extern void irq_finish(unsigned int irq);
#endif
#include <asm-generic/irq.h>
#ifdef CONFIG_CPU_SH5
#include <cpu/irq.h>
#endif
#endif /* __ASM_SH_IRQ_H */
......@@ -48,11 +48,7 @@
*/
#define MMU_VPN_MASK 0xfffff000
#if defined(CONFIG_SUPERH32)
#include <asm/mmu_context_32.h>
#else
#include <asm/mmu_context_64.h>
#endif
/*
* Get MMU context if needed.
......@@ -74,14 +70,6 @@ static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
*/
local_flush_tlb_all();
#ifdef CONFIG_SUPERH64
/*
* The SH-5 cache uses the ASIDs, requiring both the I and D
* cache to be flushed when the ASID is exhausted. Weak.
*/
flush_cache_all();
#endif
/*
* Fix version; Note that we avoid version #0
* to distinguish NO_CONTEXT.
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_MMU_CONTEXT_64_H
#define __ASM_SH_MMU_CONTEXT_64_H
/*
* sh64-specific mmu_context interface.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*/
#include <cpu/registers.h>
#include <asm/cacheflush.h>
#define SR_ASID_MASK 0xffffffffff00ffffULL
#define SR_ASID_SHIFT 16
/*
* Destroy context related info for an mm_struct that is about
* to be put to rest.
*/
static inline void destroy_context(struct mm_struct *mm)
{
/* Well, at least free TLB entries */
flush_tlb_mm(mm);
}
static inline unsigned long get_asid(void)
{
unsigned long long sr;
asm volatile ("getcon " __SR ", %0\n\t"
: "=r" (sr));
sr = (sr >> SR_ASID_SHIFT) & MMU_CONTEXT_ASID_MASK;
return (unsigned long) sr;
}
/* Set ASID into SR */
static inline void set_asid(unsigned long asid)
{
unsigned long long sr, pc;
asm volatile ("getcon " __SR ", %0" : "=r" (sr));
sr = (sr & SR_ASID_MASK) | (asid << SR_ASID_SHIFT);
/*
* It is possible that this function may be inlined and so to avoid
* the assembler reporting duplicate symbols we make use of the
* gas trick of generating symbols using numerics and forward
* reference.
*/
asm volatile ("movi 1, %1\n\t"
"shlli %1, 28, %1\n\t"
"or %0, %1, %1\n\t"
"putcon %1, " __SR "\n\t"
"putcon %0, " __SSR "\n\t"
"movi 1f, %1\n\t"
"ori %1, 1 , %1\n\t"
"putcon %1, " __SPC "\n\t"
"rte\n"
"1:\n\t"
: "=r" (sr), "=r" (pc) : "0" (sr));
}
/* arch/sh/kernel/cpu/sh5/entry.S */
extern unsigned long switch_and_save_asid(unsigned long new_asid);
/* No spare register to twiddle, so use a software cache */
extern pgd_t *mmu_pdtp_cache;
#define set_TTB(pgd) (mmu_pdtp_cache = (pgd))
#define get_TTB() (mmu_pdtp_cache)
#endif /* __ASM_SH_MMU_CONTEXT_64_H */
......@@ -35,8 +35,6 @@
#define HPAGE_SHIFT 22
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64MB)
#define HPAGE_SHIFT 26
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define HPAGE_SHIFT 29
#endif
#ifdef CONFIG_HUGETLB_PAGE
......@@ -82,18 +80,12 @@ typedef struct { unsigned long long pgd; } pgd_t;
((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pte(x) \
({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
#elif defined(CONFIG_SUPERH32)
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#else
typedef struct { unsigned long long pte_low; } pte_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
typedef struct { unsigned long pgd; } pgd_t;
#define pte_val(x) ((x).pte_low)
#define __pte(x) ((pte_t) { (x) } )
#endif
#define pgd_val(x) ((x).pgd)
......@@ -191,15 +183,4 @@ typedef struct page *pgtable_t;
*/
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#ifdef CONFIG_SUPERH64
/*
* While BYTES_PER_WORD == 4 on the current sh64 ABI, GCC will still
* happily generate {ld/st}.q pairs, requiring us to have 8-byte
* alignment to avoid traps. The kmalloc alignment is guaranteed by
* virtue of L1_CACHE_BYTES, requiring this to only be special cased
* for slab caches.
*/
#define ARCH_SLAB_MINALIGN 8
#endif
#endif /* __ASM_SH_PAGE_H */
......@@ -76,18 +76,10 @@ static inline unsigned long phys_addr_mask(void)
#define PTE_PHYS_MASK (phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK (~(PTE_PHYS_MASK) << PAGE_SHIFT)
#ifdef CONFIG_SUPERH32
#define VMALLOC_START (P3SEG)
#else
#define VMALLOC_START (0xf0000000)
#endif
#define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif
/*
* SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
......@@ -159,15 +151,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
prot |= _PAGE_EXT(_PAGE_EXT_KERN_WRITE | _PAGE_EXT_USER_WRITE);
return __pte_access_permitted(pte, prot);
}
#elif defined(CONFIG_SUPERH64)
static inline bool pte_access_permitted(pte_t pte, bool write)
{
u64 prot = _PAGE_PRESENT | _PAGE_USER | _PAGE_READ;
if (write)
prot |= _PAGE_WRITE;
return __pte_access_permitted(pte, prot);
}
#else
static inline bool pte_access_permitted(pte_t pte, bool write)
{
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PGTABLE_64_H
#define __ASM_SH_PGTABLE_64_H
/*
* include/asm-sh/pgtable_64.h
*
* This file contains the functions and defines necessary to modify and use
* the SuperH page table tree.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
/*
* Error outputs.
*/
#define pte_ERROR(e) \
printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
* Table setting routines. Used within arch/mm only.
*/
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = pmdval)
static __inline__ void set_pte(pte_t *pteptr, pte_t pteval)
{
unsigned long long x = ((unsigned long long) pteval.pte_low);
unsigned long long *xp = (unsigned long long *) pteptr;
/*
* Sign-extend based on NPHYS.
*/
*(xp) = (x & NPHYS_SIGN) ? (x | NPHYS_MASK) : x;
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
* PGD defines. Top level.
*/
/* To find an entry in a generic PGD. */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define __pgd_offset(address) pgd_index(address)
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/* To find an entry in a kernel PGD. */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
* PMD level access routines. Same notes as above.
*/
#define _PMD_EMPTY 0x0
/* Either the PMD is empty or present, it's not paged out */
#define pmd_present(pmd_entry) (pmd_val(pmd_entry) & _PAGE_PRESENT)
#define pmd_clear(pmd_entry_p) (set_pmd((pmd_entry_p), __pmd(_PMD_EMPTY)))
#define pmd_none(pmd_entry) (pmd_val((pmd_entry)) == _PMD_EMPTY)
#define pmd_bad(pmd_entry) ((pmd_val(pmd_entry) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
#define pmd_page_vaddr(pmd_entry) \
((unsigned long) __va(pmd_val(pmd_entry) & PAGE_MASK))
#define pmd_page(pmd) \
(virt_to_page(pmd_val(pmd)))
/* PMD to PTE dereferencing */
#define pte_index(address) \
((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define __pte_offset(address) pte_index(address)
#define pte_offset_kernel(dir, addr) \
((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_unmap(pte) do { } while (0)
#ifndef __ASSEMBLY__
/*
* PTEL coherent flags.
* See Chapter 17 ST50 CPU Core Volume 1, Architecture.
*/
/* The bits that are required in the SH-5 TLB are placed in the h/w-defined
positions, to avoid expensive bit shuffling on every refill. The remaining
bits are used for s/w purposes and masked out on each refill.
Note, the PTE slots are used to hold data of type swp_entry_t when a page is
swapped out. Only the _PAGE_PRESENT flag is significant when the page is
swapped out, and it must be placed so that it doesn't overlap either the
type or offset fields of swp_entry_t. For x86, offset is at [31:8] and type
at [6:1], with _PAGE_PRESENT at bit 0 for both pte_t and swp_entry_t. This
scheme doesn't map to SH-5 because bit [0] controls cacheability. So bit
[2] is used for _PAGE_PRESENT and the type field of swp_entry_t is split
into 2 pieces. That is handled by SWP_ENTRY and SWP_TYPE below. */
#define _PAGE_WT 0x001 /* CB0: if cacheable, 1->write-thru, 0->write-back */
#define _PAGE_DEVICE 0x001 /* CB0: if uncacheable, 1->device (i.e. no write-combining or reordering at bus level) */
#define _PAGE_CACHABLE 0x002 /* CB1: uncachable/cachable */
#define _PAGE_PRESENT 0x004 /* software: page referenced */
#define _PAGE_SIZE0 0x008 /* SZ0-bit : size of page */
#define _PAGE_SIZE1 0x010 /* SZ1-bit : size of page */
#define _PAGE_SHARED 0x020 /* software: reflects PTEH's SH */
#define _PAGE_READ 0x040 /* PR0-bit : read access allowed */
#define _PAGE_EXECUTE 0x080 /* PR1-bit : execute access allowed */
#define _PAGE_WRITE 0x100 /* PR2-bit : write access allowed */
#define _PAGE_USER 0x200 /* PR3-bit : user space access allowed */
#define _PAGE_DIRTY 0x400 /* software: page accessed in write */
#define _PAGE_ACCESSED 0x800 /* software: page referenced */
/* Wrapper for extended mode pgprot twiddling */
#define _PAGE_EXT(x) ((unsigned long long)(x) << 32)
/*
* We can use the sign-extended bits in the PTEL to get 32 bits of
* software flags. This works for now because no implementations uses
* anything above the PPN field.
*/
#define _PAGE_WIRED _PAGE_EXT(0x001) /* software: wire the tlb entry */
#define _PAGE_SPECIAL _PAGE_EXT(0x002)
#define _PAGE_CLEAR_FLAGS (_PAGE_PRESENT | _PAGE_SHARED | \
_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_WIRED)
/* Mask which drops software flags */
#define _PAGE_FLAGS_HARDWARE_MASK (NEFF_MASK & ~(_PAGE_CLEAR_FLAGS))
/*
* HugeTLB support
*/
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define _PAGE_SZHUGE (_PAGE_SIZE0)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_1MB)
#define _PAGE_SZHUGE (_PAGE_SIZE1)
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512MB)
#define _PAGE_SZHUGE (_PAGE_SIZE0 | _PAGE_SIZE1)
#endif
/*
* Stub out _PAGE_SZHUGE if we don't have a good definition for it,
* to make pte_mkhuge() happy.
*/
#ifndef _PAGE_SZHUGE
# define _PAGE_SZHUGE (0)
#endif
/*
* Default flags for a Kernel page.
* This is fundametally also SHARED because the main use of this define
* (other than for PGD/PMD entries) is for the VMALLOC pool which is
* contextless.
*
* _PAGE_EXECUTE is required for modules
*
*/
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE | \
_PAGE_CACHABLE | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SHARED)
/* Default flags for a User page */
#define _PAGE_TABLE (_KERNPG_TABLE | _PAGE_USER)
#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SPECIAL)
/*
* We have full permissions (Read/Write/Execute/Shared).
*/
#define _PAGE_COMMON (_PAGE_PRESENT | _PAGE_USER | \
_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_NONE __pgprot(_PAGE_CACHABLE | _PAGE_ACCESSED)
#define PAGE_SHARED __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_WRITE | \
_PAGE_SHARED)
#define PAGE_EXECREAD __pgprot(_PAGE_COMMON | _PAGE_READ | _PAGE_EXECUTE)
/*
* We need to include PAGE_EXECUTE in PAGE_COPY because it is the default
* protection mode for the stack.
*/
#define PAGE_COPY PAGE_EXECREAD
#define PAGE_READONLY __pgprot(_PAGE_COMMON | _PAGE_READ)
#define PAGE_WRITEONLY __pgprot(_PAGE_COMMON | _PAGE_WRITE)
#define PAGE_RWX __pgprot(_PAGE_COMMON | _PAGE_READ | \
_PAGE_WRITE | _PAGE_EXECUTE)
#define PAGE_KERNEL __pgprot(_KERNPG_TABLE)
#define PAGE_KERNEL_NOCACHE \
__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_EXECUTE | _PAGE_ACCESSED | \
_PAGE_DIRTY | _PAGE_SHARED)
/* Make it a device mapping for maximum safety (e.g. for mapping device
registers into user-space via /dev/map). */
#define pgprot_noncached(x) __pgprot(((x).pgprot & ~(_PAGE_CACHABLE)) | _PAGE_DEVICE)
#define pgprot_writecombine(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHABLE)
/*
* PTE level access routines.
*
* Note1:
* It's the tree walk leaf. This is physical address to be stored.
*
* Note 2:
* Regarding the choice of _PTE_EMPTY:
We must choose a bit pattern that cannot be valid, whether or not the page
is present. bit[2]==1 => present, bit[2]==0 => swapped out. If swapped
out, bits [31:8], [6:3], [1:0] are under swapper control, so only bit[7] is
left for us to select. If we force bit[7]==0 when swapped out, we could use
the combination bit[7,2]=2'b10 to indicate an empty PTE. Alternatively, if
we force bit[7]==1 when swapped out, we can use all zeroes to indicate
empty. This is convenient, because the page tables get cleared to zero
when they are allocated.
*/
#define _PTE_EMPTY 0x0
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm,addr,xp) (set_pte_at(mm, addr, xp, __pte(_PTE_EMPTY)))
#define pte_none(x) (pte_val(x) == _PTE_EMPTY)
/*
* Some definitions to translate between mem_map, PTEs, and page
* addresses:
*/
/*
* Given a PTE, return the index of the mem_map[] entry corresponding
* to the page frame the PTE. Get the absolute physical address, make
* a relative physical address and translate it to an index.
*/
#define pte_pagenr(x) (((unsigned long) (pte_val(x)) - \
__MEMORY_START) >> PAGE_SHIFT)
/*
* Given a PTE, return the "struct page *".
*/
#define pte_page(x) (mem_map + pte_pagenr(x))
/*
* Return number of (down rounded) MB corresponding to x pages.
*/
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
* The following have defined behavior only work if pte_present() is true.
*/
static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; }
static inline int pte_write(pte_t pte) { return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_special(pte_t pte){ return pte_val(pte) & _PAGE_SPECIAL; }
static inline pte_t pte_wrprotect(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_WRITE)); return pte; }
static inline pte_t pte_mkclean(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkold(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) & ~_PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkwrite(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_WRITE)); return pte; }
static inline pte_t pte_mkdirty(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_DIRTY)); return pte; }
static inline pte_t pte_mkyoung(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_ACCESSED)); return pte; }
static inline pte_t pte_mkhuge(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SZHUGE)); return pte; }
static inline pte_t pte_mkspecial(pte_t pte) { set_pte(&pte, __pte(pte_val(pte) | _PAGE_SPECIAL)); return pte; }
/*
* Conversion functions: convert a page and protection to a page entry.
*
* extern pte_t mk_pte(struct page *page, pgprot_t pgprot)
*/
#define mk_pte(page,pgprot) \
({ \
pte_t __pte; \
\
set_pte(&__pte, __pte((((page)-mem_map) << PAGE_SHIFT) | \
__MEMORY_START | pgprot_val((pgprot)))); \
__pte; \
})
/*
* This takes a (absolute) physical page address that is used
* by the remapping functions
*/
#define mk_pte_phys(physpage, pgprot) \
({ pte_t __pte; set_pte(&__pte, __pte(physpage | pgprot_val(pgprot))); __pte; })
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ set_pte(&pte, __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot))); return pte; }
/* Encode and decode a swap entry */
#define __swp_type(x) (((x).val & 3) + (((x).val >> 1) & 0x3c))
#define __swp_offset(x) ((x).val >> 8)
#define __swp_entry(type, offset) ((swp_entry_t) { ((offset << 8) + ((type & 0x3c) << 1) + (type & 3)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
#endif /* !__ASSEMBLY__ */
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#define pfn_pmd(pfn, prot) __pmd(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
#endif /* __ASM_SH_PGTABLE_64_H */
/* SPDX-License-Identifier: GPL-2.0 */
# ifdef CONFIG_SUPERH32
# include <asm/posix_types_32.h>
# else
# include <asm/posix_types_64.h>
# endif
#include <asm/posix_types_32.h>
......@@ -39,9 +39,6 @@ enum cpu_type {
/* SH4AL-DSP types */
CPU_SH7343, CPU_SH7722, CPU_SH7366, CPU_SH7372,
/* SH-5 types */
CPU_SH5_101, CPU_SH5_103,
/* Unknown subtype */
CPU_SH_NONE
};
......@@ -53,7 +50,6 @@ enum cpu_family {
CPU_FAMILY_SH4,
CPU_FAMILY_SH4A,
CPU_FAMILY_SH4AL_DSP,
CPU_FAMILY_SH5,
CPU_FAMILY_UNKNOWN,
};
......@@ -167,18 +163,12 @@ int vsyscall_init(void);
*/
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
#define instruction_size(insn) (2)
#else
#define instruction_size(insn) (4)
#define instruction_size(insn) (2)
#endif
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_SUPERH32
# include <asm/processor_32.h>
#else
# include <asm/processor_64.h>
#endif
#include <asm/processor_32.h>
#endif /* __ASM_SH_PROCESSOR_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PROCESSOR_64_H
#define __ASM_SH_PROCESSOR_64_H
/*
* include/asm-sh/processor_64.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASSEMBLY__
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/types.h>
#include <cpu/registers.h>
#endif
/*
* User space process size: 2GB - 4k.
*/
#define TASK_SIZE 0x7ffff000UL
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
/* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
/*
* Bit of SR register
*
* FD-bit:
* When it's set, it means the processor doesn't have right to use FPU,
* and it results exception when the floating operation is executed.
*
* IMASK-bit:
* Interrupt level mask
*
* STEP-bit:
* Single step bit
*
*/
#if defined(CONFIG_SH64_SR_WATCH)
#define SR_MMU 0x84000000
#else
#define SR_MMU 0x80000000
#endif
#define SR_IMASK 0x000000f0
#define SR_FD 0x00008000
#define SR_SSTEP 0x08000000
#ifndef __ASSEMBLY__
/*
* FPU structure and data : require 8-byte alignment as we need to access it
with fld.p, fst.p
*/
struct sh_fpu_hard_struct {
unsigned long fp_regs[64];
unsigned int fpscr;
/* long status; * software status information */
};
/* Dummy fpu emulator */
struct sh_fpu_soft_struct {
unsigned long fp_regs[64];
unsigned int fpscr;
unsigned char lookahead;
unsigned long entry_pc;
};
union thread_xstate {
struct sh_fpu_hard_struct hardfpu;
struct sh_fpu_soft_struct softfpu;
/*
* The structure definitions only produce 32 bit alignment, yet we need
* to access them using 64 bit load/store as well.
*/
unsigned long long alignment_dummy;
};
struct thread_struct {
unsigned long sp;
unsigned long pc;
/* Various thread flags, see SH_THREAD_xxx */
unsigned long flags;
/* This stores the address of the pt_regs built during a context
switch, or of the register save area built for a kernel mode
exception. It is used for backtracing the stack of a sleeping task
or one that traps in kernel mode. */
struct pt_regs *kregs;
/* This stores the address of the pt_regs constructed on entry from
user mode. It is a fixed value over the lifetime of a process, or
NULL for a kernel thread. */
struct pt_regs *uregs;
unsigned long address;
/* Hardware debugging registers may come here */
/* floating point info */
union thread_xstate *xstate;
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
unsigned char fpu_counter;
};
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
#define INIT_THREAD { \
.sp = sizeof(init_stack) + \
(long) &init_stack, \
.pc = 0, \
.kregs = &fake_swapper_regs, \
.uregs = NULL, \
.address = 0, \
.flags = 0, \
}
/*
* Do necessary setup to start up a newly executed thread.
*/
#define SR_USER (SR_MMU | SR_FD)
#define start_thread(_regs, new_pc, new_sp) \
_regs->sr = SR_USER; /* User mode. */ \
_regs->pc = new_pc - 4; /* Compensate syscall exit */ \
_regs->pc |= 1; /* Set SHmedia ! */ \
_regs->regs[18] = 0; \
_regs->regs[15] = new_sp
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
/*
* FPU lazy state save handling.
*/
static inline void disable_fpu(void)
{
unsigned long long __dummy;
/* Set FD flag in SR */
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (SR_FD));
}
static inline void enable_fpu(void)
{
unsigned long long __dummy;
/* Clear out FD flag in SR */
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (~SR_FD));
}
/* Round to nearest, no exceptions on inexact, overflow, underflow,
zero-divide, invalid. Configure option for whether to flush denorms to
zero, or except if a denorm is encountered. */
#if defined(CONFIG_SH64_FPU_DENORM_FLUSH)
#define FPSCR_INIT 0x00040000
#else
#define FPSCR_INIT 0x00000000
#endif
#ifdef CONFIG_SH_FPU
/* Initialise the FP state of a task */
void fpinit(struct sh_fpu_hard_struct *fpregs);
#else
#define fpinit(fpregs) do { } while (0)
#endif
extern struct task_struct *last_task_used_math;
/*
* Return saved PC of a blocked thread.
*/
#define thread_saved_pc(tsk) (tsk->thread.pc)
extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.pc)
#define KSTK_ESP(tsk) ((tsk)->thread.sp)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PROCESSOR_64_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_PTRACE_64_H
#define __ASM_SH_PTRACE_64_H
#include <uapi/asm/ptrace_64.h>
#define MAX_REG_OFFSET offsetof(struct pt_regs, tregs[7])
static inline long regs_return_value(struct pt_regs *regs)
{
return regs->regs[3];
}
#endif /* __ASM_SH_PTRACE_64_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifdef CONFIG_SUPERH32
# include <asm/string_32.h>
#else
# include <asm/string_64.h>
#endif
#include <asm/string_32.h>
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_STRING_64_H
#define __ASM_SH_STRING_64_H
#ifdef __KERNEL__
#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *dest, const void *src, size_t count);
#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *);
#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *__dest, const char *__src);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_STRING_64_H */
......@@ -4,13 +4,4 @@
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_SWITCH_TO_H
#define __ASM_SH_SWITCH_TO_H
#ifdef CONFIG_SUPERH32
# include <asm/switch_to_32.h>
#else
# include <asm/switch_to_64.h>
#endif
#endif /* __ASM_SH_SWITCH_TO_H */
#include <asm/switch_to_32.h>
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_SWITCH_TO_64_H
#define __ASM_SH_SWITCH_TO_64_H
struct thread_struct;
struct task_struct;
/*
* switch_to() should switch tasks to task nr n, first
*/
struct task_struct *sh64_switch_to(struct task_struct *prev,
struct thread_struct *prev_thread,
struct task_struct *next,
struct thread_struct *next_thread);
#define switch_to(prev,next,last) \
do { \
if (last_task_used_math != next) { \
struct pt_regs *regs = next->thread.uregs; \
if (regs) regs->sr |= SR_FD; \
} \
last = sh64_switch_to(prev, &prev->thread, next, \
&next->thread); \
} while (0)
#endif /* __ASM_SH_SWITCH_TO_64_H */
......@@ -4,10 +4,6 @@
extern const unsigned long sys_call_table[];
#ifdef CONFIG_SUPERH32
# include <asm/syscall_32.h>
#else
# include <asm/syscall_64.h>
#endif
#include <asm/syscall_32.h>
#endif /* __ASM_SH_SYSCALL_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_SYSCALL_64_H
#define __ASM_SH_SYSCALL_64_H
#include <uapi/linux/audit.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
/* The system call number is given by the user in R9 */
static inline long syscall_get_nr(struct task_struct *task,
struct pt_regs *regs)
{
return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
/*
* XXX: This needs some thought. On SH we don't
* save away the original R9 value anywhere.
*/
}
static inline long syscall_get_error(struct task_struct *task,
struct pt_regs *regs)
{
return IS_ERR_VALUE(regs->regs[9]) ? regs->regs[9] : 0;
}
static inline long syscall_get_return_value(struct task_struct *task,
struct pt_regs *regs)
{
return regs->regs[9];
}
static inline void syscall_set_return_value(struct task_struct *task,
struct pt_regs *regs,
int error, long val)
{
if (error)
regs->regs[9] = -error;
else
regs->regs[9] = val;
}
static inline void syscall_get_arguments(struct task_struct *task,
struct pt_regs *regs,
unsigned long *args)
{
memcpy(args, &regs->regs[2], 6 * sizeof(args[0]));
}
static inline void syscall_set_arguments(struct task_struct *task,
struct pt_regs *regs,
const unsigned long *args)
{
memcpy(&regs->regs[2], args, 6 * sizeof(args[0]));
}
static inline int syscall_get_arch(struct task_struct *task)
{
int arch = AUDIT_ARCH_SH;
#ifdef CONFIG_64BIT
arch |= __AUDIT_ARCH_64BIT;
#endif
#ifdef CONFIG_CPU_LITTLE_ENDIAN
arch |= __AUDIT_ARCH_LE;
#endif
return arch;
}
#endif /* __ASM_SH_SYSCALL_64_H */
......@@ -2,8 +2,6 @@
#ifndef __ASM_SH_SYSCALLS_H
#define __ASM_SH_SYSCALLS_H
#ifdef __KERNEL__
asmlinkage int old_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
int fd, unsigned long off);
......@@ -11,11 +9,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff);
#ifdef CONFIG_SUPERH32
# include <asm/syscalls_32.h>
#else
# include <asm/syscalls_64.h>
#endif
#include <asm/syscalls_32.h>
#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_SYSCALLS_64_H
#define __ASM_SH_SYSCALLS_64_H
#ifdef __KERNEL__
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/types.h>
struct pt_regs;
/* Misc syscall related bits */
asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
#endif /* __KERNEL__ */
#endif /* __ASM_SH_SYSCALLS_64_H */
......@@ -70,9 +70,7 @@ register unsigned long current_stack_pointer asm("r15") __used;
static inline struct thread_info *current_thread_info(void)
{
struct thread_info *ti;
#if defined(CONFIG_SUPERH64)
__asm__ __volatile__ ("getcon cr17, %0" : "=r" (ti));
#elif defined(CONFIG_CPU_HAS_SR_RB)
#if defined(CONFIG_CPU_HAS_SR_RB)
__asm__ __volatile__ ("stc r7_bank, %0" : "=r" (ti));
#else
unsigned long __dummy;
......
......@@ -2,10 +2,6 @@
#ifndef __ASM_SH_TLB_H
#define __ASM_SH_TLB_H
#ifdef CONFIG_SUPERH64
# include <asm/tlb_64.h>
#endif
#ifndef __ASSEMBLY__
#include <linux/pagemap.h>
......@@ -14,7 +10,7 @@
#include <asm-generic/tlb.h>
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
#if defined(CONFIG_CPU_SH4)
extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
extern void tlb_unwire_entry(void);
#else
......
/* SPDX-License-Identifier: GPL-2.0
*
* include/asm-sh/tlb_64.h
*
* Copyright (C) 2003 Paul Mundt
*/
#ifndef __ASM_SH_TLB_64_H
#define __ASM_SH_TLB_64_H
/* ITLB defines */
#define ITLB_FIXED 0x00000000 /* First fixed ITLB, see head.S */
#define ITLB_LAST_VAR_UNRESTRICTED 0x000003F0 /* Last ITLB */
/* DTLB defines */
#define DTLB_FIXED 0x00800000 /* First fixed DTLB, see head.S */
#define DTLB_LAST_VAR_UNRESTRICTED 0x008003F0 /* Last DTLB */
#ifndef __ASSEMBLY__
/**
* for_each_dtlb_entry - Iterate over free (non-wired) DTLB entries
*
* @tlb: TLB entry
*/
#define for_each_dtlb_entry(tlb) \
for (tlb = cpu_data->dtlb.first; \
tlb <= cpu_data->dtlb.last; \
tlb += cpu_data->dtlb.step)
/**
* for_each_itlb_entry - Iterate over free (non-wired) ITLB entries
*
* @tlb: TLB entry
*/
#define for_each_itlb_entry(tlb) \
for (tlb = cpu_data->itlb.first; \
tlb <= cpu_data->itlb.last; \
tlb += cpu_data->itlb.step)
/**
* __flush_tlb_slot - Flushes TLB slot @slot.
*
* @slot: Address of TLB slot.
*/
static inline void __flush_tlb_slot(unsigned long long slot)
{
__asm__ __volatile__ ("putcfg %0, 0, r63\n" : : "r" (slot));
}
#ifdef CONFIG_MMU
/* arch/sh64/mm/tlb.c */
int sh64_tlb_init(void);
unsigned long long sh64_next_free_dtlb_entry(void);
unsigned long long sh64_get_wired_dtlb_entry(void);
int sh64_put_wired_dtlb_entry(unsigned long long entry);
void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
unsigned long asid, unsigned long paddr);
void sh64_teardown_tlb_slot(unsigned long long config_addr);
#else
#define sh64_tlb_init() do { } while (0)
#define sh64_next_free_dtlb_entry() (0)
#define sh64_get_wired_dtlb_entry() (0)
#define sh64_put_wired_dtlb_entry(entry) do { } while (0)
#define sh64_setup_tlb_slot(conf, virt, asid, phys) do { } while (0)
#define sh64_teardown_tlb_slot(addr) do { } while (0)
#endif /* CONFIG_MMU */
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TLB_64_H */
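Editor's note: the DTLB iterator and __flush_tlb_slot() above are normally combined to sweep every free (non-wired) slot. A minimal sketch of such a loop follows; the function name is hypothetical and the sketch assumes the cpu_data fields referenced by the macros are in scope.
/*
 * Hypothetical helper (editor's sketch, not part of the original file):
 * walk every free DTLB slot and flush it.
 */
static inline void example_flush_free_dtlb(void)
{
	unsigned long long tlb;

	for_each_dtlb_entry(tlb)
		__flush_tlb_slot(tlb);
}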
......@@ -4,11 +4,7 @@
#include <linux/compiler.h>
#ifdef CONFIG_SUPERH32
# include <asm/traps_32.h>
#else
# include <asm/traps_64.h>
#endif
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
......
/* SPDX-License-Identifier: GPL-2.0
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#ifndef __ASM_SH_TRAPS_64_H
#define __ASM_SH_TRAPS_64_H
#include <cpu/registers.h>
extern void phys_stext(void);
#define lookup_exception_vector() \
({ \
unsigned long _vec; \
\
__asm__ __volatile__ ( \
"getcon " __EXPEVT ", %0\n\t" \
: "=r" (_vec) \
); \
\
_vec; \
})
static inline void trigger_address_error(void)
{
phys_stext();
}
#define BUILD_TRAP_HANDLER(name) \
asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
#define TRAP_HANDLER_DECL
#endif /* __ASM_SH_TRAPS_64_H */
......@@ -9,13 +9,8 @@
*/
#ifndef __ASSEMBLY__
#ifdef CONFIG_SUPERH32
typedef u16 insn_size_t;
typedef u32 reg_size_t;
#else
typedef u32 insn_size_t;
typedef u64 reg_size_t;
#endif
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_TYPES_H */
......@@ -96,11 +96,7 @@ struct __large_struct { unsigned long buf[100]; };
__pu_err; \
})
#ifdef CONFIG_SUPERH32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif
extern long strncpy_from_user(char *dest, const char __user *src, long count);
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_UACCESS_64_H
#define __ASM_SH_UACCESS_64_H
/*
* include/asm-sh/uaccess_64.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* User space memory access functions
*
* Copyright (C) 1999 Niibe Yutaka
*
* Based on:
* MIPS implementation version 1.15 by
* Copyright (C) 1996, 1997, 1998 by Ralf Baechle
* and i386 version.
*/
#define __get_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
x = 0; \
switch (size) { \
case 1: \
retval = __get_user_asm_b((void *)&x, \
(long)ptr); \
break; \
case 2: \
retval = __get_user_asm_w((void *)&x, \
(long)ptr); \
break; \
case 4: \
retval = __get_user_asm_l((void *)&x, \
(long)ptr); \
break; \
case 8: \
retval = __get_user_asm_q((void *)&x, \
(long)ptr); \
break; \
default: \
__get_user_unknown(); \
break; \
} \
} while (0)
extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);
#define __put_user_size(x,ptr,size,retval) \
do { \
retval = 0; \
switch (size) { \
case 1: \
retval = __put_user_asm_b((void *)&x, \
(__force long)ptr); \
break; \
case 2: \
retval = __put_user_asm_w((void *)&x, \
(__force long)ptr); \
break; \
case 4: \
retval = __put_user_asm_l((void *)&x, \
(__force long)ptr); \
break; \
case 8: \
retval = __put_user_asm_q((void *)&x, \
(__force long)ptr); \
break; \
default: \
__put_user_unknown(); \
} \
} while (0)
extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);
#endif /* __ASM_SH_UACCESS_64_H */
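Editor's note: __get_user_size() above dispatches on the access width; a __get_user()-style wrapper normally derives that width from the pointed-to type. The sketch below illustrates this pattern; the macro name is hypothetical and not from this file.
/*
 * Hypothetical wrapper (editor's sketch): drive __get_user_size()
 * from sizeof(*(ptr)) and hand back the error code.
 */
#define example_get_user(x, ptr)					\
({									\
	long __gu_err;							\
	__typeof__(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = __gu_val;							\
	__gu_err;							\
})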
/* SPDX-License-Identifier: GPL-2.0 */
# ifdef CONFIG_SUPERH32
# include <asm/unistd_32.h>
# else
# include <asm/unistd_64.h>
# endif
#include <asm/unistd_32.h>
#define NR_syscalls __NR_syscalls
......
......@@ -28,19 +28,12 @@
* to write an integer number of pages.
*/
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
struct user_fpu_struct {
unsigned long fp_regs[32];
unsigned int fpscr;
};
#else
struct user_fpu_struct {
unsigned long fp_regs[16];
unsigned long xfp_regs[16];
unsigned long fpscr;
unsigned long fpul;
};
#endif
struct user {
struct pt_regs regs; /* entire machine state */
......
......@@ -10,8 +10,6 @@
# define MODULE_PROC_FAMILY "SH3LE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4LE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5LE "
# else
# error unknown processor family
# endif
......@@ -22,8 +20,6 @@
# define MODULE_PROC_FAMILY "SH3BE "
# elif defined CONFIG_CPU_SH4
# define MODULE_PROC_FAMILY "SH4BE "
# elif defined CONFIG_CPU_SH5
# define MODULE_PROC_FAMILY "SH5BE "
# else
# error unknown processor family
# endif
......
......@@ -15,12 +15,4 @@
#define DWARF_EH_FRAME
#endif
#ifdef CONFIG_SUPERH64
#define EXTRA_TEXT \
*(.text64) \
*(.text..SHmedia32)
#else
#define EXTRA_TEXT
#endif
#endif /* __ASM_SH_VMLINUX_LDS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_ADDRSPACE_H
#define __ASM_SH_CPU_SH5_ADDRSPACE_H
#define PHYS_PERIPHERAL_BLOCK 0x09000000
#define PHYS_DMAC_BLOCK 0x0e000000
#define PHYS_PCI_BLOCK 0x60000000
#define PHYS_EMI_BLOCK 0xff000000
/* No segmentation.. */
#endif /* __ASM_SH_CPU_SH5_ADDRSPACE_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_CACHE_H
#define __ASM_SH_CPU_SH5_CACHE_H
/*
* include/asm-sh/cpu-sh5/cache.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*/
#define L1_CACHE_SHIFT 5
/* Valid and Dirty bits */
#define SH_CACHE_VALID (1LL<<0)
#define SH_CACHE_UPDATED (1LL<<57)
/* Unimplemented compat bits.. */
#define SH_CACHE_COMBINED 0
#define SH_CACHE_ASSOC 0
/* Cache flags */
#define SH_CACHE_MODE_WT (1LL<<0)
#define SH_CACHE_MODE_WB (1LL<<1)
/*
* Control Registers.
*/
#define ICCR_BASE 0x01600000 /* Instruction Cache Control Register */
#define ICCR_REG0 0 /* Register 0 offset */
#define ICCR_REG1 1 /* Register 1 offset */
#define ICCR0 ICCR_BASE+ICCR_REG0
#define ICCR1 ICCR_BASE+ICCR_REG1
#define ICCR0_OFF 0x0 /* Set ICACHE off */
#define ICCR0_ON 0x1 /* Set ICACHE on */
#define ICCR0_ICI 0x2 /* Invalidate all in IC */
#define ICCR1_NOLOCK 0x0 /* Set No Locking */
#define OCCR_BASE 0x01E00000 /* Operand Cache Control Register */
#define OCCR_REG0 0 /* Register 0 offset */
#define OCCR_REG1 1 /* Register 1 offset */
#define OCCR0 OCCR_BASE+OCCR_REG0
#define OCCR1 OCCR_BASE+OCCR_REG1
#define OCCR0_OFF 0x0 /* Set OCACHE off */
#define OCCR0_ON 0x1 /* Set OCACHE on */
#define OCCR0_OCI 0x2 /* Invalidate all in OC */
#define OCCR0_WT 0x4 /* Set OCACHE in WT Mode */
#define OCCR0_WB 0x0 /* Set OCACHE in WB Mode */
#define OCCR1_NOLOCK 0x0 /* Set No Locking */
/*
* SH-5
* A bit of description here, for neff=32.
*
* |<--- tag (19 bits) --->|
* +-----------------------------+-----------------+------+----------+------+
* | | | ways |set index |offset|
* +-----------------------------+-----------------+------+----------+------+
* ^ 2 bits 8 bits 5 bits
* +- Bit 31
*
* Cacheline size is based on offset: 5 bits = 32 bytes per line
* A cache line is identified by a tag + set but OCACHETAG/ICACHETAG
* have a broader space for registers. These are outlined by
* CACHE_?C_*_STEP below.
*
*/
/* Instruction cache */
#define CACHE_IC_ADDRESS_ARRAY 0x01000000
/* Operand Cache */
#define CACHE_OC_ADDRESS_ARRAY 0x01800000
/* These declarations relate to cache 'synonyms' in the operand cache. A
'synonym' occurs where effective address bits overlap between those used for
indexing the cache sets and those passed to the MMU for translation. In the
case of SH5-101 & SH5-103, only bit 12 is affected for 4k pages. */
#define CACHE_OC_N_SYNBITS 1 /* Number of synonym bits */
#define CACHE_OC_SYN_SHIFT 12
/* Mask to select synonym bit(s) */
#define CACHE_OC_SYN_MASK (((1UL<<CACHE_OC_N_SYNBITS)-1)<<CACHE_OC_SYN_SHIFT)
/*
* Instruction cache can't be invalidated based on physical addresses.
* No Instruction Cache defines required, then.
*/
#endif /* __ASM_SH_CPU_SH5_CACHE_H */
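Editor's note: with the geometry described above (5-bit line offset, 8-bit set index), only effective-address bit 12 both selects an operand-cache set and is subject to MMU translation for 4k pages, which is exactly what CACHE_OC_SYN_MASK isolates (it evaluates to 0x1000). A minimal sketch, with a hypothetical helper name:
/*
 * Hypothetical helper (editor's sketch, not from the original file):
 * extract the operand-cache synonym bits of an effective address.
 */
static inline unsigned long example_oc_synonym_bits(unsigned long eaddr)
{
	return eaddr & CACHE_OC_SYN_MASK;	/* bit 12 on SH5-101/103 */
}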
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_IRQ_H
#define __ASM_SH_CPU_SH5_IRQ_H
/*
* include/asm-sh/cpu-sh5/irq.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*/
/*
 * Encoded IRQs are not considered worth supporting.
 * The main reason is that there is no per-encoded-interrupt
 * enable/disable mechanism (as there was in SH3/4).
 * An all-enabled/all-disabled scheme is only worthwhile if there is
 * a cascaded IC to disable/enable/ack on. Until such an
 * IC is available there is no such support.
 *
 * Presumably encoded IRQs may use extra IRQs beyond 64,
 * below. Some logic must be added to cope with IRQ_IRL?
 * in an exclusive way.
 *
 * Priorities are set at platform level; when IRQ_IRL0-3
 * are set to 0, encoding is allowed. Otherwise it is not
 * allowed.
*/
/* Independent IRQs */
#define IRQ_IRL0 0
#define IRQ_IRL1 1
#define IRQ_IRL2 2
#define IRQ_IRL3 3
#define IRQ_INTA 4
#define IRQ_INTB 5
#define IRQ_INTC 6
#define IRQ_INTD 7
#define IRQ_SERR 12
#define IRQ_ERR 13
#define IRQ_PWR3 14
#define IRQ_PWR2 15
#define IRQ_PWR1 16
#define IRQ_PWR0 17
#define IRQ_DMTE0 18
#define IRQ_DMTE1 19
#define IRQ_DMTE2 20
#define IRQ_DMTE3 21
#define IRQ_DAERR 22
#define IRQ_TUNI0 32
#define IRQ_TUNI1 33
#define IRQ_TUNI2 34
#define IRQ_TICPI2 35
#define IRQ_ATI 36
#define IRQ_PRI 37
#define IRQ_CUI 38
#define IRQ_ERI 39
#define IRQ_RXI 40
#define IRQ_BRI 41
#define IRQ_TXI 42
#define IRQ_ITI 63
#define NR_INTC_IRQS 64
#ifdef CONFIG_SH_CAYMAN
#define NR_EXT_IRQS 32
#define START_EXT_IRQS 64
/* PCI bus 2 uses encoded external interrupts on the Cayman board */
#define IRQ_P2INTA (START_EXT_IRQS + (3*8) + 0)
#define IRQ_P2INTB (START_EXT_IRQS + (3*8) + 1)
#define IRQ_P2INTC (START_EXT_IRQS + (3*8) + 2)
#define IRQ_P2INTD (START_EXT_IRQS + (3*8) + 3)
#define I8042_KBD_IRQ (START_EXT_IRQS + 2)
#define I8042_AUX_IRQ (START_EXT_IRQS + 6)
#define IRQ_CFCARD (START_EXT_IRQS + 7)
#define IRQ_PCMCIA (0)
#else
#define NR_EXT_IRQS 0
#endif
/* Default IRQs, fixed */
#define TIMER_IRQ IRQ_TUNI0
#define RTC_IRQ IRQ_CUI
/* Default Priorities, Platform may choose differently */
#define NO_PRIORITY 0 /* Disabled */
#define TIMER_PRIORITY 2
#define RTC_PRIORITY TIMER_PRIORITY
#define SCIF_PRIORITY 3
#define INTD_PRIORITY 3
#define IRL3_PRIORITY 4
#define INTC_PRIORITY 6
#define IRL2_PRIORITY 7
#define INTB_PRIORITY 9
#define IRL1_PRIORITY 10
#define INTA_PRIORITY 12
#define IRL0_PRIORITY 13
#define TOP_PRIORITY 15
extern int intc_evt_to_irq[(0xE20/0x20)+1];
extern int platform_int_priority[NR_INTC_IRQS];
#endif /* __ASM_SH_CPU_SH5_IRQ_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_MMU_CONTEXT_H
#define __ASM_SH_CPU_SH5_MMU_CONTEXT_H
/* Common defines */
#define TLB_STEP 0x00000010
#define TLB_PTEH 0x00000000
#define TLB_PTEL 0x00000008
/* PTEH defines */
#define PTEH_ASID_SHIFT 2
#define PTEH_VALID 0x0000000000000001
#define PTEH_SHARED 0x0000000000000002
#define PTEH_MATCH_ASID 0x00000000000003ff
#ifndef __ASSEMBLY__
/* This has to be a common function because the next location to fill
* information is shared. */
extern void __do_tlb_refill(unsigned long address, unsigned long long is_text_not_data, pte_t *pte);
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_CPU_SH5_MMU_CONTEXT_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_REGISTERS_H
#define __ASM_SH_CPU_SH5_REGISTERS_H
/*
* include/asm-sh/cpu-sh5/registers.h
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 Richard Curnow
*/
#ifdef __ASSEMBLY__
/* =====================================================================
**
** Section 1: acts on assembly sources pre-processed by GPP ( <source.S>).
** Assigns symbolic names to control & target registers.
*/
/*
* Define some useful aliases for control registers.
*/
#define SR cr0
#define SSR cr1
#define PSSR cr2
/* cr3 UNDEFINED */
#define INTEVT cr4
#define EXPEVT cr5
#define PEXPEVT cr6
#define TRA cr7
#define SPC cr8
#define PSPC cr9
#define RESVEC cr10
#define VBR cr11
/* cr12 UNDEFINED */
#define TEA cr13
/* cr14-cr15 UNDEFINED */
#define DCR cr16
#define KCR0 cr17
#define KCR1 cr18
/* cr19-cr31 UNDEFINED */
/* cr32-cr61 RESERVED */
#define CTC cr62
#define USR cr63
/*
* ABI dependent registers (general purpose set)
*/
#define RET r2
#define ARG1 r2
#define ARG2 r3
#define ARG3 r4
#define ARG4 r5
#define ARG5 r6
#define ARG6 r7
#define SP r15
#define LINK r18
#define ZERO r63
/*
* Status register defines: used only by assembly sources (and
 * syntax independent)
*/
#define SR_RESET_VAL 0x0000000050008000
#define SR_HARMLESS 0x00000000500080f0 /* Write ignores for most */
#define SR_ENABLE_FPU 0xffffffffffff7fff /* AND with this */
#if defined (CONFIG_SH64_SR_WATCH)
#define SR_ENABLE_MMU 0x0000000084000000 /* OR with this */
#else
#define SR_ENABLE_MMU 0x0000000080000000 /* OR with this */
#endif
#define SR_UNBLOCK_EXC 0xffffffffefffffff /* AND with this */
#define SR_BLOCK_EXC 0x0000000010000000 /* OR with this */
#else /* Not __ASSEMBLY__ syntax */
/*
** Stringify reg. name
*/
#define __str(x) #x
/* Stringify control register names for use in inline assembly */
#define __SR __str(SR)
#define __SSR __str(SSR)
#define __PSSR __str(PSSR)
#define __INTEVT __str(INTEVT)
#define __EXPEVT __str(EXPEVT)
#define __PEXPEVT __str(PEXPEVT)
#define __TRA __str(TRA)
#define __SPC __str(SPC)
#define __PSPC __str(PSPC)
#define __RESVEC __str(RESVEC)
#define __VBR __str(VBR)
#define __TEA __str(TEA)
#define __DCR __str(DCR)
#define __KCR0 __str(KCR0)
#define __KCR1 __str(KCR1)
#define __CTC __str(CTC)
#define __USR __str(USR)
#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_CPU_SH5_REGISTERS_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_CPU_SH5_RTC_H
#define __ASM_SH_CPU_SH5_RTC_H
#define rtc_reg_size sizeof(u32)
#define RTC_BIT_INVERTED 0 /* The SH-5 RTC is surprisingly sane! */
#define RTC_DEF_CAPABILITIES RTC_CAP_4_DIGIT_YEAR
#endif /* __ASM_SH_CPU_SH5_RTC_H */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __KERNEL__
# ifdef __SH5__
# include <asm/posix_types_64.h>
# else
# include <asm/posix_types_32.h>
# endif
#endif /* __KERNEL__ */
#include <asm/posix_types_32.h>
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_SH_POSIX_TYPES_64_H
#define __ASM_SH_POSIX_TYPES_64_H
typedef unsigned short __kernel_mode_t;
#define __kernel_mode_t __kernel_mode_t
typedef unsigned short __kernel_ipc_pid_t;
#define __kernel_ipc_pid_t __kernel_ipc_pid_t
typedef unsigned short __kernel_uid_t;
#define __kernel_uid_t __kernel_uid_t
typedef unsigned short __kernel_gid_t;
#define __kernel_gid_t __kernel_gid_t
typedef long unsigned int __kernel_size_t;
#define __kernel_size_t __kernel_size_t
typedef int __kernel_ssize_t;
#define __kernel_ssize_t __kernel_ssize_t
typedef int __kernel_ptrdiff_t;
#define __kernel_ptrdiff_t __kernel_ptrdiff_t
typedef unsigned short __kernel_old_uid_t;
#define __kernel_old_uid_t __kernel_old_uid_t
typedef unsigned short __kernel_old_gid_t;
#define __kernel_old_gid_t __kernel_old_gid_t
typedef unsigned short __kernel_old_dev_t;
#define __kernel_old_dev_t __kernel_old_dev_t
#include <asm-generic/posix_types.h>
#endif /* __ASM_SH_POSIX_TYPES_64_H */
......@@ -25,11 +25,6 @@
#define PT_DATA_ADDR 248 /* &(struct user)->start_data */
#define PT_TEXT_LEN 252
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
#include <asm/ptrace_64.h>
#else
#include <asm/ptrace_32.h>
#endif
#endif /* _UAPI__ASM_SH_PTRACE_H */
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _UAPI__ASM_SH_PTRACE_64_H
#define _UAPI__ASM_SH_PTRACE_64_H
struct pt_regs {
unsigned long long pc;
unsigned long long sr;
long long syscall_nr;
unsigned long long regs[63];
unsigned long long tregs[8];
unsigned long long pad[2];
};
#endif /* _UAPI__ASM_SH_PTRACE_64_H */
......@@ -5,18 +5,6 @@
struct sigcontext {
unsigned long oldmask;
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
/* CPU registers */
unsigned long long sc_regs[63];
unsigned long long sc_tregs[8];
unsigned long long sc_pc;
unsigned long long sc_sr;
/* FPU registers */
unsigned long long sc_fpregs[32];
unsigned int sc_fpscr;
unsigned int sc_fpvalid;
#else
/* CPU registers */
unsigned long sc_regs[16];
unsigned long sc_pc;
......@@ -32,7 +20,6 @@ struct sigcontext {
unsigned int sc_fpscr;
unsigned int sc_fpul;
unsigned int sc_ownedfp;
#endif
};
#endif /* __ASM_SH_SIGCONTEXT_H */
......@@ -16,66 +16,6 @@ struct __old_kernel_stat {
unsigned long st_ctime;
};
#if defined(__SH5__) || defined(CONFIG_CPU_SH5)
struct stat {
unsigned short st_dev;
unsigned short __pad1;
unsigned long st_ino;
unsigned short st_mode;
unsigned short st_nlink;
unsigned short st_uid;
unsigned short st_gid;
unsigned short st_rdev;
unsigned short __pad2;
unsigned long st_size;
unsigned long st_blksize;
unsigned long st_blocks;
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec;
unsigned long __unused4;
unsigned long __unused5;
};
/* This matches struct stat64 in glibc2.1, hence the absolutely
* insane amounts of padding around dev_t's.
*/
struct stat64 {
unsigned short st_dev;
unsigned char __pad0[10];
unsigned long st_ino;
unsigned int st_mode;
unsigned int st_nlink;
unsigned long st_uid;
unsigned long st_gid;
unsigned short st_rdev;
unsigned char __pad3[10];
long long st_size;
unsigned long st_blksize;
unsigned long st_blocks; /* Number of 512-byte blocks allocated. */
unsigned long __pad4; /* future possible st_blocks high bits */
unsigned long st_atime;
unsigned long st_atime_nsec;
unsigned long st_mtime;
unsigned long st_mtime_nsec;
unsigned long st_ctime;
unsigned long st_ctime_nsec; /* will be high 32 bits of ctime someday */
unsigned long __unused1;
unsigned long __unused2;
};
#else
struct stat {
unsigned long st_dev;
unsigned long st_ino;
......@@ -134,6 +74,5 @@ struct stat64 {
};
#define STAT_HAVE_NSEC 1
#endif
#endif /* __ASM_SH_STAT_H */
......@@ -13,14 +13,9 @@
static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
{
__asm__(
#ifdef __SH5__
"byterev %1, %0\n\t"
"shari %0, 32, %0"
#else
"swap.b %1, %0\n\t"
"swap.w %0, %0\n\t"
"swap.b %0, %0"
#endif
: "=r" (x)
: "r" (x));
......@@ -31,12 +26,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
__asm__(
#ifdef __SH5__
"byterev %1, %0\n\t"
"shari %0, 32, %0"
#else
"swap.b %1, %0"
#endif
: "=r" (x)
: "r" (x));
......
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __KERNEL__
# ifdef __SH5__
# include <asm/unistd_64.h>
# else
# include <asm/unistd_32.h>
# endif
#endif
#include <asm/unistd_32.h>
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __ASM_SH_UNISTD_64_H
#define __ASM_SH_UNISTD_64_H
/*
* include/asm-sh/unistd_64.h
*
* This file contains the system call numbers.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
* Copyright (C) 2004 Sean McGoogan
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define __NR_restart_syscall 0
#define __NR_exit 1
#define __NR_fork 2
#define __NR_read 3
#define __NR_write 4
#define __NR_open 5
#define __NR_close 6
#define __NR_waitpid 7
#define __NR_creat 8
#define __NR_link 9
#define __NR_unlink 10
#define __NR_execve 11
#define __NR_chdir 12
#define __NR_time 13
#define __NR_mknod 14
#define __NR_chmod 15
#define __NR_lchown 16
/* 17 was sys_break */
#define __NR_oldstat 18
#define __NR_lseek 19
#define __NR_getpid 20
#define __NR_mount 21
#define __NR_umount 22
#define __NR_setuid 23
#define __NR_getuid 24
#define __NR_stime 25
#define __NR_ptrace 26
#define __NR_alarm 27
#define __NR_oldfstat 28
#define __NR_pause 29
#define __NR_utime 30
/* 31 was sys_stty */
/* 32 was sys_gtty */
#define __NR_access 33
#define __NR_nice 34
/* 35 was sys_ftime */
#define __NR_sync 36
#define __NR_kill 37
#define __NR_rename 38
#define __NR_mkdir 39
#define __NR_rmdir 40
#define __NR_dup 41
#define __NR_pipe 42
#define __NR_times 43
/* 44 was sys_prof */
#define __NR_brk 45
#define __NR_setgid 46
#define __NR_getgid 47
#define __NR_signal 48
#define __NR_geteuid 49
#define __NR_getegid 50
#define __NR_acct 51
#define __NR_umount2 52
/* 53 was sys_lock */
#define __NR_ioctl 54
#define __NR_fcntl 55
/* 56 was sys_mpx */
#define __NR_setpgid 57
/* 58 was sys_ulimit */
/* 59 was sys_olduname */
#define __NR_umask 60
#define __NR_chroot 61
#define __NR_ustat 62
#define __NR_dup2 63
#define __NR_getppid 64
#define __NR_getpgrp 65
#define __NR_setsid 66
#define __NR_sigaction 67
#define __NR_sgetmask 68
#define __NR_ssetmask 69
#define __NR_setreuid 70
#define __NR_setregid 71
#define __NR_sigsuspend 72
#define __NR_sigpending 73
#define __NR_sethostname 74
#define __NR_setrlimit 75
#define __NR_getrlimit 76 /* Back compatible 2Gig limited rlimit */
#define __NR_getrusage 77
#define __NR_gettimeofday 78
#define __NR_settimeofday 79
#define __NR_getgroups 80
#define __NR_setgroups 81
/* 82 was sys_select */
#define __NR_symlink 83
#define __NR_oldlstat 84
#define __NR_readlink 85
#define __NR_uselib 86
#define __NR_swapon 87
#define __NR_reboot 88
#define __NR_readdir 89
#define __NR_mmap 90
#define __NR_munmap 91
#define __NR_truncate 92
#define __NR_ftruncate 93
#define __NR_fchmod 94
#define __NR_fchown 95
#define __NR_getpriority 96
#define __NR_setpriority 97
/* 98 was sys_profil */
#define __NR_statfs 99
#define __NR_fstatfs 100
/* 101 was sys_ioperm */
#define __NR_socketcall 102 /* old implementation of socket system call */
#define __NR_syslog 103
#define __NR_setitimer 104
#define __NR_getitimer 105
#define __NR_stat 106
#define __NR_lstat 107
#define __NR_fstat 108
#define __NR_olduname 109
/* 110 was sys_iopl */
#define __NR_vhangup 111
/* 112 was sys_idle */
/* 113 was sys_vm86old */
#define __NR_wait4 114
#define __NR_swapoff 115
#define __NR_sysinfo 116
#define __NR_ipc 117
#define __NR_fsync 118
#define __NR_sigreturn 119
#define __NR_clone 120
#define __NR_setdomainname 121
#define __NR_uname 122
#define __NR_cacheflush 123
#define __NR_adjtimex 124
#define __NR_mprotect 125
#define __NR_sigprocmask 126
/* 127 was sys_create_module */
#define __NR_init_module 128
#define __NR_delete_module 129
/* 130 was sys_get_kernel_syms */
#define __NR_quotactl 131
#define __NR_getpgid 132
#define __NR_fchdir 133
#define __NR_bdflush 134
#define __NR_sysfs 135
#define __NR_personality 136
/* 137 was sys_afs_syscall */
#define __NR_setfsuid 138
#define __NR_setfsgid 139
#define __NR__llseek 140
#define __NR_getdents 141
#define __NR__newselect 142
#define __NR_flock 143
#define __NR_msync 144
#define __NR_readv 145
#define __NR_writev 146
#define __NR_getsid 147
#define __NR_fdatasync 148
#define __NR__sysctl 149
#define __NR_mlock 150
#define __NR_munlock 151
#define __NR_mlockall 152
#define __NR_munlockall 153
#define __NR_sched_setparam 154
#define __NR_sched_getparam 155
#define __NR_sched_setscheduler 156
#define __NR_sched_getscheduler 157
#define __NR_sched_yield 158
#define __NR_sched_get_priority_max 159
#define __NR_sched_get_priority_min 160
#define __NR_sched_rr_get_interval 161
#define __NR_nanosleep 162
#define __NR_mremap 163
#define __NR_setresuid 164
#define __NR_getresuid 165
/* 166 was sys_vm86 */
/* 167 was sys_query_module */
#define __NR_poll 168
#define __NR_nfsservctl 169
#define __NR_setresgid 170
#define __NR_getresgid 171
#define __NR_prctl 172
#define __NR_rt_sigreturn 173
#define __NR_rt_sigaction 174
#define __NR_rt_sigprocmask 175
#define __NR_rt_sigpending 176
#define __NR_rt_sigtimedwait 177
#define __NR_rt_sigqueueinfo 178
#define __NR_rt_sigsuspend 179
#define __NR_pread64 180
#define __NR_pwrite64 181
#define __NR_chown 182
#define __NR_getcwd 183
#define __NR_capget 184
#define __NR_capset 185
#define __NR_sigaltstack 186
#define __NR_sendfile 187
/* 188 reserved for getpmsg */
/* 189 reserved for putpmsg */
#define __NR_vfork 190
#define __NR_ugetrlimit 191 /* SuS compliant getrlimit */
#define __NR_mmap2 192
#define __NR_truncate64 193
#define __NR_ftruncate64 194
#define __NR_stat64 195
#define __NR_lstat64 196
#define __NR_fstat64 197
#define __NR_lchown32 198
#define __NR_getuid32 199
#define __NR_getgid32 200
#define __NR_geteuid32 201
#define __NR_getegid32 202
#define __NR_setreuid32 203
#define __NR_setregid32 204
#define __NR_getgroups32 205
#define __NR_setgroups32 206
#define __NR_fchown32 207
#define __NR_setresuid32 208
#define __NR_getresuid32 209
#define __NR_setresgid32 210
#define __NR_getresgid32 211
#define __NR_chown32 212
#define __NR_setuid32 213
#define __NR_setgid32 214
#define __NR_setfsuid32 215
#define __NR_setfsgid32 216
#define __NR_pivot_root 217
#define __NR_mincore 218
#define __NR_madvise 219
/* Non-multiplexed socket family */
#define __NR_socket 220
#define __NR_bind 221
#define __NR_connect 222
#define __NR_listen 223
#define __NR_accept 224
#define __NR_getsockname 225
#define __NR_getpeername 226
#define __NR_socketpair 227
#define __NR_send 228
#define __NR_sendto 229
#define __NR_recv 230
#define __NR_recvfrom 231
#define __NR_shutdown 232
#define __NR_setsockopt 233
#define __NR_getsockopt 234
#define __NR_sendmsg 235
#define __NR_recvmsg 236
/* Non-multiplexed IPC family */
#define __NR_semop 237
#define __NR_semget 238
#define __NR_semctl 239
#define __NR_msgsnd 240
#define __NR_msgrcv 241
#define __NR_msgget 242
#define __NR_msgctl 243
#define __NR_shmat 244
#define __NR_shmdt 245
#define __NR_shmget 246
#define __NR_shmctl 247
#define __NR_getdents64 248
#define __NR_fcntl64 249
/* 250 is reserved for tux */
/* 251 is unused */
#define __NR_gettid 252
#define __NR_readahead 253
#define __NR_setxattr 254
#define __NR_lsetxattr 255
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262
#define __NR_removexattr 263
#define __NR_lremovexattr 264
#define __NR_fremovexattr 265
#define __NR_tkill 266
#define __NR_sendfile64 267
#define __NR_futex 268
#define __NR_sched_setaffinity 269
#define __NR_sched_getaffinity 270
/* 271 is reserved for set_thread_area */
/* 272 is reserved for get_thread_area */
#define __NR_io_setup 273
#define __NR_io_destroy 274
#define __NR_io_getevents 275
#define __NR_io_submit 276
#define __NR_io_cancel 277
#define __NR_fadvise64 278
/* 279 is unused */
#define __NR_exit_group 280
#define __NR_lookup_dcookie 281
#define __NR_epoll_create 282
#define __NR_epoll_ctl 283
#define __NR_epoll_wait 284
#define __NR_remap_file_pages 285
#define __NR_set_tid_address 286
#define __NR_timer_create 287
#define __NR_timer_settime (__NR_timer_create+1)
#define __NR_timer_gettime (__NR_timer_create+2)
#define __NR_timer_getoverrun (__NR_timer_create+3)
#define __NR_timer_delete (__NR_timer_create+4)
#define __NR_clock_settime (__NR_timer_create+5)
#define __NR_clock_gettime (__NR_timer_create+6)
#define __NR_clock_getres (__NR_timer_create+7)
#define __NR_clock_nanosleep (__NR_timer_create+8)
#define __NR_statfs64 296
#define __NR_fstatfs64 297
#define __NR_tgkill 298
#define __NR_utimes 299
#define __NR_fadvise64_64 300
/* 301 is reserved for vserver */
/* 302 is reserved for mbind */
/* 303 is reserved for get_mempolicy */
/* 304 is reserved for set_mempolicy */
#define __NR_mq_open 305
#define __NR_mq_unlink (__NR_mq_open+1)
#define __NR_mq_timedsend (__NR_mq_open+2)
#define __NR_mq_timedreceive (__NR_mq_open+3)
#define __NR_mq_notify (__NR_mq_open+4)
#define __NR_mq_getsetattr (__NR_mq_open+5)
/* 311 is reserved for kexec */
#define __NR_waitid 312
#define __NR_add_key 313
#define __NR_request_key 314
#define __NR_keyctl 315
#define __NR_ioprio_set 316
#define __NR_ioprio_get 317
#define __NR_inotify_init 318
#define __NR_inotify_add_watch 319
#define __NR_inotify_rm_watch 320
/* 321 is unused */
#define __NR_migrate_pages 322
#define __NR_openat 323
#define __NR_mkdirat 324
#define __NR_mknodat 325
#define __NR_fchownat 326
#define __NR_futimesat 327
#define __NR_fstatat64 328
#define __NR_unlinkat 329
#define __NR_renameat 330
#define __NR_linkat 331
#define __NR_symlinkat 332
#define __NR_readlinkat 333
#define __NR_fchmodat 334
#define __NR_faccessat 335
#define __NR_pselect6 336
#define __NR_ppoll 337
#define __NR_unshare 338
#define __NR_set_robust_list 339
#define __NR_get_robust_list 340
#define __NR_splice 341
#define __NR_sync_file_range 342
#define __NR_tee 343
#define __NR_vmsplice 344
#define __NR_move_pages 345
#define __NR_getcpu 346
#define __NR_epoll_pwait 347
#define __NR_utimensat 348
#define __NR_signalfd 349
#define __NR_timerfd_create 350
#define __NR_eventfd 351
#define __NR_fallocate 352
#define __NR_timerfd_settime 353
#define __NR_timerfd_gettime 354
#define __NR_signalfd4 355
#define __NR_eventfd2 356
#define __NR_epoll_create1 357
#define __NR_dup3 358
#define __NR_pipe2 359
#define __NR_inotify_init1 360
#define __NR_preadv 361
#define __NR_pwritev 362
#define __NR_rt_tgsigqueueinfo 363
#define __NR_perf_event_open 364
#define __NR_recvmmsg 365
#define __NR_accept4 366
#define __NR_fanotify_init 367
#define __NR_fanotify_mark 368
#define __NR_prlimit64 369
#define __NR_name_to_handle_at 370
#define __NR_open_by_handle_at 371
#define __NR_clock_adjtime 372
#define __NR_syncfs 373
#define __NR_sendmmsg 374
#define __NR_setns 375
#define __NR_process_vm_readv 376
#define __NR_process_vm_writev 377
#define __NR_kcmp 378
#define __NR_finit_module 379
#define __NR_sched_getattr 380
#define __NR_sched_setattr 381
#define __NR_renameat2 382
#define __NR_seccomp 383
#define __NR_getrandom 384
#define __NR_memfd_create 385
#define __NR_bpf 386
#define __NR_execveat 387
#define __NR_userfaultfd 388
#define __NR_membarrier 389
#define __NR_mlock2 390
#define __NR_copy_file_range 391
#define __NR_preadv2 392
#define __NR_pwritev2 393
#ifdef __KERNEL__
#define __NR_syscalls 394
#endif
#endif /* __ASM_SH_UNISTD_64_H */
......@@ -3,7 +3,7 @@
# Makefile for the Linux/SuperH kernel.
#
extra-y := head_$(BITS).o vmlinux.lds
extra-y := head_32.o vmlinux.lds
ifdef CONFIG_FUNCTION_TRACER
# Do not profile debug and lowlevel utilities
......@@ -13,26 +13,26 @@ endif
CFLAGS_REMOVE_return_address.o = -pg
obj-y := debugtraps.o dumpstack.o \
idle.o io.o irq.o irq_$(BITS).o kdebugfs.o \
idle.o io.o irq.o irq_32.o kdebugfs.o \
machvec.o nmi_debug.o process.o \
process_$(BITS).o ptrace.o ptrace_$(BITS).o \
process_32.o ptrace.o ptrace_32.o \
reboot.o return_address.o \
setup.o signal_$(BITS).o sys_sh.o \
syscalls_$(BITS).o time.o topology.o traps.o \
traps_$(BITS).o unwinder.o
setup.o signal_32.o sys_sh.o \
syscalls_32.o time.o topology.o traps.o \
traps_32.o unwinder.o
ifndef CONFIG_GENERIC_IOMAP
obj-y += iomap.o
obj-$(CONFIG_HAS_IOPORT_MAP) += ioport.o
endif
obj-$(CONFIG_SUPERH32) += sys_sh32.o
obj-y += sys_sh32.o
obj-y += cpu/
obj-$(CONFIG_VSYSCALL) += vsyscall/
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o
obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
......
......@@ -7,7 +7,6 @@ obj-$(CONFIG_CPU_SH2) = sh2/
obj-$(CONFIG_CPU_SH2A) = sh2a/
obj-$(CONFIG_CPU_SH3) = sh3/
obj-$(CONFIG_CPU_SH4) = sh4/
obj-$(CONFIG_CPU_SH5) = sh5/
# Special cases for family ancestry.
......
......@@ -103,7 +103,7 @@ void __attribute__ ((weak)) l2_cache_init(void)
/*
* Generic first-level cache init
*/
#if defined(CONFIG_SUPERH32) && !defined(CONFIG_CPU_J2)
#if !defined(CONFIG_CPU_J2)
static void cache_init(void)
{
unsigned long ccr, flags;
......
......@@ -2,6 +2,5 @@
#
# Makefile for the Linux/SuperH CPU-specific IRQ handlers.
#
obj-$(CONFIG_SUPERH32) += imask.o
obj-$(CONFIG_CPU_SH5) += intc-sh5.o
obj-y += imask.o
obj-$(CONFIG_CPU_HAS_IPR_IRQ) += ipr.o
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/irq/intc-sh5.c
*
* Interrupt Controller support for SH5 INTC.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Paul Mundt
*
 * Per-interrupt selective. IRLM=0 (fixed priority) is not
 * supported, being useless without a cascaded interrupt
 * controller.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <cpu/irq.h>
#include <asm/page.h>
/*
* Maybe the generic Peripheral block could move to a more
* generic include file. INTC Block will be defined here
* and only here to make INTC self-contained in a single
* file.
*/
#define INTC_BLOCK_OFFSET 0x01000000
/* Base */
#define INTC_BASE PHYS_PERIPHERAL_BLOCK + \
INTC_BLOCK_OFFSET
/* Address */
#define INTC_ICR_SET (intc_virt + 0x0)
#define INTC_ICR_CLEAR (intc_virt + 0x8)
#define INTC_INTPRI_0 (intc_virt + 0x10)
#define INTC_INTSRC_0 (intc_virt + 0x50)
#define INTC_INTSRC_1 (intc_virt + 0x58)
#define INTC_INTREQ_0 (intc_virt + 0x60)
#define INTC_INTREQ_1 (intc_virt + 0x68)
#define INTC_INTENB_0 (intc_virt + 0x70)
#define INTC_INTENB_1 (intc_virt + 0x78)
#define INTC_INTDSB_0 (intc_virt + 0x80)
#define INTC_INTDSB_1 (intc_virt + 0x88)
#define INTC_ICR_IRLM 0x1
#define INTC_INTPRI_PREGS 8 /* 8 Priority Registers */
#define INTC_INTPRI_PPREG 8 /* 8 Priorities per Register */
/*
* Mapper between the vector ordinal and the IRQ number
* passed to kernel/device drivers.
*/
int intc_evt_to_irq[(0xE20/0x20)+1] = {
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x000 - 0x0E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x100 - 0x1E0 */
0, 0, 0, 0, 0, 1, 0, 0, /* 0x200 - 0x2E0 */
2, 0, 0, 3, 0, 0, 0, -1, /* 0x300 - 0x3E0 */
32, 33, 34, 35, 36, 37, 38, -1, /* 0x400 - 0x4E0 */
-1, -1, -1, 63, -1, -1, -1, -1, /* 0x500 - 0x5E0 */
-1, -1, 18, 19, 20, 21, 22, -1, /* 0x600 - 0x6E0 */
39, 40, 41, 42, -1, -1, -1, -1, /* 0x700 - 0x7E0 */
4, 5, 6, 7, -1, -1, -1, -1, /* 0x800 - 0x8E0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0x900 - 0x9E0 */
12, 13, 14, 15, 16, 17, -1, -1, /* 0xA00 - 0xAE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xB00 - 0xBE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xC00 - 0xCE0 */
-1, -1, -1, -1, -1, -1, -1, -1, /* 0xD00 - 0xDE0 */
-1, -1 /* 0xE00 - 0xE20 */
};
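Editor's note: the table above is indexed by the hardware vector code divided by its 0x20 spacing, with -1 marking vectors that have no IRQ assigned. A hedged sketch of the implied lookup (the helper name is hypothetical):
/*
 * Hypothetical helper (editor's sketch): translate a raw INTEVT/EXPEVT
 * vector code into a Linux IRQ number via intc_evt_to_irq[].
 */
static inline int example_evt_to_irq(unsigned long evt)
{
	if (evt > 0xE20)
		return -1;
	return intc_evt_to_irq[evt >> 5];	/* evt / 0x20 */
}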
static unsigned long intc_virt;
static int irlm; /* IRL mode */
static void enable_intc_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
if ((irq <= IRQ_IRL3) && (irlm == NO_PRIORITY))
printk("Trying to use straight IRL0-3 with an encoding platform.\n");
if (irq < 32) {
reg = INTC_INTENB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTENB_1;
bitmask = 1 << (irq - 32);
}
__raw_writel(bitmask, reg);
}
static void disable_intc_irq(struct irq_data *data)
{
unsigned int irq = data->irq;
unsigned long reg;
unsigned long bitmask;
if (irq < 32) {
reg = INTC_INTDSB_0;
bitmask = 1 << irq;
} else {
reg = INTC_INTDSB_1;
bitmask = 1 << (irq - 32);
}
__raw_writel(bitmask, reg);
}
static struct irq_chip intc_irq_type = {
.name = "INTC",
.irq_enable = enable_intc_irq,
.irq_disable = disable_intc_irq,
};
void __init plat_irq_setup(void)
{
unsigned long long __dummy0, __dummy1=~0x00000000100000f0;
unsigned long reg;
int i;
intc_virt = (unsigned long)ioremap(INTC_BASE, 1024);
if (!intc_virt) {
panic("Unable to remap INTC\n");
}
/* Set default: per-line enable/disable, priority driven ack/eoi */
for (i = 0; i < NR_INTC_IRQS; i++)
irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq);
/* Disable all interrupts and set all priorities to 0 to avoid trouble */
__raw_writel(-1, INTC_INTDSB_0);
__raw_writel(-1, INTC_INTDSB_1);
for (reg = INTC_INTPRI_0, i = 0; i < INTC_INTPRI_PREGS; i++, reg += 8)
__raw_writel( NO_PRIORITY, reg);
#ifdef CONFIG_SH_CAYMAN
{
unsigned long data;
/* Set IRLM */
/* If all the priorities are set to 'no priority', then
* assume we are using encoded mode.
*/
irlm = platform_int_priority[IRQ_IRL0] +
platform_int_priority[IRQ_IRL1] +
platform_int_priority[IRQ_IRL2] +
platform_int_priority[IRQ_IRL3];
if (irlm == NO_PRIORITY) {
/* IRLM = 0 */
reg = INTC_ICR_CLEAR;
i = IRQ_INTA;
printk("Trying to use encoded IRL0-3. IRLs unsupported.\n");
} else {
/* IRLM = 1 */
reg = INTC_ICR_SET;
i = IRQ_IRL0;
}
__raw_writel(INTC_ICR_IRLM, reg);
/* Set interrupt priorities according to platform description */
for (data = 0, reg = INTC_INTPRI_0; i < NR_INTC_IRQS; i++) {
data |= platform_int_priority[i] <<
((i % INTC_INTPRI_PPREG) * 4);
if ((i % INTC_INTPRI_PPREG) == (INTC_INTPRI_PPREG - 1)) {
/* After the 8th priority (index 7), write out the Priority Register */
__raw_writel(data, reg);
data = 0;
reg += 8;
}
}
}
#endif
/*
* And now let interrupts come in.
* sti() is not enough, we need to
* lower priority, too.
*/
__asm__ __volatile__("getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy0)
: "r" (__dummy1));
}
......@@ -24,7 +24,6 @@ static const char *cpu_name[] = {
[CPU_SH7343] = "SH7343", [CPU_SH7785] = "SH7785",
[CPU_SH7786] = "SH7786", [CPU_SH7757] = "SH7757",
[CPU_SH7722] = "SH7722", [CPU_SHX3] = "SH-X3",
[CPU_SH5_101] = "SH5-101", [CPU_SH5_103] = "SH5-103",
[CPU_MXG] = "MX-G", [CPU_SH7723] = "SH7723",
[CPU_SH7366] = "SH7366", [CPU_SH7724] = "SH7724",
[CPU_SH7372] = "SH7372", [CPU_SH7734] = "SH7734",
......
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the Linux/SuperH SH-5 backends.
#
obj-y := entry.o probe.o switchto.o
obj-$(CONFIG_SH_FPU) += fpu.o
obj-$(CONFIG_KALLSYMS) += unwind.o
# CPU subtype setup
obj-$(CONFIG_CPU_SH5) += setup-sh5.o
# Primary on-chip clocks (common)
clock-$(CONFIG_CPU_SH5) := clock-sh5.o
obj-y += $(clock-y)
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/clock-sh5.c
*
* SH-5 support for the clock framework
*
* Copyright (C) 2008 Paul Mundt
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/clock.h>
#include <asm/io.h>
static int ifc_table[] = { 2, 4, 6, 8, 10, 12, 16, 24 };
/* Clock, Power and Reset Controller */
#define CPRC_BLOCK_OFF 0x01010000
#define CPRC_BASE (PHYS_PERIPHERAL_BLOCK + CPRC_BLOCK_OFF)
static unsigned long cprc_base;
static void master_clk_init(struct clk *clk)
{
int idx = (__raw_readl(cprc_base + 0x00) >> 6) & 0x0007;
clk->rate *= ifc_table[idx];
}
static struct sh_clk_ops sh5_master_clk_ops = {
.init = master_clk_init,
};
static unsigned long module_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) >> 12) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_module_clk_ops = {
.recalc = module_clk_recalc,
};
static unsigned long bus_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) >> 3) & 0x0007;
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_bus_clk_ops = {
.recalc = bus_clk_recalc,
};
static unsigned long cpu_clk_recalc(struct clk *clk)
{
int idx = (__raw_readw(cprc_base) & 0x0007);
return clk->parent->rate / ifc_table[idx];
}
static struct sh_clk_ops sh5_cpu_clk_ops = {
.recalc = cpu_clk_recalc,
};
static struct sh_clk_ops *sh5_clk_ops[] = {
&sh5_master_clk_ops,
&sh5_module_clk_ops,
&sh5_bus_clk_ops,
&sh5_cpu_clk_ops,
};
void __init arch_init_clk_ops(struct sh_clk_ops **ops, int idx)
{
cprc_base = (unsigned long)ioremap(CPRC_BASE, 1024);
BUG_ON(!cprc_base);
if (idx < ARRAY_SIZE(sh5_clk_ops))
*ops = sh5_clk_ops[idx];
}
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh5/entry.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2008 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sys.h>
#include <cpu/registers.h>
#include <asm/processor.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
/*
* SR fields.
*/
#define SR_ASID_MASK 0x00ff0000
#define SR_FD_MASK 0x00008000
#define SR_SS 0x08000000
#define SR_BL 0x10000000
#define SR_MD 0x40000000
/*
* Event code.
*/
#define EVENT_INTERRUPT 0
#define EVENT_FAULT_TLB 1
#define EVENT_FAULT_NOT_TLB 2
#define EVENT_DEBUG 3
/* EXPEVT values */
#define RESET_CAUSE 0x20
#define DEBUGSS_CAUSE 0x980
/*
* Frame layout. Quad index.
*/
#define FRAME_T(x) FRAME_TBASE+(x*8)
#define FRAME_R(x) FRAME_RBASE+(x*8)
#define FRAME_S(x) FRAME_SBASE+(x*8)
#define FSPC 0
#define FSSR 1
#define FSYSCALL_ID 2
/* Arrange the save frame to be a multiple of 32 bytes long */
#define FRAME_SBASE 0
#define FRAME_RBASE (FRAME_SBASE+(3*8)) /* SYSCALL_ID - SSR - SPC */
#define FRAME_TBASE (FRAME_RBASE+(63*8)) /* r0 - r62 */
#define FRAME_PBASE (FRAME_TBASE+(8*8)) /* tr0 -tr7 */
#define FRAME_SIZE (FRAME_PBASE+(2*8)) /* pad0-pad1 */
#define FP_FRAME_SIZE FP_FRAME_BASE+(33*8) /* dr0 - dr31 + fpscr */
#define FP_FRAME_BASE 0
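Editor's note: the multiple-of-32 claim above can be checked directly; the arithmetic below is the editor's, not from the original source.
/*
 * FRAME_RBASE = FRAME_SBASE + 3*8   = 24
 * FRAME_TBASE = FRAME_RBASE + 63*8  = 528
 * FRAME_PBASE = FRAME_TBASE + 8*8   = 592
 * FRAME_SIZE  = FRAME_PBASE + 2*8   = 608 = 19 * 32
 * so the basic save frame is indeed a multiple of 32 bytes.
 */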
#define SAVED_R2 0*8
#define SAVED_R3 1*8
#define SAVED_R4 2*8
#define SAVED_R5 3*8
#define SAVED_R18 4*8
#define SAVED_R6 5*8
#define SAVED_TR0 6*8
/* These are the registers saved in the TLB path that aren't saved in the first
level of the normal one. */
#define TLB_SAVED_R25 7*8
#define TLB_SAVED_TR1 8*8
#define TLB_SAVED_TR2 9*8
#define TLB_SAVED_TR3 10*8
#define TLB_SAVED_TR4 11*8
/* Save R0/R1: the PT-migrating compiler currently dishonours -ffixed-r0 and -ffixed-r1, causing
breakage otherwise. */
#define TLB_SAVED_R0 12*8
#define TLB_SAVED_R1 13*8
#define CLI() \
getcon SR, r6; \
ori r6, 0xf0, r6; \
putcon r6, SR;
#define STI() \
getcon SR, r6; \
andi r6, ~0xf0, r6; \
putcon r6, SR;
#ifdef CONFIG_PREEMPTION
# define preempt_stop() CLI()
#else
# define preempt_stop()
# define resume_kernel restore_all
#endif
.section .data, "aw"
#define FAST_TLBMISS_STACK_CACHELINES 4
#define FAST_TLBMISS_STACK_QUADWORDS (4*FAST_TLBMISS_STACK_CACHELINES)
/* Register back-up area for all exceptions */
.balign 32
/* Allow for 16 quadwords to be pushed by fast tlbmiss handling
* register saves etc. */
.fill FAST_TLBMISS_STACK_QUADWORDS, 8, 0x0
/* This is 32 byte aligned by construction */
/* Register back-up area for all exceptions */
reg_save_area:
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
/* Save area for RESVEC exceptions. We cannot use reg_save_area because of
* reentrancy. Note this area may be accessed via physical address.
* Align so this fits a whole single cache line, for ease of purging.
*/
.balign 32,0,32
resvec_save_area:
.quad 0
.quad 0
.quad 0
.quad 0
.quad 0
.balign 32,0,32
/* Jump table of 3rd level handlers */
trap_jtable:
.long do_exception_error /* 0x000 */
.long do_exception_error /* 0x020 */
#ifdef CONFIG_MMU
.long tlb_miss_load /* 0x040 */
.long tlb_miss_store /* 0x060 */
#else
.long do_exception_error
.long do_exception_error
#endif
! ARTIFICIAL pseudo-EXPEVT setting
.long do_debug_interrupt /* 0x080 */
#ifdef CONFIG_MMU
.long tlb_miss_load /* 0x0A0 */
.long tlb_miss_store /* 0x0C0 */
#else
.long do_exception_error
.long do_exception_error
#endif
.long do_address_error_load /* 0x0E0 */
.long do_address_error_store /* 0x100 */
#ifdef CONFIG_SH_FPU
.long do_fpu_error /* 0x120 */
#else
.long do_exception_error /* 0x120 */
#endif
.long do_exception_error /* 0x140 */
.long system_call /* 0x160 */
.long do_reserved_inst /* 0x180 */
.long do_illegal_slot_inst /* 0x1A0 */
.long do_exception_error /* 0x1C0 - NMI */
.long do_exception_error /* 0x1E0 */
.rept 15
.long do_IRQ /* 0x200 - 0x3C0 */
.endr
.long do_exception_error /* 0x3E0 */
.rept 32
.long do_IRQ /* 0x400 - 0x7E0 */
.endr
.long fpu_error_or_IRQA /* 0x800 */
.long fpu_error_or_IRQB /* 0x820 */
.long do_IRQ /* 0x840 */
.long do_IRQ /* 0x860 */
.rept 6
.long do_exception_error /* 0x880 - 0x920 */
.endr
.long breakpoint_trap_handler /* 0x940 */
.long do_exception_error /* 0x960 */
.long do_single_step /* 0x980 */
.rept 3
.long do_exception_error /* 0x9A0 - 0x9E0 */
.endr
.long do_IRQ /* 0xA00 */
.long do_IRQ /* 0xA20 */
#ifdef CONFIG_MMU
.long itlb_miss_or_IRQ /* 0xA40 */
#else
.long do_IRQ
#endif
.long do_IRQ /* 0xA60 */
.long do_IRQ /* 0xA80 */
#ifdef CONFIG_MMU
.long itlb_miss_or_IRQ /* 0xAA0 */
#else
.long do_IRQ
#endif
.long do_exception_error /* 0xAC0 */
.long do_address_error_exec /* 0xAE0 */
.rept 8
.long do_exception_error /* 0xB00 - 0xBE0 */
.endr
.rept 18
.long do_IRQ /* 0xC00 - 0xE20 */
.endr
.section .text64, "ax"
/*
* --- Exception/Interrupt/Event Handling Section
*/
/*
* VBR and RESVEC blocks.
*
* First level handler for VBR-based exceptions.
*
* To avoid waste of space, align to the maximum text block size.
* This is assumed to be at most 128 bytes or 32 instructions.
* DO NOT EXCEED 32 instructions on the first level handlers !
*
* Also note that RESVEC is contained within the VBR block
* where the room left (1KB - TEXT_SIZE) allows placing
* the RESVEC block (at most 512B + TEXT_SIZE).
*
* So first (and only) level handler for RESVEC-based exceptions.
*
* Where the fault/interrupt is handled (not_a_tlb_miss, tlb_miss
 * and interrupt) we are very tight on register space until
* saving onto the stack frame, which is done in handle_exception().
*
*/
#define TEXT_SIZE 128
#define BLOCK_SIZE 1664 /* Dynamic check, 13*128 */
.balign TEXT_SIZE
LVBR_block:
.space 256, 0 /* Power-on class handler, */
/* not required here */
not_a_tlb_miss:
synco /* TAKum03020 (but probably a good idea anyway.) */
/* Save original stack pointer into KCR1 */
putcon SP, KCR1
/* Save other original registers into reg_save_area */
movi reg_save_area, SP
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* Set args for Non-debug, Not a TLB miss class handler */
getcon EXPEVT, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_FAULT_NOT_TLB, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
.balign 256
! VBR+0x200
nop
.balign 256
! VBR+0x300
nop
.balign 256
/*
* Instead of the natural .balign 1024 place RESVEC here
* respecting the final 1KB alignment.
*/
.balign TEXT_SIZE
/*
* Instead of '.space 1024-TEXT_SIZE' place the RESVEC
* block making sure the final alignment is correct.
*/
#ifdef CONFIG_MMU
tlb_miss:
synco /* TAKum03020 (but probably a good idea anyway.) */
putcon SP, KCR1
movi reg_save_area, SP
/* SP is guaranteed 32-byte aligned. */
st.q SP, TLB_SAVED_R0 , r0
st.q SP, TLB_SAVED_R1 , r1
st.q SP, SAVED_R2 , r2
st.q SP, SAVED_R3 , r3
st.q SP, SAVED_R4 , r4
st.q SP, SAVED_R5 , r5
st.q SP, SAVED_R6 , r6
st.q SP, SAVED_R18, r18
/* Save R25 for safety; as/ld may want to use it to achieve the call to
* the code in mm/tlbmiss.c */
st.q SP, TLB_SAVED_R25, r25
gettr tr0, r2
gettr tr1, r3
gettr tr2, r4
gettr tr3, r5
gettr tr4, r18
st.q SP, SAVED_TR0 , r2
st.q SP, TLB_SAVED_TR1 , r3
st.q SP, TLB_SAVED_TR2 , r4
st.q SP, TLB_SAVED_TR3 , r5
st.q SP, TLB_SAVED_TR4 , r18
pt do_fast_page_fault, tr0
getcon SSR, r2
getcon EXPEVT, r3
getcon TEA, r4
shlri r2, 30, r2
andi r2, 1, r2 /* r2 = SSR.MD */
blink tr0, LINK
pt fixup_to_invoke_general_handler, tr1
/* If the fast path handler fixed the fault, just drop through quickly
to the restore code right away to return to the excepting context.
*/
bnei/u r2, 0, tr1
fast_tlb_miss_restore:
ld.q SP, SAVED_TR0, r2
ld.q SP, TLB_SAVED_TR1, r3
ld.q SP, TLB_SAVED_TR2, r4
ld.q SP, TLB_SAVED_TR3, r5
ld.q SP, TLB_SAVED_TR4, r18
ptabs r2, tr0
ptabs r3, tr1
ptabs r4, tr2
ptabs r5, tr3
ptabs r18, tr4
ld.q SP, TLB_SAVED_R0, r0
ld.q SP, TLB_SAVED_R1, r1
ld.q SP, SAVED_R2, r2
ld.q SP, SAVED_R3, r3
ld.q SP, SAVED_R4, r4
ld.q SP, SAVED_R5, r5
ld.q SP, SAVED_R6, r6
ld.q SP, SAVED_R18, r18
ld.q SP, TLB_SAVED_R25, r25
getcon KCR1, SP
rte
nop /* for safety, in case the code is run on sh5-101 cut1.x */
fixup_to_invoke_general_handler:
/* OK, new method. Restore stuff that's not expected to get saved into
the 'first-level' reg save area, then just fall through to setting
up the registers and calling the second-level handler. */
/* 2nd level expects r2,3,4,5,6,18,tr0 to be saved. So we must restore
r25,tr1-4 and save r6 to get into the right state. */
ld.q SP, TLB_SAVED_TR1, r3
ld.q SP, TLB_SAVED_TR2, r4
ld.q SP, TLB_SAVED_TR3, r5
ld.q SP, TLB_SAVED_TR4, r18
ld.q SP, TLB_SAVED_R25, r25
ld.q SP, TLB_SAVED_R0, r0
ld.q SP, TLB_SAVED_R1, r1
ptabs/u r3, tr1
ptabs/u r4, tr2
ptabs/u r5, tr3
ptabs/u r18, tr4
/* Set args for Non-debug, TLB miss class handler */
getcon EXPEVT, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_FAULT_TLB, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
#else /* CONFIG_MMU */
.balign 256
#endif
/* NB TAKE GREAT CARE HERE TO ENSURE THAT THE INTERRUPT CODE
DOES END UP AT VBR+0x600 */
nop
nop
nop
nop
nop
nop
.balign 256
/* VBR + 0x600 */
interrupt:
synco /* TAKum03020 (but probably a good idea anyway.) */
/* Save original stack pointer into KCR1 */
putcon SP, KCR1
/* Save other original registers into reg_save_area */
movi reg_save_area, SP
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* Set args for interrupt class handler */
getcon INTEVT, r2
movi ret_from_irq, r3
ori r3, 1, r3
movi EVENT_INTERRUPT, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
.balign TEXT_SIZE /* let's waste the bare minimum */
LVBR_block_end: /* Marker. Used for total checking */
.balign 256
LRESVEC_block:
/* Panic handler. Called with MMU off. Possible causes/actions:
* - Reset: Jump to program start.
* - Single Step: Turn off Single Step & return.
* - Others: Call panic handler, passing PC as arg.
* (this may need to be extended...)
*/
reset_or_panic:
synco /* TAKum03020 (but probably a good idea anyway.) */
putcon SP, DCR
/* First save r0-1 and tr0, as we need to use these */
movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
st.q SP, 0, r0
st.q SP, 8, r1
gettr tr0, r0
st.q SP, 32, r0
/* Check cause */
getcon EXPEVT, r0
movi RESET_CAUSE, r1
sub r1, r0, r1 /* r1=0 if reset */
movi _stext-CONFIG_PAGE_OFFSET, r0
ori r0, 1, r0
ptabs r0, tr0
beqi r1, 0, tr0 /* Jump to start address if reset */
getcon EXPEVT, r0
movi DEBUGSS_CAUSE, r1
sub r1, r0, r1 /* r1=0 if single step */
pta single_step_panic, tr0
beqi r1, 0, tr0 /* jump if single step */
/* Now jump to where we save the registers. */
movi panic_stash_regs-CONFIG_PAGE_OFFSET, r1
ptabs r1, tr0
blink tr0, r63
single_step_panic:
/* We are in a handler with Single Step set. We need to resume the
* handler, by turning on MMU & turning off Single Step. */
getcon SSR, r0
movi SR_MMU, r1
or r0, r1, r0
movi ~SR_SS, r1
and r0, r1, r0
putcon r0, SSR
/* Restore EXPEVT, as the rte won't do this */
getcon PEXPEVT, r0
putcon r0, EXPEVT
/* Restore regs */
ld.q SP, 32, r0
ptabs r0, tr0
ld.q SP, 0, r0
ld.q SP, 8, r1
getcon DCR, SP
synco
rte
.balign 256
debug_exception:
synco /* TAKum03020 (but probably a good idea anyway.) */
/*
* Single step/software_break_point first level handler.
* Called with MMU off, so the first thing we do is enable it
* by doing an rte with appropriate SSR.
*/
putcon SP, DCR
/* Save SSR & SPC, together with R0 & R1, as we need to use 2 regs. */
movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
/* With the MMU off, we are bypassing the cache, so purge any
* data that will be made stale by the following stores.
*/
ocbp SP, 0
synco
st.q SP, 0, r0
st.q SP, 8, r1
getcon SPC, r0
st.q SP, 16, r0
getcon SSR, r0
st.q SP, 24, r0
/* Enable MMU, block exceptions, set priv mode, disable single step */
movi SR_MMU | SR_BL | SR_MD, r1
or r0, r1, r0
movi ~SR_SS, r1
and r0, r1, r0
putcon r0, SSR
/* Force control to debug_exception_2 when rte is executed */
movi debug_exeception_2, r0
ori r0, 1, r0 /* force SHmedia, just in case */
putcon r0, SPC
getcon DCR, SP
synco
rte
debug_exeception_2:
/* Restore saved regs */
putcon SP, KCR1
movi resvec_save_area, SP
ld.q SP, 24, r0
putcon r0, SSR
ld.q SP, 16, r0
putcon r0, SPC
ld.q SP, 0, r0
ld.q SP, 8, r1
/* Save other original registers into reg_save_area */
movi reg_save_area, SP
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* Set args for debug class handler */
getcon EXPEVT, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_DEBUG, r4
or SP, ZERO, r5
getcon KCR1, SP
pta handle_exception, tr0
blink tr0, ZERO
.balign 256
debug_interrupt:
/* !!! WE COME HERE IN REAL MODE !!! */
/* Hook-up debug interrupt to allow various debugging options to be
* hooked into its handler. */
/* Save original stack pointer into KCR1 */
synco
putcon SP, KCR1
movi resvec_save_area-CONFIG_PAGE_OFFSET, SP
ocbp SP, 0
ocbp SP, 32
synco
/* Save other original registers into reg_save_area thru real addresses */
st.q SP, SAVED_R2, r2
st.q SP, SAVED_R3, r3
st.q SP, SAVED_R4, r4
st.q SP, SAVED_R5, r5
st.q SP, SAVED_R6, r6
st.q SP, SAVED_R18, r18
gettr tr0, r3
st.q SP, SAVED_TR0, r3
/* move (spc,ssr)->(pspc,pssr). The rte will shift
them back again, so that they look like the originals
as far as the real handler code is concerned. */
getcon spc, r6
putcon r6, pspc
getcon ssr, r6
putcon r6, pssr
! construct useful SR for handle_exception
movi 3, r6
shlli r6, 30, r6
getcon sr, r18
or r18, r6, r6
putcon r6, ssr
! SSR is now the current SR with the MD and MMU bits set
! i.e. the rte will switch back to priv mode and put
! the mmu back on
! construct spc
movi handle_exception, r18
ori r18, 1, r18 ! for safety (do we need this?)
putcon r18, spc
/* Set args for Non-debug, Not a TLB miss class handler */
! EXPEVT==0x80 is unused, so 'steal' this value to put the
! debug interrupt handler in the vectoring table
movi 0x80, r2
movi ret_from_exception, r3
ori r3, 1, r3
movi EVENT_FAULT_NOT_TLB, r4
or SP, ZERO, r5
movi CONFIG_PAGE_OFFSET, r6
add r6, r5, r5
getcon KCR1, SP
synco ! for safety
rte ! -> handle_exception, switch back to priv mode again
LRESVEC_block_end: /* Marker. Unused. */
.balign TEXT_SIZE
/*
* Second level handler for VBR-based exceptions. Pre-handler.
* In common to all stack-frame sensitive handlers.
*
* Inputs:
* (KCR0) Current [current task union]
* (KCR1) Original SP
* (r2) INTEVT/EXPEVT
* (r3) appropriate return address
* (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault, 3=debug)
* (r5) Pointer to reg_save_area
* (SP) Original SP
*
* Available registers:
* (r6)
* (r18)
* (tr0)
*
*/
handle_exception:
/* Common 2nd level handler. */
/* First thing we need an appropriate stack pointer */
getcon SSR, r6
shlri r6, 30, r6
andi r6, 1, r6
pta stack_ok, tr0
bne r6, ZERO, tr0 /* Original stack pointer is fine */
/* Set stack pointer for user fault */
getcon KCR0, SP
movi THREAD_SIZE, r6 /* Point to the end */
add SP, r6, SP
stack_ok:
/* DEBUG : check for underflow/overflow of the kernel stack */
pta no_underflow, tr0
getcon KCR0, r6
movi 1024, r18
add r6, r18, r6
bge SP, r6, tr0 ! ? below 1k from bottom of stack : danger zone
/* Just panic to cause a crash. */
bad_sp:
ld.b r63, 0, r6
nop
no_underflow:
pta bad_sp, tr0
getcon kcr0, r6
movi THREAD_SIZE, r18
add r18, r6, r6
bgt SP, r6, tr0 ! sp above the stack
/* Make some room for the BASIC frame. */
movi -(FRAME_SIZE), r6
add SP, r6, SP
/* Could do this with no stalling if we had another spare register, but the
code below will be OK. */
ld.q r5, SAVED_R2, r6
ld.q r5, SAVED_R3, r18
st.q SP, FRAME_R(2), r6
ld.q r5, SAVED_R4, r6
st.q SP, FRAME_R(3), r18
ld.q r5, SAVED_R5, r18
st.q SP, FRAME_R(4), r6
ld.q r5, SAVED_R6, r6
st.q SP, FRAME_R(5), r18
ld.q r5, SAVED_R18, r18
st.q SP, FRAME_R(6), r6
ld.q r5, SAVED_TR0, r6
st.q SP, FRAME_R(18), r18
st.q SP, FRAME_T(0), r6
/* Keep old SP around */
getcon KCR1, r6
/* Save the rest of the general purpose registers */
st.q SP, FRAME_R(0), r0
st.q SP, FRAME_R(1), r1
st.q SP, FRAME_R(7), r7
st.q SP, FRAME_R(8), r8
st.q SP, FRAME_R(9), r9
st.q SP, FRAME_R(10), r10
st.q SP, FRAME_R(11), r11
st.q SP, FRAME_R(12), r12
st.q SP, FRAME_R(13), r13
st.q SP, FRAME_R(14), r14
/* SP is somewhere else */
st.q SP, FRAME_R(15), r6
st.q SP, FRAME_R(16), r16
st.q SP, FRAME_R(17), r17
/* r18 is saved earlier. */
st.q SP, FRAME_R(19), r19
st.q SP, FRAME_R(20), r20
st.q SP, FRAME_R(21), r21
st.q SP, FRAME_R(22), r22
st.q SP, FRAME_R(23), r23
st.q SP, FRAME_R(24), r24
st.q SP, FRAME_R(25), r25
st.q SP, FRAME_R(26), r26
st.q SP, FRAME_R(27), r27
st.q SP, FRAME_R(28), r28
st.q SP, FRAME_R(29), r29
st.q SP, FRAME_R(30), r30
st.q SP, FRAME_R(31), r31
st.q SP, FRAME_R(32), r32
st.q SP, FRAME_R(33), r33
st.q SP, FRAME_R(34), r34
st.q SP, FRAME_R(35), r35
st.q SP, FRAME_R(36), r36
st.q SP, FRAME_R(37), r37
st.q SP, FRAME_R(38), r38
st.q SP, FRAME_R(39), r39
st.q SP, FRAME_R(40), r40
st.q SP, FRAME_R(41), r41
st.q SP, FRAME_R(42), r42
st.q SP, FRAME_R(43), r43
st.q SP, FRAME_R(44), r44
st.q SP, FRAME_R(45), r45
st.q SP, FRAME_R(46), r46
st.q SP, FRAME_R(47), r47
st.q SP, FRAME_R(48), r48
st.q SP, FRAME_R(49), r49
st.q SP, FRAME_R(50), r50
st.q SP, FRAME_R(51), r51
st.q SP, FRAME_R(52), r52
st.q SP, FRAME_R(53), r53
st.q SP, FRAME_R(54), r54
st.q SP, FRAME_R(55), r55
st.q SP, FRAME_R(56), r56
st.q SP, FRAME_R(57), r57
st.q SP, FRAME_R(58), r58
st.q SP, FRAME_R(59), r59
st.q SP, FRAME_R(60), r60
st.q SP, FRAME_R(61), r61
st.q SP, FRAME_R(62), r62
/*
* Save the S* registers.
*/
getcon SSR, r61
st.q SP, FRAME_S(FSSR), r61
getcon SPC, r62
st.q SP, FRAME_S(FSPC), r62
movi -1, r62 /* Reset syscall_nr */
st.q SP, FRAME_S(FSYSCALL_ID), r62
/* Save the rest of the target registers */
gettr tr1, r6
st.q SP, FRAME_T(1), r6
gettr tr2, r6
st.q SP, FRAME_T(2), r6
gettr tr3, r6
st.q SP, FRAME_T(3), r6
gettr tr4, r6
st.q SP, FRAME_T(4), r6
gettr tr5, r6
st.q SP, FRAME_T(5), r6
gettr tr6, r6
st.q SP, FRAME_T(6), r6
gettr tr7, r6
st.q SP, FRAME_T(7), r6
! setup FP so that unwinder can wind back through nested kernel mode
! exceptions
add SP, ZERO, r14
/* For syscall and debug race condition, get TRA now */
getcon TRA, r5
/* We are in a safe position to turn SR.BL off, but set IMASK=0xf
* Also set FD, to catch FPU usage in the kernel.
*
* benedict.gaster@superh.com 29/07/2002
*
* On all SH5-101 revisions it is unsafe to raise the IMASK and at the
* same time change BL from 1->0, as any pending interrupt of a level
* higher than the previous value of IMASK will leak through and be
* taken unexpectedly.
*
* To avoid this we raise the IMASK and then issue another PUTCON to
* enable interrupts.
*/
getcon SR, r6
movi SR_IMASK | SR_FD, r7
or r6, r7, r6
putcon r6, SR
movi SR_UNBLOCK_EXC, r7
and r6, r7, r6
putcon r6, SR
/* Now call the appropriate 3rd level handler */
or r3, ZERO, LINK
movi trap_jtable, r3
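! r2 still holds EXPEVT/INTEVT; vector codes are 0x20 apart, so r2 >> 3 is
! the byte offset into the table of 4-byte handler pointers, and a further
! >> 2 leaves the entry number that the third level handlers expect in r2.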
shlri r2, 3, r2
ldx.l r2, r3, r3
shlri r2, 2, r2
ptabs r3, tr0
or SP, ZERO, r3
blink tr0, ZERO
/*
* Second level handler for VBR-based exceptions. Post-handlers.
*
* Post-handlers for interrupts (ret_from_irq), exceptions
* (ret_from_exception) and common reentrance doors (restore_all
* to get back to the original context, ret_from_syscall loop to
* check kernel exiting).
*
* ret_with_reschedule and work_notifysig are inner labels of
* the ret_from_syscall loop.
*
* Common to all stack-frame sensitive handlers.
*
* Inputs:
* (SP) struct pt_regs *, original register's frame pointer (basic)
*
*/
.global ret_from_irq
ret_from_irq:
ld.q SP, FRAME_S(FSSR), r6
shlri r6, 30, r6
andi r6, 1, r6
pta resume_kernel, tr0
bne r6, ZERO, tr0 /* no further checks */
STI()
pta ret_with_reschedule, tr0
blink tr0, ZERO /* Do not check softirqs */
.global ret_from_exception
ret_from_exception:
preempt_stop()
ld.q SP, FRAME_S(FSSR), r6
shlri r6, 30, r6
andi r6, 1, r6
pta resume_kernel, tr0
bne r6, ZERO, tr0 /* no further checks */
/* Check softirqs */
#ifdef CONFIG_PREEMPTION
pta ret_from_syscall, tr0
blink tr0, ZERO
resume_kernel:
CLI()
pta restore_all, tr0
getcon KCR0, r6
ld.l r6, TI_PRE_COUNT, r7
beq/u r7, ZERO, tr0
need_resched:
ld.l r6, TI_FLAGS, r7
movi (1 << TIF_NEED_RESCHED), r8
and r8, r7, r8
bne r8, ZERO, tr0
getcon SR, r7
andi r7, 0xf0, r7
bne r7, ZERO, tr0
movi preempt_schedule_irq, r7
ori r7, 1, r7
ptabs r7, tr1
blink tr1, LINK
pta need_resched, tr1
blink tr1, ZERO
#endif
.global ret_from_syscall
ret_from_syscall:
ret_with_reschedule:
getcon KCR0, r6 ! r6 contains current_thread_info
ld.l r6, TI_FLAGS, r7 ! r7 contains current_thread_info->flags
movi _TIF_NEED_RESCHED, r8
and r8, r7, r8
pta work_resched, tr0
bne r8, ZERO, tr0
pta restore_all, tr1
movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8
and r8, r7, r8
pta work_notifysig, tr0
bne r8, ZERO, tr0
blink tr1, ZERO
work_resched:
pta ret_from_syscall, tr0
gettr tr0, LINK
movi schedule, r6
ptabs r6, tr0
blink tr0, ZERO /* Call schedule(), return on top */
work_notifysig:
gettr tr1, LINK
movi do_notify_resume, r6
ptabs r6, tr0
or SP, ZERO, r2
or r7, ZERO, r3
blink tr0, LINK /* Call do_notify_resume(regs, current_thread_info->flags), return here */
restore_all:
/* Do prefetches */
ld.q SP, FRAME_T(0), r6
ld.q SP, FRAME_T(1), r7
ld.q SP, FRAME_T(2), r8
ld.q SP, FRAME_T(3), r9
ptabs r6, tr0
ptabs r7, tr1
ptabs r8, tr2
ptabs r9, tr3
ld.q SP, FRAME_T(4), r6
ld.q SP, FRAME_T(5), r7
ld.q SP, FRAME_T(6), r8
ld.q SP, FRAME_T(7), r9
ptabs r6, tr4
ptabs r7, tr5
ptabs r8, tr6
ptabs r9, tr7
ld.q SP, FRAME_R(0), r0
ld.q SP, FRAME_R(1), r1
ld.q SP, FRAME_R(2), r2
ld.q SP, FRAME_R(3), r3
ld.q SP, FRAME_R(4), r4
ld.q SP, FRAME_R(5), r5
ld.q SP, FRAME_R(6), r6
ld.q SP, FRAME_R(7), r7
ld.q SP, FRAME_R(8), r8
ld.q SP, FRAME_R(9), r9
ld.q SP, FRAME_R(10), r10
ld.q SP, FRAME_R(11), r11
ld.q SP, FRAME_R(12), r12
ld.q SP, FRAME_R(13), r13
ld.q SP, FRAME_R(14), r14
ld.q SP, FRAME_R(16), r16
ld.q SP, FRAME_R(17), r17
ld.q SP, FRAME_R(18), r18
ld.q SP, FRAME_R(19), r19
ld.q SP, FRAME_R(20), r20
ld.q SP, FRAME_R(21), r21
ld.q SP, FRAME_R(22), r22
ld.q SP, FRAME_R(23), r23
ld.q SP, FRAME_R(24), r24
ld.q SP, FRAME_R(25), r25
ld.q SP, FRAME_R(26), r26
ld.q SP, FRAME_R(27), r27
ld.q SP, FRAME_R(28), r28
ld.q SP, FRAME_R(29), r29
ld.q SP, FRAME_R(30), r30
ld.q SP, FRAME_R(31), r31
ld.q SP, FRAME_R(32), r32
ld.q SP, FRAME_R(33), r33
ld.q SP, FRAME_R(34), r34
ld.q SP, FRAME_R(35), r35
ld.q SP, FRAME_R(36), r36
ld.q SP, FRAME_R(37), r37
ld.q SP, FRAME_R(38), r38
ld.q SP, FRAME_R(39), r39
ld.q SP, FRAME_R(40), r40
ld.q SP, FRAME_R(41), r41
ld.q SP, FRAME_R(42), r42
ld.q SP, FRAME_R(43), r43
ld.q SP, FRAME_R(44), r44
ld.q SP, FRAME_R(45), r45
ld.q SP, FRAME_R(46), r46
ld.q SP, FRAME_R(47), r47
ld.q SP, FRAME_R(48), r48
ld.q SP, FRAME_R(49), r49
ld.q SP, FRAME_R(50), r50
ld.q SP, FRAME_R(51), r51
ld.q SP, FRAME_R(52), r52
ld.q SP, FRAME_R(53), r53
ld.q SP, FRAME_R(54), r54
ld.q SP, FRAME_R(55), r55
ld.q SP, FRAME_R(56), r56
ld.q SP, FRAME_R(57), r57
ld.q SP, FRAME_R(58), r58
getcon SR, r59
movi SR_BLOCK_EXC, r60
or r59, r60, r59
putcon r59, SR /* SR.BL = 1, keep nesting out */
ld.q SP, FRAME_S(FSSR), r61
ld.q SP, FRAME_S(FSPC), r62
movi SR_ASID_MASK, r60
and r59, r60, r59
andc r61, r60, r61 /* Clear out older ASID */
or r59, r61, r61 /* Retain current ASID */
putcon r61, SSR
putcon r62, SPC
/* Ignore FSYSCALL_ID */
ld.q SP, FRAME_R(59), r59
ld.q SP, FRAME_R(60), r60
ld.q SP, FRAME_R(61), r61
ld.q SP, FRAME_R(62), r62
/* Last touch */
ld.q SP, FRAME_R(15), SP
rte
nop
/*
* Third level handlers for VBR-based exceptions. Adapting args to
* and/or deflecting to fourth level handlers.
*
* Fourth level handlers interface.
* Most are C-coded handlers directly pointed by the trap_jtable.
* (Third = Fourth level)
* Inputs:
* (r2) fault/interrupt code, entry number (e.g. NMI = 14,
* IRL0-3 (0000) = 16, RTLBMISS = 2, SYSCALL = 11, etc ...)
* (r3) struct pt_regs *, original register's frame pointer
* (r4) Event (0 = interrupt, 1 = TLB miss fault, 2 = Not TLB miss fault)
* (r5) TRA control register (for syscall/debug benefit only)
* (LINK) return address
* (SP) = r3
*
* Kernel TLB fault handlers will get a slightly different interface.
* (r2) struct pt_regs *, original register's frame pointer
* (r3) page fault error code (see asm/thread_info.h)
* (r4) Effective Address of fault
* (LINK) return address
* (SP) = r2
*
* fpu_error_or_IRQ? is a helper to deflect to the right cause.
*
*/
#ifdef CONFIG_MMU
tlb_miss_load:
or SP, ZERO, r2
or ZERO, ZERO, r3 /* Read */
getcon TEA, r4
pta call_do_page_fault, tr0
beq ZERO, ZERO, tr0
tlb_miss_store:
or SP, ZERO, r2
movi FAULT_CODE_WRITE, r3 /* Write */
getcon TEA, r4
pta call_do_page_fault, tr0
beq ZERO, ZERO, tr0
itlb_miss_or_IRQ:
pta its_IRQ, tr0
beqi/u r4, EVENT_INTERRUPT, tr0
/* ITLB miss */
or SP, ZERO, r2
movi FAULT_CODE_ITLB, r3
getcon TEA, r4
/* Fall through */
call_do_page_fault:
movi do_page_fault, r6
ptabs r6, tr0
blink tr0, ZERO
#endif /* CONFIG_MMU */
fpu_error_or_IRQA:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
movi fpu_state_restore_trap_handler, r6
#else
movi do_exception_error, r6
#endif
ptabs r6, tr0
blink tr0, ZERO
fpu_error_or_IRQB:
pta its_IRQ, tr0
beqi/l r4, EVENT_INTERRUPT, tr0
#ifdef CONFIG_SH_FPU
movi fpu_state_restore_trap_handler, r6
#else
movi do_exception_error, r6
#endif
ptabs r6, tr0
blink tr0, ZERO
its_IRQ:
movi do_IRQ, r6
ptabs r6, tr0
blink tr0, ZERO
/*
* system_call/unknown_trap third level handler:
*
* Inputs:
* (r2) fault/interrupt code, entry number (TRAP = 11)
* (r3) struct pt_regs *, original register's frame pointer
* (r4) Not used. Event (0=interrupt, 1=TLB miss fault, 2=Not TLB miss fault)
* (r5) TRA Control Reg (0x00xyzzzz: x=1 SYSCALL, y = #args, z=nr)
* (SP) = r3
* (LINK) return address: ret_from_exception
* (*r3) Syscall parms: SC#, arg0, arg1, ..., arg5 in order (Saved r2/r7)
*
* Outputs:
* (*r3) Syscall reply (Saved r2)
* (LINK) In the syscall case only, it can be scrapped.
* The common second level post handler will be ret_from_syscall.
* The common (non-trace) exit point to that is syscall_ret (which
* saves the result to r2). The common bad exit point is syscall_bad
* (which puts -ENOSYS into r2 and falls through to syscall_ret).
*
*/
unknown_trap:
/* Unknown Trap or User Trace */
movi do_unknown_trapa, r6
ptabs r6, tr0
ld.q r3, FRAME_R(9), r2 /* r2 = #arg << 16 | syscall # */
andi r2, 0x1ff, r2 /* r2 = syscall # */
blink tr0, LINK
pta syscall_ret, tr0
blink tr0, ZERO
/* New syscall implementation*/
system_call:
pta unknown_trap, tr0
or r5, ZERO, r4 /* TRA (=r5) -> r4 */
shlri r4, 20, r4
bnei r4, 1, tr0 /* unknown_trap if not 0x1yzzzz */
/* It's a system call */
st.q r3, FRAME_S(FSYSCALL_ID), r5 /* ID (0x1yzzzz) -> stack */
andi r5, 0x1ff, r5 /* syscall # -> r5 */
STI()
pta syscall_allowed, tr0
movi NR_syscalls - 1, r4 /* Last valid */
bgeu/l r4, r5, tr0
syscall_bad:
/* Return ENOSYS ! */
movi -(ENOSYS), r2 /* Fall-through */
.global syscall_ret
syscall_ret:
st.q SP, FRAME_R(9), r2 /* Expecting SP back to BASIC frame */
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO
/* A different return path for ret_from_fork, because we now need
* to call schedule_tail with the later kernels. Since prev is already
* loaded into r2 by switch_to(), we can call it straight away
*/
.global ret_from_fork
ret_from_fork:
movi schedule_tail,r5
ori r5, 1, r5
ptabs r5, tr0
blink tr0, LINK
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO
.global ret_from_kernel_thread
ret_from_kernel_thread:
movi schedule_tail,r5
ori r5, 1, r5
ptabs r5, tr0
blink tr0, LINK
ld.q SP, FRAME_R(2), r2
ld.q SP, FRAME_R(3), r3
ptabs r3, tr0
blink tr0, LINK
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO
syscall_allowed:
/* Use LINK to deflect the exit point, default is syscall_ret */
pta syscall_ret, tr0
gettr tr0, LINK
pta syscall_notrace, tr0
getcon KCR0, r2
ld.l r2, TI_FLAGS, r4
movi _TIF_WORK_SYSCALL_MASK, r6
and r6, r4, r6
beq/l r6, ZERO, tr0
/* Trace it by calling syscall_trace before and after */
movi do_syscall_trace_enter, r4
or SP, ZERO, r2
ptabs r4, tr0
blink tr0, LINK
/* Save the retval */
st.q SP, FRAME_R(2), r2
/* Reload syscall number as r5 is trashed by do_syscall_trace_enter */
ld.q SP, FRAME_S(FSYSCALL_ID), r5
andi r5, 0x1ff, r5
pta syscall_ret_trace, tr0
gettr tr0, LINK
syscall_notrace:
/* Now point to the appropriate 4th level syscall handler */
movi sys_call_table, r4
shlli r5, 2, r5
ldx.l r4, r5, r5
ptabs r5, tr0
/* Prepare original args */
ld.q SP, FRAME_R(2), r2
ld.q SP, FRAME_R(3), r3
ld.q SP, FRAME_R(4), r4
ld.q SP, FRAME_R(5), r5
ld.q SP, FRAME_R(6), r6
ld.q SP, FRAME_R(7), r7
/* And now the trick for those syscalls requiring regs * ! */
or SP, ZERO, r8
/* Call it */
blink tr0, ZERO /* LINK is already properly set */
syscall_ret_trace:
/* We get back here only if under trace */
st.q SP, FRAME_R(9), r2 /* Save return value */
movi do_syscall_trace_leave, LINK
or SP, ZERO, r2
ptabs LINK, tr0
blink tr0, LINK
/* This needs to be done after any syscall tracing */
ld.q SP, FRAME_S(FSPC), r2
addi r2, 4, r2 /* Move PC, being pre-execution event */
st.q SP, FRAME_S(FSPC), r2
pta ret_from_syscall, tr0
blink tr0, ZERO /* Resume normal return sequence */
/*
* --- Switch to running under a particular ASID and return the previous ASID value
* --- The caller is assumed to have done a cli before calling this.
*
* Input r2 : new ASID
* Output r2 : old ASID
*/
.global switch_and_save_asid
switch_and_save_asid:
getcon sr, r0
movi 255, r4
shlli r4, 16, r4 /* r4 = mask to select ASID */
and r0, r4, r3 /* r3 = shifted old ASID */
andi r2, 255, r2 /* mask down new ASID */
shlli r2, 16, r2 /* align new ASID against SR.ASID */
andc r0, r4, r0 /* efface old ASID from SR */
or r0, r2, r0 /* insert the new ASID */
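! Install the new SR value via SSR/SPC + rte so execution resumes at the
! 1: label below with the new ASID already in effect.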
putcon r0, ssr
movi 1f, r0
putcon r0, spc
rte
nop
1:
ptabs LINK, tr0
shlri r3, 16, r2 /* r2 = old ASID */
blink tr0, r63
.global route_to_panic_handler
route_to_panic_handler:
/* Switch to real mode, goto panic_handler, don't return. Useful for
last-chance debugging, e.g. if no output wants to go to the console.
*/
movi panic_handler - CONFIG_PAGE_OFFSET, r1
ptabs r1, tr0
pta 1f, tr1
gettr tr1, r0
putcon r0, spc
getcon sr, r0
movi 1, r1
shlli r1, 31, r1
andc r0, r1, r0
putcon r0, ssr
rte
nop
1: /* Now in real mode */
blink tr0, r63
nop
.global peek_real_address_q
peek_real_address_q:
/* Two args:
r2 : real mode address to peek
r2(out) : result quadword
This is provided as a cheapskate way of manipulating device
registers for debugging (to avoid the need to ioremap the debug
module, and to avoid the need to ioremap the watchpoint
controller in a way that identity maps sufficient bits to avoid the
SH5-101 cut2 silicon defect).
This code is not performance critical
*/
add.l r2, r63, r2 /* sign extend address */
getcon sr, r0 /* r0 = saved original SR */
movi 1, r1
shlli r1, 28, r1
or r0, r1, r1 /* r0 with block bit set */
putcon r1, sr /* now in critical section */
movi 1, r36
shlli r36, 31, r36
andc r1, r36, r1 /* turn sr.mmu off in real mode section */
putcon r1, ssr
movi .peek0 - CONFIG_PAGE_OFFSET, r36 /* real mode target address */
movi 1f, r37 /* virtual mode return addr */
putcon r36, spc
synco
rte
nop
.peek0: /* come here in real mode, don't touch caches!!
still in critical section (sr.bl==1) */
putcon r0, ssr
putcon r37, spc
/* Here's the actual peek. If the address is bad, all bets are now off
* what will happen (handlers invoked in real-mode = bad news) */
ld.q r2, 0, r2
synco
rte /* Back to virtual mode */
nop
1:
ptabs LINK, tr0
blink tr0, r63
.global poke_real_address_q
poke_real_address_q:
/* Two args:
r2 : real mode address to poke
r3 : quadword value to write.
This is provided as a cheapskate way of manipulating device
registers for debugging (to avoid the need to ioremap the debug
module, and to avoid the need to ioremap the watchpoint
controller in a way that identity maps sufficient bits to avoid the
SH5-101 cut2 silicon defect).
This code is not performance critical
*/
add.l r2, r63, r2 /* sign extend address */
getcon sr, r0 /* r0 = saved original SR */
movi 1, r1
shlli r1, 28, r1
or r0, r1, r1 /* r0 with block bit set */
putcon r1, sr /* now in critical section */
movi 1, r36
shlli r36, 31, r36
andc r1, r36, r1 /* turn sr.mmu off in real mode section */
putcon r1, ssr
movi .poke0-CONFIG_PAGE_OFFSET, r36 /* real mode target address */
movi 1f, r37 /* virtual mode return addr */
putcon r36, spc
synco
rte
nop
.poke0: /* come here in real mode, don't touch caches!!
still in critical section (sr.bl==1) */
putcon r0, ssr
putcon r37, spc
/* Here's the actual poke. If the address is bad, all bets are now off
* what will happen (handlers invoked in real-mode = bad news) */
st.q r2, 0, r3
synco
rte /* Back to virtual mode */
nop
1:
ptabs LINK, tr0
blink tr0, r63
#ifdef CONFIG_MMU
/*
* --- User Access Handling Section
*/
/*
* User Access support. It has all moved into non-inlined assembler
* functions here.
*
* __kernel_size_t __copy_user(void *__to, const void *__from,
* __kernel_size_t __n)
*
* Inputs:
* (r2) target address
* (r3) source address
* (r4) size in bytes
*
* Outputs:
* (*r2) target data
* (r2) non-copied bytes
*
* If a fault occurs on the user pointer, bail out early and return the
* number of bytes not copied in r2.
* Strategy : for large blocks, call a real memcpy function which can
* move >1 byte at a time using unaligned ld/st instructions, and can
* manipulate the cache using prefetch + alloco to improve the speed
* further. If a fault occurs in that function, just revert to the
* byte-by-byte approach used for small blocks; this is rare so the
* performance hit for that case does not matter.
*
* For small blocks it's not worth the overhead of setting up and calling
* the memcpy routine; do the copy a byte at a time.
*
*/
.global __copy_user
__copy_user:
pta __copy_user_byte_by_byte, tr1
movi 16, r0 ! this value is a best guess, should tune it by benchmarking
bge/u r0, r4, tr1
pta copy_user_memcpy, tr0
addi SP, -32, SP
/* Save arguments in case we have to fix-up unhandled page fault */
st.q SP, 0, r2
st.q SP, 8, r3
st.q SP, 16, r4
st.q SP, 24, r35 ! r35 is callee-save
/* Save LINK in a register to reduce RTS time later (otherwise
ld SP,*,LINK;ptabs LINK;trn;blink trn,r63 becomes a critical path) */
ori LINK, 0, r35
blink tr0, LINK
/* Copy completed normally if we get back here */
ptabs r35, tr0
ld.q SP, 24, r35
/* don't restore r2-r4, pointless */
/* set result=r2 to zero as the copy must have succeeded. */
or r63, r63, r2
addi SP, 32, SP
blink tr0, r63 ! RTS
.global __copy_user_fixup
__copy_user_fixup:
/* Restore stack frame */
ori r35, 0, LINK
ld.q SP, 24, r35
ld.q SP, 16, r4
ld.q SP, 8, r3
ld.q SP, 0, r2
addi SP, 32, SP
/* Fall through to original code, in the 'same' state we entered with */
/* The slow byte-by-byte method is used if the fast copy traps due to a bad
user address. In that rare case, the speed drop can be tolerated. */
__copy_user_byte_by_byte:
pta ___copy_user_exit, tr1
pta ___copy_user1, tr0
beq/u r4, r63, tr1 /* early exit for zero length copy */
sub r2, r3, r0
addi r0, -1, r0
___copy_user1:
ld.b r3, 0, r5 /* Fault address 1 */
/* Could rewrite this to use just 1 add, but the second comes 'free'
due to load latency */
addi r3, 1, r3
addi r4, -1, r4 /* No real fixup required */
___copy_user2:
stx.b r3, r0, r5 /* Fault address 2 */
bne r4, ZERO, tr0
___copy_user_exit:
or r4, ZERO, r2
ptabs LINK, tr0
blink tr0, ZERO
/*
* __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
*
* Inputs:
* (r2) target address
* (r3) size in bytes
*
* Outputs:
* (*r2) zero-ed target data
* (r2) non-zero-ed bytes
*/
.global __clear_user
__clear_user:
pta ___clear_user_exit, tr1
pta ___clear_user1, tr0
beq/u r3, r63, tr1
___clear_user1:
st.b r2, 0, ZERO /* Fault address */
addi r2, 1, r2
addi r3, -1, r3 /* No real fixup required */
bne r3, ZERO, tr0
___clear_user_exit:
or r3, ZERO, r2
ptabs LINK, tr0
blink tr0, ZERO
#endif /* CONFIG_MMU */
/*
* extern long __get_user_asm_?(void *val, long addr)
*
* Inputs:
* (r2) dest address
* (r3) source address (in User Space)
*
* Outputs:
* (r2) -EFAULT (faulting)
* 0 (not faulting)
*/
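/*
 * Note: each faulting access below (the ___get_user_asm_?1 labels) is
 * listed in the __ex_table section at the end of this file; on a fault
 * the fixup resumes at the matching _exit label, so r2 keeps the
 * -EFAULT value set up just beforehand.
 */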
.global __get_user_asm_b
__get_user_asm_b:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_b1:
ld.b r3, 0, r5 /* r5 = data */
st.b r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_b_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __get_user_asm_w
__get_user_asm_w:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_w1:
ld.w r3, 0, r5 /* r5 = data */
st.w r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_w_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __get_user_asm_l
__get_user_asm_l:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_l1:
ld.l r3, 0, r5 /* r5 = data */
st.l r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_l_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __get_user_asm_q
__get_user_asm_q:
or r2, ZERO, r4
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___get_user_asm_q1:
ld.q r3, 0, r5 /* r5 = data */
st.q r4, 0, r5
or ZERO, ZERO, r2
___get_user_asm_q_exit:
ptabs LINK, tr0
blink tr0, ZERO
/*
* extern long __put_user_asm_?(void *pval, long addr)
*
* Inputs:
* (r2) kernel pointer to value
* (r3) dest address (in User Space)
*
* Outputs:
* (r2) -EFAULT (faulting)
* 0 (not faulting)
*/
.global __put_user_asm_b
__put_user_asm_b:
ld.b r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_b1:
st.b r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_b_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __put_user_asm_w
__put_user_asm_w:
ld.w r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_w1:
st.w r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_w_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __put_user_asm_l
__put_user_asm_l:
ld.l r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_l1:
st.l r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_l_exit:
ptabs LINK, tr0
blink tr0, ZERO
.global __put_user_asm_q
__put_user_asm_q:
ld.q r2, 0, r4 /* r4 = data */
movi -(EFAULT), r2 /* r2 = reply, no real fixup */
___put_user_asm_q1:
st.q r3, 0, r4
or ZERO, ZERO, r2
___put_user_asm_q_exit:
ptabs LINK, tr0
blink tr0, ZERO
panic_stash_regs:
/* The idea is : when we get an unhandled panic, we dump the registers
to a known memory location, then just sit in a tight loop.
This allows the human to look at the memory region through the GDB
session (assuming the debug module's SHwy initiator isn't locked up
or anything), to hopefully analyze the cause of the panic. */
/* On entry, former r15 (SP) is in DCR
former r0 is at resvec_saved_area + 0
former r1 is at resvec_saved_area + 8
former tr0 is at resvec_saved_area + 32
DCR is the only register whose value is lost altogether.
*/
movi 0xffffffff80000000, r0 ! phy of dump area
ld.q SP, 0x000, r1 ! former r0
st.q r0, 0x000, r1
ld.q SP, 0x008, r1 ! former r1
st.q r0, 0x008, r1
st.q r0, 0x010, r2
st.q r0, 0x018, r3
st.q r0, 0x020, r4
st.q r0, 0x028, r5
st.q r0, 0x030, r6
st.q r0, 0x038, r7
st.q r0, 0x040, r8
st.q r0, 0x048, r9
st.q r0, 0x050, r10
st.q r0, 0x058, r11
st.q r0, 0x060, r12
st.q r0, 0x068, r13
st.q r0, 0x070, r14
getcon dcr, r14
st.q r0, 0x078, r14
st.q r0, 0x080, r16
st.q r0, 0x088, r17
st.q r0, 0x090, r18
st.q r0, 0x098, r19
st.q r0, 0x0a0, r20
st.q r0, 0x0a8, r21
st.q r0, 0x0b0, r22
st.q r0, 0x0b8, r23
st.q r0, 0x0c0, r24
st.q r0, 0x0c8, r25
st.q r0, 0x0d0, r26
st.q r0, 0x0d8, r27
st.q r0, 0x0e0, r28
st.q r0, 0x0e8, r29
st.q r0, 0x0f0, r30
st.q r0, 0x0f8, r31
st.q r0, 0x100, r32
st.q r0, 0x108, r33
st.q r0, 0x110, r34
st.q r0, 0x118, r35
st.q r0, 0x120, r36
st.q r0, 0x128, r37
st.q r0, 0x130, r38
st.q r0, 0x138, r39
st.q r0, 0x140, r40
st.q r0, 0x148, r41
st.q r0, 0x150, r42
st.q r0, 0x158, r43
st.q r0, 0x160, r44
st.q r0, 0x168, r45
st.q r0, 0x170, r46
st.q r0, 0x178, r47
st.q r0, 0x180, r48
st.q r0, 0x188, r49
st.q r0, 0x190, r50
st.q r0, 0x198, r51
st.q r0, 0x1a0, r52
st.q r0, 0x1a8, r53
st.q r0, 0x1b0, r54
st.q r0, 0x1b8, r55
st.q r0, 0x1c0, r56
st.q r0, 0x1c8, r57
st.q r0, 0x1d0, r58
st.q r0, 0x1d8, r59
st.q r0, 0x1e0, r60
st.q r0, 0x1e8, r61
st.q r0, 0x1f0, r62
st.q r0, 0x1f8, r63 ! bogus, but for consistency's sake...
ld.q SP, 0x020, r1 ! former tr0
st.q r0, 0x200, r1
gettr tr1, r1
st.q r0, 0x208, r1
gettr tr2, r1
st.q r0, 0x210, r1
gettr tr3, r1
st.q r0, 0x218, r1
gettr tr4, r1
st.q r0, 0x220, r1
gettr tr5, r1
st.q r0, 0x228, r1
gettr tr6, r1
st.q r0, 0x230, r1
gettr tr7, r1
st.q r0, 0x238, r1
getcon sr, r1
getcon ssr, r2
getcon pssr, r3
getcon spc, r4
getcon pspc, r5
getcon intevt, r6
getcon expevt, r7
getcon pexpevt, r8
getcon tra, r9
getcon tea, r10
getcon kcr0, r11
getcon kcr1, r12
getcon vbr, r13
getcon resvec, r14
st.q r0, 0x240, r1
st.q r0, 0x248, r2
st.q r0, 0x250, r3
st.q r0, 0x258, r4
st.q r0, 0x260, r5
st.q r0, 0x268, r6
st.q r0, 0x270, r7
st.q r0, 0x278, r8
st.q r0, 0x280, r9
st.q r0, 0x288, r10
st.q r0, 0x290, r11
st.q r0, 0x298, r12
st.q r0, 0x2a0, r13
st.q r0, 0x2a8, r14
getcon SPC,r2
getcon SSR,r3
getcon EXPEVT,r4
/* Prepare to jump to C - physical address */
movi panic_handler-CONFIG_PAGE_OFFSET, r1
ori r1, 1, r1
ptabs r1, tr0
getcon DCR, SP
blink tr0, ZERO
nop
nop
nop
nop
/*
* --- Signal Handling Section
*/
/*
* extern long long _sa_default_rt_restorer
* extern long long _sa_default_restorer
*
* or, better,
*
* extern void _sa_default_rt_restorer(void)
* extern void _sa_default_restorer(void)
*
* Code prototypes to do a sys_rt_sigreturn() or sys_sigreturn()
* from user space. Copied into user space by signal management.
* Both must be quad aligned and 2 quad long (4 instructions).
*
*/
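/*
 * r9 ends up as 0x10 << 16 | __NR_*sigreturn, i.e. a TRA value of the
 * 0x1yzzzz form (x = 1, y = 0 args) that the system_call entry above
 * accepts as a valid syscall trap.
 */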
.balign 8
.global sa_default_rt_restorer
sa_default_rt_restorer:
movi 0x10, r9
shori __NR_rt_sigreturn, r9
trapa r9
nop
.balign 8
.global sa_default_restorer
sa_default_restorer:
movi 0x10, r9
shori __NR_sigreturn, r9
trapa r9
nop
/*
* --- __ex_table Section
*/
/*
* User Access Exception Table.
*/
.section __ex_table, "a"
.global asm_uaccess_start /* Just a marker */
asm_uaccess_start:
#ifdef CONFIG_MMU
.long ___copy_user1, ___copy_user_exit
.long ___copy_user2, ___copy_user_exit
.long ___clear_user1, ___clear_user_exit
#endif
.long ___get_user_asm_b1, ___get_user_asm_b_exit
.long ___get_user_asm_w1, ___get_user_asm_w_exit
.long ___get_user_asm_l1, ___get_user_asm_l_exit
.long ___get_user_asm_q1, ___get_user_asm_q_exit
.long ___put_user_asm_b1, ___put_user_asm_b_exit
.long ___put_user_asm_w1, ___put_user_asm_w_exit
.long ___put_user_asm_l1, ___put_user_asm_l_exit
.long ___put_user_asm_q1, ___put_user_asm_q_exit
.global asm_uaccess_end /* Just a marker */
asm_uaccess_end:
/*
* --- .init.text Section
*/
__INIT
/*
* void trap_init (void)
*
*/
.global trap_init
trap_init:
addi SP, -24, SP /* Room to save r28/r29/r30 */
st.q SP, 0, r28
st.q SP, 8, r29
st.q SP, 16, r30
/* Set VBR and RESVEC */
movi LVBR_block, r19
andi r19, -4, r19 /* reset MMUOFF + reserved */
/* For RESVEC exceptions we force the MMU off, which means we need the
physical address. */
movi LRESVEC_block-CONFIG_PAGE_OFFSET, r20
andi r20, -4, r20 /* reset reserved */
ori r20, 1, r20 /* set MMUOFF */
putcon r19, VBR
putcon r20, RESVEC
/* Sanity check */
movi LVBR_block_end, r21
andi r21, -4, r21
movi BLOCK_SIZE, r29 /* r29 = expected size */
or r19, ZERO, r30
add r19, r29, r19
/*
* Ugly, but better loop forever now than crash afterwards.
* We should print a message, but if we touch LVBR or
* LRESVEC blocks we should not be surprised if we get stuck
* in trap_init().
*/
pta trap_init_loop, tr1
gettr tr1, r28 /* r28 = trap_init_loop */
sub r21, r30, r30 /* r30 = actual size */
/*
* VBR/RESVEC handlers overlap by being bigger than
* allowed. Very bad. Just loop forever.
* (r28) panic/loop address
* (r29) expected size
* (r30) actual size
*/
trap_init_loop:
bne r19, r21, tr1
/* Now that exception vectors are set up reset SR.BL */
getcon SR, r22
movi SR_UNBLOCK_EXC, r23
and r22, r23, r22
putcon r22, SR
addi SP, 24, SP
ptabs LINK, tr0
blink tr0, ZERO
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/fpu.c
*
* Copyright (C) 2001 Manuela Cirronis, Paolo Alberelli
* Copyright (C) 2002 STMicroelectronics Limited
* Author : Stuart Menefy
*
* Started from SH4 version:
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <asm/processor.h>
void save_fpu(struct task_struct *tsk)
{
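/*
 * Spill the 64 FP registers as 32 register pairs (fst.p), then read
 * FPSCR into fr63 with fgetscr and store it in the final slot.
 */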
asm volatile("fst.p %0, (0*8), fp0\n\t"
"fst.p %0, (1*8), fp2\n\t"
"fst.p %0, (2*8), fp4\n\t"
"fst.p %0, (3*8), fp6\n\t"
"fst.p %0, (4*8), fp8\n\t"
"fst.p %0, (5*8), fp10\n\t"
"fst.p %0, (6*8), fp12\n\t"
"fst.p %0, (7*8), fp14\n\t"
"fst.p %0, (8*8), fp16\n\t"
"fst.p %0, (9*8), fp18\n\t"
"fst.p %0, (10*8), fp20\n\t"
"fst.p %0, (11*8), fp22\n\t"
"fst.p %0, (12*8), fp24\n\t"
"fst.p %0, (13*8), fp26\n\t"
"fst.p %0, (14*8), fp28\n\t"
"fst.p %0, (15*8), fp30\n\t"
"fst.p %0, (16*8), fp32\n\t"
"fst.p %0, (17*8), fp34\n\t"
"fst.p %0, (18*8), fp36\n\t"
"fst.p %0, (19*8), fp38\n\t"
"fst.p %0, (20*8), fp40\n\t"
"fst.p %0, (21*8), fp42\n\t"
"fst.p %0, (22*8), fp44\n\t"
"fst.p %0, (23*8), fp46\n\t"
"fst.p %0, (24*8), fp48\n\t"
"fst.p %0, (25*8), fp50\n\t"
"fst.p %0, (26*8), fp52\n\t"
"fst.p %0, (27*8), fp54\n\t"
"fst.p %0, (28*8), fp56\n\t"
"fst.p %0, (29*8), fp58\n\t"
"fst.p %0, (30*8), fp60\n\t"
"fst.p %0, (31*8), fp62\n\t"
"fgetscr fr63\n\t"
"fst.s %0, (32*8), fr63\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
void restore_fpu(struct task_struct *tsk)
{
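/*
 * Reload the register pairs, restore FPSCR via fr63/fputscr, and only
 * then reload the fp62 pair so the real fr62/fr63 values survive.
 */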
asm volatile("fld.p %0, (0*8), fp0\n\t"
"fld.p %0, (1*8), fp2\n\t"
"fld.p %0, (2*8), fp4\n\t"
"fld.p %0, (3*8), fp6\n\t"
"fld.p %0, (4*8), fp8\n\t"
"fld.p %0, (5*8), fp10\n\t"
"fld.p %0, (6*8), fp12\n\t"
"fld.p %0, (7*8), fp14\n\t"
"fld.p %0, (8*8), fp16\n\t"
"fld.p %0, (9*8), fp18\n\t"
"fld.p %0, (10*8), fp20\n\t"
"fld.p %0, (11*8), fp22\n\t"
"fld.p %0, (12*8), fp24\n\t"
"fld.p %0, (13*8), fp26\n\t"
"fld.p %0, (14*8), fp28\n\t"
"fld.p %0, (15*8), fp30\n\t"
"fld.p %0, (16*8), fp32\n\t"
"fld.p %0, (17*8), fp34\n\t"
"fld.p %0, (18*8), fp36\n\t"
"fld.p %0, (19*8), fp38\n\t"
"fld.p %0, (20*8), fp40\n\t"
"fld.p %0, (21*8), fp42\n\t"
"fld.p %0, (22*8), fp44\n\t"
"fld.p %0, (23*8), fp46\n\t"
"fld.p %0, (24*8), fp48\n\t"
"fld.p %0, (25*8), fp50\n\t"
"fld.p %0, (26*8), fp52\n\t"
"fld.p %0, (27*8), fp54\n\t"
"fld.p %0, (28*8), fp56\n\t"
"fld.p %0, (29*8), fp58\n\t"
"fld.p %0, (30*8), fp60\n\t"
"fld.s %0, (32*8), fr63\n\t"
"fputscr fr63\n\t"
"fld.p %0, (31*8), fp62\n\t"
: /* no output */
: "r" (&tsk->thread.xstate->hardfpu)
: "memory");
}
asmlinkage void do_fpu_error(unsigned long ex, struct pt_regs *regs)
{
regs->pc += 4;
force_sig(SIGFPE);
}
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/probe.c
*
* CPU Subtype Probing for SH-5.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/string.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/tlb.h>
void cpu_probe(void)
{
unsigned long long cir;
/*
* Do peeks in real mode to avoid having to set up a mapping for
* the WPC registers. On SH5-101 cut2, such a mapping would be
* exposed to an address translation erratum which would make it
* hard to set up correctly.
*/
cir = peek_real_address_q(0x0d000008);
if ((cir & 0xffff) == 0x5103)
boot_cpu_data.type = CPU_SH5_103;
else if (((cir >> 32) & 0xffff) == 0x51e2)
/* CPU.VCR aliased at CIR address on SH5-101 */
boot_cpu_data.type = CPU_SH5_101;
boot_cpu_data.family = CPU_FAMILY_SH5;
/*
* First, setup some sane values for the I-cache.
*/
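/* 4 ways of 256 sets; entry_shift 5 means 32-byte lines, so each way is
 * 8 KiB (matching the 1 << 13 way increment) and the cache is 32 KiB. */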
boot_cpu_data.icache.ways = 4;
boot_cpu_data.icache.sets = 256;
boot_cpu_data.icache.linesz = L1_CACHE_BYTES;
boot_cpu_data.icache.way_incr = (1 << 13);
boot_cpu_data.icache.entry_shift = 5;
boot_cpu_data.icache.way_size = boot_cpu_data.icache.sets *
boot_cpu_data.icache.linesz;
boot_cpu_data.icache.entry_mask = 0x1fe0;
boot_cpu_data.icache.flags = 0;
/*
* Next, setup some sane values for the D-cache.
*
* On the SH5, these are pretty consistent with the I-cache settings,
* so we just copy over the existing definitions.. these can be fixed
* up later, especially if we add runtime CPU probing.
*
* Though in the meantime it saves us from having to duplicate all of
* the above definitions..
*/
boot_cpu_data.dcache = boot_cpu_data.icache;
/*
* Setup any cache-related flags here
*/
#if defined(CONFIG_CACHE_WRITETHROUGH)
set_bit(SH_CACHE_MODE_WT, &(boot_cpu_data.dcache.flags));
#elif defined(CONFIG_CACHE_WRITEBACK)
set_bit(SH_CACHE_MODE_WB, &(boot_cpu_data.dcache.flags));
#endif
/* Setup some I/D TLB defaults */
sh64_tlb_init();
}
// SPDX-License-Identifier: GPL-2.0
/*
* SH5-101/SH5-103 CPU Setup
*
* Copyright (C) 2009 Paul Mundt
*/
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/serial.h>
#include <linux/serial_sci.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sh_timer.h>
#include <asm/addrspace.h>
#include <asm/platform_early.h>
static struct plat_sci_port scif0_platform_data = {
.flags = UPF_IOREMAP,
.scscr = SCSCR_REIE,
.type = PORT_SCIF,
};
static struct resource scif0_resources[] = {
DEFINE_RES_MEM(PHYS_PERIPHERAL_BLOCK + 0x01030000, 0x100),
DEFINE_RES_IRQ(39),
DEFINE_RES_IRQ(40),
DEFINE_RES_IRQ(42),
};
static struct platform_device scif0_device = {
.name = "sh-sci",
.id = 0,
.resource = scif0_resources,
.num_resources = ARRAY_SIZE(scif0_resources),
.dev = {
.platform_data = &scif0_platform_data,
},
};
static struct resource rtc_resources[] = {
[0] = {
.start = PHYS_PERIPHERAL_BLOCK + 0x01040000,
.end = PHYS_PERIPHERAL_BLOCK + 0x01040000 + 0x58 - 1,
.flags = IORESOURCE_IO,
},
[1] = {
/* Period IRQ */
.start = IRQ_PRI,
.flags = IORESOURCE_IRQ,
},
[2] = {
/* Carry IRQ */
.start = IRQ_CUI,
.flags = IORESOURCE_IRQ,
},
[3] = {
/* Alarm IRQ */
.start = IRQ_ATI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device rtc_device = {
.name = "sh-rtc",
.id = -1,
.num_resources = ARRAY_SIZE(rtc_resources),
.resource = rtc_resources,
};
#define TMU_BLOCK_OFF 0x01020000
#define TMU_BASE (PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF)
static struct sh_timer_config tmu0_platform_data = {
.channels_mask = 7,
};
static struct resource tmu0_resources[] = {
DEFINE_RES_MEM(TMU_BASE, 0x30),
DEFINE_RES_IRQ(IRQ_TUNI0),
DEFINE_RES_IRQ(IRQ_TUNI1),
DEFINE_RES_IRQ(IRQ_TUNI2),
};
static struct platform_device tmu0_device = {
.name = "sh-tmu",
.id = 0,
.dev = {
.platform_data = &tmu0_platform_data,
},
.resource = tmu0_resources,
.num_resources = ARRAY_SIZE(tmu0_resources),
};
static struct platform_device *sh5_early_devices[] __initdata = {
&scif0_device,
&tmu0_device,
};
static struct platform_device *sh5_devices[] __initdata = {
&rtc_device,
};
static int __init sh5_devices_setup(void)
{
int ret;
ret = platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
if (unlikely(ret != 0))
return ret;
return platform_add_devices(sh5_devices,
ARRAY_SIZE(sh5_devices));
}
arch_initcall(sh5_devices_setup);
void __init plat_early_device_setup(void)
{
sh_early_platform_add_devices(sh5_early_devices,
ARRAY_SIZE(sh5_early_devices));
}
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/cpu/sh5/switchto.S
*
* sh64 context switch
*
* Copyright (C) 2004 Richard Curnow
*/
.section .text..SHmedia32,"ax"
.little
.balign 32
.type sh64_switch_to,@function
.global sh64_switch_to
.global __sh64_switch_to_end
sh64_switch_to:
/* Incoming args
r2 - prev
r3 - &prev->thread
r4 - next
r5 - &next->thread
Outgoing results
r2 - last (=prev) : this just stays in r2 throughout
Want to create a full (struct pt_regs) on the stack to allow backtracing
functions to work. However, we only need to populate the callee-save
register slots in this structure; since we're a function our ancestors must
have themselves preserved all caller saved state in the stack. This saves
some wasted effort since we won't need to look at the values.
In particular, all caller-save registers are immediately available for
scratch use.
*/
#define FRAME_SIZE (76*8 + 8)
movi FRAME_SIZE, r0
sub.l r15, r0, r15
! Do normal-style register save to support backtrace
st.l r15, 0, r18 ! save link reg
st.l r15, 4, r14 ! save fp
add.l r15, r63, r14 ! setup frame pointer
! hopefully this looks normal to the backtrace now.
addi.l r15, 8, r1 ! base of pt_regs
addi.l r1, 24, r0 ! base of pt_regs.regs
addi.l r0, (63*8), r8 ! base of pt_regs.trregs
/* Note : to be fixed?
struct pt_regs is really designed for holding the state on entry
to an exception, i.e. pc,sr,regs etc. However, for the context
switch state, some of this is not required. But the unwinder takes
struct pt_regs * as an arg so we have to build this structure
to allow unwinding switched tasks in show_state() */
st.q r0, ( 9*8), r9
st.q r0, (10*8), r10
st.q r0, (11*8), r11
st.q r0, (12*8), r12
st.q r0, (13*8), r13
st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
! the point where the process is left in suspended animation, i.e. current
! fp here, not the saved one.
st.q r0, (16*8), r16
st.q r0, (24*8), r24
st.q r0, (25*8), r25
st.q r0, (26*8), r26
st.q r0, (27*8), r27
st.q r0, (28*8), r28
st.q r0, (29*8), r29
st.q r0, (30*8), r30
st.q r0, (31*8), r31
st.q r0, (32*8), r32
st.q r0, (33*8), r33
st.q r0, (34*8), r34
st.q r0, (35*8), r35
st.q r0, (44*8), r44
st.q r0, (45*8), r45
st.q r0, (46*8), r46
st.q r0, (47*8), r47
st.q r0, (48*8), r48
st.q r0, (49*8), r49
st.q r0, (50*8), r50
st.q r0, (51*8), r51
st.q r0, (52*8), r52
st.q r0, (53*8), r53
st.q r0, (54*8), r54
st.q r0, (55*8), r55
st.q r0, (56*8), r56
st.q r0, (57*8), r57
st.q r0, (58*8), r58
st.q r0, (59*8), r59
! do this early as pta->gettr has no pipeline forwarding (=> 5 cycle latency)
! Use a local label to avoid creating a symbol that will confuse the
! backtrace
pta .Lsave_pc, tr0
gettr tr5, r45
gettr tr6, r46
gettr tr7, r47
st.q r8, (5*8), r45
st.q r8, (6*8), r46
st.q r8, (7*8), r47
! Now switch context
gettr tr0, r9
st.l r3, 0, r15 ! prev->thread.sp
st.l r3, 8, r1 ! prev->thread.kregs
st.l r3, 4, r9 ! prev->thread.pc
st.q r1, 0, r9 ! save prev->thread.pc into pt_regs->pc
! Load PC for next task (init value or save_pc later)
ld.l r5, 4, r18 ! next->thread.pc
! Switch stacks
ld.l r5, 0, r15 ! next->thread.sp
ptabs r18, tr0
! Update current
ld.l r4, 4, r9 ! next->thread_info (2nd element of next task_struct)
putcon r9, kcr0 ! current = next->thread_info
! go to save_pc for a reschedule, or the initial thread.pc for a new process
blink tr0, r63
! Restore (when we come back to a previously saved task)
.Lsave_pc:
addi.l r15, 32, r0 ! r0 = next's regs
addi.l r0, (63*8), r8 ! r8 = next's tr_regs
ld.q r8, (5*8), r45
ld.q r8, (6*8), r46
ld.q r8, (7*8), r47
ptabs r45, tr5
ptabs r46, tr6
ptabs r47, tr7
ld.q r0, ( 9*8), r9
ld.q r0, (10*8), r10
ld.q r0, (11*8), r11
ld.q r0, (12*8), r12
ld.q r0, (13*8), r13
ld.q r0, (14*8), r14
ld.q r0, (16*8), r16
ld.q r0, (24*8), r24
ld.q r0, (25*8), r25
ld.q r0, (26*8), r26
ld.q r0, (27*8), r27
ld.q r0, (28*8), r28
ld.q r0, (29*8), r29
ld.q r0, (30*8), r30
ld.q r0, (31*8), r31
ld.q r0, (32*8), r32
ld.q r0, (33*8), r33
ld.q r0, (34*8), r34
ld.q r0, (35*8), r35
ld.q r0, (44*8), r44
ld.q r0, (45*8), r45
ld.q r0, (46*8), r46
ld.q r0, (47*8), r47
ld.q r0, (48*8), r48
ld.q r0, (49*8), r49
ld.q r0, (50*8), r50
ld.q r0, (51*8), r51
ld.q r0, (52*8), r52
ld.q r0, (53*8), r53
ld.q r0, (54*8), r54
ld.q r0, (55*8), r55
ld.q r0, (56*8), r56
ld.q r0, (57*8), r57
ld.q r0, (58*8), r58
ld.q r0, (59*8), r59
! epilogue
ld.l r15, 0, r18
ld.l r15, 4, r14
ptabs r18, tr0
movi FRAME_SIZE, r0
add r15, r0, r15
blink tr0, r63
__sh64_switch_to_end:
.LFE1:
.size sh64_switch_to,.LFE1-sh64_switch_to
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/cpu/sh5/unwind.c
*
* Copyright (C) 2004 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
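/*
 * Low byte of the most recent movi immediate seen for each register;
 * consulted when a movi/sub pair is used to set up a large stack frame.
 */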
static u8 regcache[63];
/*
* Finding the previous stack frame isn't as straightforward here as it is
* on some other platforms. In the sh64 case, we don't have "linked" stack
* frames, so we need to do a bit of work to determine the previous frame,
* and in turn, the previous r14/r18 pair.
*
* There are generally a few cases which determine where we can find out
* the r14/r18 values. In the general case, this can be determined by poking
* around the prologue of the symbol PC is in (note that we absolutely must
* have frame pointer support as well as the kernel symbol table mapped,
* otherwise we can't even get this far).
*
* In other cases, such as the interrupt/exception path, we can poke around
* the sp/fp.
*
* Notably, this entire approach is somewhat error prone, and in the event
* that the previous frame cannot be determined, that's all we can do.
* Either way, this still leaves us with a more correct backtrace than what
* we would be able to come up with by walking the stack (which is garbage
* for anything beyond the first frame).
* -- PFM.
*/
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
unsigned long *pprev_fp, unsigned long *pprev_pc,
struct pt_regs *regs)
{
const char *sym;
char namebuf[128];
unsigned long offset;
unsigned long prologue = 0;
unsigned long fp_displacement = 0;
unsigned long fp_prev = 0;
unsigned long offset_r14 = 0, offset_r18 = 0;
int i, found_prologue_end = 0;
sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
if (!sym)
return -EINVAL;
prologue = pc - offset;
if (!prologue)
return -EINVAL;
/* Validate fp, to avoid risk of dereferencing a bad pointer later.
Assume 128Mb since that's the amount of RAM on a Cayman. Modify
when there is an SH-5 board with more. */
if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
(fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
((fp & 7) != 0)) {
return -EINVAL;
}
/*
* Depth to walk, depth is completely arbitrary.
*/
for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
unsigned long op;
u8 major, minor;
u8 src, dest, disp;
op = *(unsigned long *)prologue;
major = (op >> 26) & 0x3f;
src = (op >> 20) & 0x3f;
minor = (op >> 16) & 0xf;
disp = (op >> 10) & 0x3f;
dest = (op >> 4) & 0x3f;
/*
* Stack frame creation happens in a number of ways.. in the
* general case when the stack frame is less than 511 bytes,
* it's generally created by an addi or addi.l:
*
* addi/addi.l r15, -FRAME_SIZE, r15
*
* in the event that the frame size is bigger than this, it's
* typically created using a movi/sub pair as follows:
*
* movi FRAME_SIZE, rX
* sub r15, rX, r15
*/
switch (major) {
case (0x00 >> 2):
switch (minor) {
case 0x8: /* add.l */
case 0x9: /* add */
/* Look for r15, r63, r14 */
if (src == 15 && disp == 63 && dest == 14)
found_prologue_end = 1;
break;
case 0xa: /* sub.l */
case 0xb: /* sub */
if (src != 15 || dest != 15)
continue;
fp_displacement -= regcache[disp];
fp_prev = fp - fp_displacement;
break;
}
break;
case (0xa8 >> 2): /* st.l */
if (src != 15)
continue;
switch (dest) {
case 14:
if (offset_r14 || fp_displacement == 0)
continue;
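/* Sign-extend the 10-bit displacement field of the st.l; the lines
 * below scale it to bytes and add the accumulated frame displacement. */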
offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r14 *= sizeof(unsigned long);
offset_r14 += fp_displacement;
break;
case 18:
if (offset_r18 || fp_displacement == 0)
continue;
offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
offset_r18 *= sizeof(unsigned long);
offset_r18 += fp_displacement;
break;
}
break;
case (0xcc >> 2): /* movi */
if (dest >= 63) {
printk(KERN_NOTICE "%s: Invalid dest reg %d "
"specified in movi handler. Failed "
"opcode was 0x%lx: ", __func__,
dest, op);
continue;
}
/* Sign extend */
regcache[dest] =
sign_extend64((((u64)op >> 10) & 0xffff), 9);
break;
case (0xd0 >> 2): /* addi */
case (0xd4 >> 2): /* addi.l */
/* Look for r15, -FRAME_SIZE, r15 */
if (src != 15 || dest != 15)
continue;
/* Sign extended frame size.. */
fp_displacement +=
(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
fp_prev = fp - fp_displacement;
break;
}
if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
break;
}
if (offset_r14 == 0 || fp_prev == 0) {
if (!offset_r14)
pr_debug("Unable to find r14 offset\n");
if (!fp_prev)
pr_debug("Unable to find previous fp\n");
return -EINVAL;
}
/* For the innermost leaf function, there might not be an offset_r18 */
if (!*pprev_pc && (offset_r18 == 0))
return -EINVAL;
*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);
if (offset_r18)
*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);
*pprev_pc &= ~1;
return 0;
}
/*
* Don't put this on the stack since we'll want to call in to
* sh64_unwinder_dump() when we're close to underflowing the stack
* anyway.
*/
static struct pt_regs here_regs;
extern const char syscall_ret;
extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;
static void sh64_unwind_inner(const struct stacktrace_ops *ops,
void *data, struct pt_regs *regs);
static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
unsigned long pc, unsigned long fp)
{
if ((fp >= __MEMORY_START) &&
((fp & 7) == 0))
sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}
static void sh64_unwind_inner(const struct stacktrace_ops *ops,
void *data, struct pt_regs *regs)
{
unsigned long pc, fp;
int ofs = 0;
int first_pass;
pc = regs->pc & ~1;
fp = regs->regs[14];
first_pass = 1;
for (;;) {
int cond;
unsigned long next_fp, next_pc;
if (pc == ((unsigned long)&syscall_ret & ~1)) {
printk("SYSCALL\n");
unwind_nested(ops, data, pc, fp);
return;
}
if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
printk("SYSCALL (PREEMPTED)\n");
unwind_nested(ops, data, pc, fp);
return;
}
/* In this case, the PC is discovered by lookup_prev_stack_frame but
it has 4 taken off it to look like the 'caller' */
if (pc == ((unsigned long)&ret_from_exception & ~1)) {
printk("EXCEPTION\n");
unwind_nested(ops, data, pc, fp);
return;
}
if (pc == ((unsigned long)&ret_from_irq & ~1)) {
printk("IRQ\n");
unwind_nested(ops, data, pc, fp);
return;
}
cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
((pc & 3) == 0) && ((fp & 7) == 0));
pc -= ofs;
ops->address(data, pc, 1);
if (first_pass) {
/* If the innermost frame is a leaf function, it's
* possible that r18 is never saved out to the stack.
*/
next_pc = regs->regs[18];
} else {
next_pc = 0;
}
if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
ofs = sizeof(unsigned long);
pc = next_pc & ~1;
fp = next_fp;
} else {
printk("Unable to lookup previous stack frame\n");
break;
}
first_pass = 0;
}
printk("\n");
}
static void sh64_unwinder_dump(struct task_struct *task,
struct pt_regs *regs,
unsigned long *sp,
const struct stacktrace_ops *ops,
void *data)
{
if (!regs) {
/*
* Fetch current regs if we have no other saved state to back
* trace from.
*/
regs = &here_regs;
__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));
__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));
__asm__ __volatile__ (
"pta 0f, tr0\n\t"
"blink tr0, %0\n\t"
"0: nop"
: "=r" (regs->pc)
);
}
sh64_unwind_inner(ops, data, regs);
}
static struct unwinder sh64_unwinder = {
.name = "sh64-unwinder",
.dump = sh64_unwinder_dump,
.rating = 150,
};
static int __init sh64_unwinder_init(void)
{
return unwinder_register(&sh64_unwinder);
}
early_initcall(sh64_unwinder_init);
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/head_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*/
#include <linux/init.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <cpu/registers.h>
#include <cpu/mmu_context.h>
#include <asm/thread_info.h>
/*
* MMU defines: TLB boundaries.
*/
#define MMUIR_FIRST ITLB_FIXED
#define MMUIR_END ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP TLB_STEP
#define MMUDR_FIRST DTLB_FIXED
#define MMUDR_END DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP TLB_STEP
/* Safety check : CONFIG_PAGE_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_PAGE_OFFSET & ((1UL<<29)-1))
#error "CONFIG_PAGE_OFFSET must be a multiple of 512Mb"
#endif
/*
* MMU defines: Fixed TLBs.
*/
/* Deal safely with the case where the base of RAM is not 512Mb aligned */
#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_PAGE_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)
#define MMUIR_TEXT_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUIR_TEXT_L (0x000000000000009a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */
#define MMUDR_CACHED_H (0x0000000000000003 | ALIGNED_EFFECTIVE)
/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L (0x000000000000015a | ALIGNED_PHYSICAL)
/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */
#ifdef CONFIG_CACHE_OFF
#define ICCR0_INIT_VAL ICCR0_OFF /* ICACHE off */
#else
#define ICCR0_INIT_VAL ICCR0_ON | ICCR0_ICI /* ICE + ICI */
#endif
#define ICCR1_INIT_VAL ICCR1_NOLOCK /* No locking */
#if defined (CONFIG_CACHE_OFF)
#define OCCR0_INIT_VAL OCCR0_OFF /* D-cache: off */
#elif defined (CONFIG_CACHE_WRITETHROUGH)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WT /* D-cache: on, */
/* WT, invalidate */
#elif defined (CONFIG_CACHE_WRITEBACK)
#define OCCR0_INIT_VAL OCCR0_ON | OCCR0_OCI | OCCR0_WB /* D-cache: on, */
/* WB, invalidate */
#else
#error preprocessor flag CONFIG_CACHE_... not recognized!
#endif
#define OCCR1_INIT_VAL OCCR1_NOLOCK /* No locking */
.section .empty_zero_page, "aw"
.global empty_zero_page
empty_zero_page:
.long 1 /* MOUNT_ROOT_RDONLY */
.long 0 /* RAMDISK_FLAGS */
.long 0x0200 /* ORIG_ROOT_DEV */
.long 1 /* LOADER_TYPE */
.long 0x00800000 /* INITRD_START */
.long 0x00800000 /* INITRD_SIZE */
.long 0
.text
.balign 4096,0,4096
.section .data, "aw"
.balign PAGE_SIZE
.section .data, "aw"
.balign PAGE_SIZE
.global mmu_pdtp_cache
mmu_pdtp_cache:
.space PAGE_SIZE, 0
.global fpu_in_use
fpu_in_use: .quad 0
__HEAD
.balign L1_CACHE_BYTES
/*
* Condition at the entry of __stext:
* . Reset state:
* . SR.FD = 1 (FPU disabled)
* . SR.BL = 1 (Exceptions disabled)
* . SR.MD = 1 (Privileged Mode)
* . SR.MMU = 0 (MMU Disabled)
* . SR.CD = 0 (CTC User Visible)
* . SR.IMASK = Undefined (Interrupt Mask)
*
* Operations supposed to be performed by __stext:
* . prevent speculative fetch onto device memory while MMU is off
* . reflect the SH5 ABI as much as possible (r15, r26, r27, r18)
* . first, save CPU state and set it to something harmless
* . any CPU detection and/or endianness settings (?)
* . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
* . set initial TLB entries for cached and uncached regions
* (no fine granularity paging)
* . set initial cache state
* . enable MMU and caches
* . set CPU to a consistent state
* . registers (including stack pointer and current/KCR0)
* . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
* at this stage. This is all to later Linux initialization steps.
* . initialize FPU
* . clear BSS
* . jump into start_kernel()
* . be prepared for the hopeless case of start_kernel() returning.
*
*/
.global _stext
_stext:
/*
* Prevent speculative fetch on device memory due to
* uninitialized target registers.
*/
ptabs/u ZERO, tr0
ptabs/u ZERO, tr1
ptabs/u ZERO, tr2
ptabs/u ZERO, tr3
ptabs/u ZERO, tr4
ptabs/u ZERO, tr5
ptabs/u ZERO, tr6
ptabs/u ZERO, tr7
synci
/*
* Read/Set CPU state. After this block:
* r29 = Initial SR
*/
getcon SR, r29
movi SR_HARMLESS, r20
putcon r20, SR
/*
* Initialize EMI/LMI. To Be Done.
*/
/*
* CPU detection and/or endianness settings (?). To Be Done.
* Pure PIC code here, please ! Just save state into r30.
* After this block:
* r30 = CPU type/Platform Endianness
*/
/*
* Set initial TLB entries for cached and uncached regions.
* Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
*/
/* Clear ITLBs */
pta clear_ITLB, tr1
movi MMUIR_FIRST, r21
movi MMUIR_END, r22
clear_ITLB:
putcfg r21, 0, ZERO /* Clear MMUIR[n].PTEH.V */
addi r21, MMUIR_STEP, r21
bne r21, r22, tr1
/* Clear DTLBs */
pta clear_DTLB, tr1
movi MMUDR_FIRST, r21
movi MMUDR_END, r22
clear_DTLB:
putcfg r21, 0, ZERO /* Clear MMUDR[n].PTEH.V */
addi r21, MMUDR_STEP, r21
bne r21, r22, tr1
/* Map one big (512Mb) page for ITLB */
movi MMUIR_FIRST, r21
movi MMUIR_TEXT_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUIR[0].PTEL */
movi MMUIR_TEXT_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUIR[0].PTEH */
/* Map one big CACHED (512Mb) page for DTLB */
movi MMUDR_FIRST, r21
movi MMUDR_CACHED_L, r22 /* PTEL first */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 1, r22 /* Set MMUDR[0].PTEL */
movi MMUDR_CACHED_H, r22 /* PTEH last */
add.l r22, r63, r22 /* Sign extend */
putcfg r21, 0, r22 /* Set MMUDR[0].PTEH */
/*
* Setup a DTLB translation for SCIF phys.
*/
addi r21, MMUDR_STEP, r21
movi 0x0a03, r22 /* SCIF phys */
shori 0x0148, r22
putcfg r21, 1, r22 /* PTEL first */
movi 0xfa03, r22 /* 0xfa030000, fixed SCIF virt */
shori 0x0003, r22
putcfg r21, 0, r22 /* PTEH last */
/*
* Set cache behaviours.
*/
/* ICache */
movi ICCR_BASE, r21
movi ICCR0_INIT_VAL, r22
movi ICCR1_INIT_VAL, r23
putcfg r21, ICCR_REG0, r22
putcfg r21, ICCR_REG1, r23
/* OCache */
movi OCCR_BASE, r21
movi OCCR0_INIT_VAL, r22
movi OCCR1_INIT_VAL, r23
putcfg r21, OCCR_REG0, r22
putcfg r21, OCCR_REG1, r23
/*
* Enable Caches and MMU. Do the first non-PIC jump.
* Now head.S global variables, constants and externs
* can be used.
*/
getcon SR, r21
movi SR_ENABLE_MMU, r22
or r21, r22, r21
putcon r21, SSR
movi hyperspace, r22
ori r22, 1, r22 /* Make it SHmedia, not required but..*/
putcon r22, SPC
synco
rte /* And now go into the hyperspace ... */
hyperspace: /* ... that's the next instruction ! */
/*
* Set CPU to a consistent state.
* r31 = FPU support flag
* tr0/tr7 in use. Others give a chance to loop somewhere safe
*/
movi start_kernel, r32
ori r32, 1, r32
ptabs r32, tr0 /* r32 = _start_kernel address */
pta/u hopeless, tr1
pta/u hopeless, tr2
pta/u hopeless, tr3
pta/u hopeless, tr4
pta/u hopeless, tr5
pta/u hopeless, tr6
pta/u hopeless, tr7
gettr tr1, r28 /* r28 = hopeless address */
/* Set initial stack pointer */
movi init_thread_union, SP
putcon SP, KCR0 /* Set current to init_task */
movi THREAD_SIZE, r22 /* Point to the end */
add SP, r22, SP
/*
* Initialize FPU.
* Keep FPU flag in r31. After this block:
* r31 = FPU flag
*/
movi fpu_in_use, r31 /* Temporary */
#ifdef CONFIG_SH_FPU
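/* Try to clear SR.FD (enable the FPU) and read SR back: the xor isolates
 * the changed bits and the shift by 15 leaves 1 in r21 if the FD bit
 * really toggled, i.e. if an FPU is present. */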
getcon SR, r21
movi SR_ENABLE_FPU, r22
and r21, r22, r22
putcon r22, SR /* Try to enable */
getcon SR, r22
xor r21, r22, r21
shlri r21, 15, r21 /* Supposedly 0/1 */
st.q r31, 0 , r21 /* Set fpu_in_use */
#else
movi 0, r21
st.q r31, 0 , r21 /* Set fpu_in_use */
#endif
or r21, ZERO, r31 /* Set FPU flag at last */
#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
remote memory via SHdebug link, etc. For these the memory can be guaranteed
to be all zero on boot anyway. */
/*
* Clear bss
*/
pta clear_quad, tr1
movi __bss_start, r22
movi _end, r23
clear_quad:
st.q r22, 0, ZERO
addi r22, 8, r22
bne r22, r23, tr1 /* Both quad aligned, see vmlinux.lds.S */
#endif
pta/u hopeless, tr1
/* Say bye to head.S but be prepared to wrongly get back ... */
blink tr0, LINK
/* If we ever get back here through LINK/tr1-tr7 */
pta/u hopeless, tr7
hopeless:
/*
* Something's badly wrong here. Loop endlessly,
* there's nothing more we can do about it.
*
* Note on hopeless: it can be jumped into both before and
* after jumping into hyperspace. The only requirement is to
* be PIC-called (PTA) before, and either way (PTA/PTABS)
* after. Given the virtual-to-physical mapping, a
* simulator/emulator can easily tell where we came from just
* by looking at the hopeless (PC) address.
*
* For debugging purposes:
* (r28) hopeless/loop address
* (r29) Original SR
* (r30) CPU type/Platform endianness
* (r31) FPU Support
* (r32) _start_kernel address
*/
blink tr7, ZERO
// SPDX-License-Identifier: GPL-2.0
/*
* SHmedia irqflags support
*
* Copyright (C) 2006 - 2009 Paul Mundt
*/
#include <linux/irqflags.h>
#include <linux/module.h>
#include <cpu/registers.h>
void notrace arch_local_irq_restore(unsigned long flags)
{
unsigned long long __dummy;
if (flags == ARCH_IRQ_DISABLED) {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"or %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (ARCH_IRQ_DISABLED)
);
} else {
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0\n\t"
"putcon %0, " __SR "\n\t"
: "=&r" (__dummy)
: "r" (~ARCH_IRQ_DISABLED)
);
}
}
EXPORT_SYMBOL(arch_local_irq_restore);
unsigned long notrace arch_local_save_flags(void)
{
unsigned long flags;
__asm__ __volatile__ (
"getcon " __SR ", %0\n\t"
"and %0, %1, %0"
: "=&r" (flags)
: "r" (ARCH_IRQ_DISABLED)
);
return flags;
}
EXPORT_SYMBOL(arch_local_save_flags);
......@@ -46,15 +46,6 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
+ ELF32_R_SYM(rel[i].r_info);
relocation = sym->st_value + rel[i].r_addend;
#ifdef CONFIG_SUPERH64
/* For text addresses, bit2 of the st_other field indicates
* whether the symbol is SHmedia (1) or SHcompact (0). If
* SHmedia, the LSB of the symbol needs to be asserted
* for the CPU to be in SHmedia mode when it starts executing
* the branch target. */
relocation |= !!(sym->st_other & 4);
#endif
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_SH_NONE:
break;
......
......@@ -23,9 +23,7 @@ EXPORT_SYMBOL(__stack_chk_guard);
*/
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
#ifdef CONFIG_SUPERH32
unlazy_fpu(src, task_pt_regs(src));
#endif
*dst = *src;
if (src->thread.xstate) {
......
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/process_64.c
*
* This file handles the architecture-dependent parts of process handling..
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*
* Started from SH3/4 version:
* Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
*
* In turn started from i386 version:
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <asm/syscalls.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/fpu.h>
#include <asm/switch_to.h>
struct task_struct *last_task_used_math = NULL;
struct pt_regs fake_swapper_regs = { 0, };
void show_regs(struct pt_regs *regs)
{
unsigned long long ah, al, bh, bl, ch, cl;
printk("\n");
show_regs_print_info(KERN_DEFAULT);
ah = (regs->pc) >> 32;
al = (regs->pc) & 0xffffffff;
bh = (regs->regs[18]) >> 32;
bl = (regs->regs[18]) & 0xffffffff;
ch = (regs->regs[15]) >> 32;
cl = (regs->regs[15]) & 0xffffffff;
printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->sr) >> 32;
al = (regs->sr) & 0xffffffff;
asm volatile ("getcon " __TEA ", %0" : "=r" (bh));
asm volatile ("getcon " __TEA ", %0" : "=r" (bl));
bh = (bh) >> 32;
bl = (bl) & 0xffffffff;
asm volatile ("getcon " __KCR0 ", %0" : "=r" (ch));
asm volatile ("getcon " __KCR0 ", %0" : "=r" (cl));
ch = (ch) >> 32;
cl = (cl) & 0xffffffff;
printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[0]) >> 32;
al = (regs->regs[0]) & 0xffffffff;
bh = (regs->regs[1]) >> 32;
bl = (regs->regs[1]) & 0xffffffff;
ch = (regs->regs[2]) >> 32;
cl = (regs->regs[2]) & 0xffffffff;
printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[3]) >> 32;
al = (regs->regs[3]) & 0xffffffff;
bh = (regs->regs[4]) >> 32;
bl = (regs->regs[4]) & 0xffffffff;
ch = (regs->regs[5]) >> 32;
cl = (regs->regs[5]) & 0xffffffff;
printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[6]) >> 32;
al = (regs->regs[6]) & 0xffffffff;
bh = (regs->regs[7]) >> 32;
bl = (regs->regs[7]) & 0xffffffff;
ch = (regs->regs[8]) >> 32;
cl = (regs->regs[8]) & 0xffffffff;
printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[9]) >> 32;
al = (regs->regs[9]) & 0xffffffff;
bh = (regs->regs[10]) >> 32;
bl = (regs->regs[10]) & 0xffffffff;
ch = (regs->regs[11]) >> 32;
cl = (regs->regs[11]) & 0xffffffff;
printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[12]) >> 32;
al = (regs->regs[12]) & 0xffffffff;
bh = (regs->regs[13]) >> 32;
bl = (regs->regs[13]) & 0xffffffff;
ch = (regs->regs[14]) >> 32;
cl = (regs->regs[14]) & 0xffffffff;
printk("R12 : %08Lx%08Lx R13 : %08Lx%08Lx R14 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[16]) >> 32;
al = (regs->regs[16]) & 0xffffffff;
bh = (regs->regs[17]) >> 32;
bl = (regs->regs[17]) & 0xffffffff;
ch = (regs->regs[19]) >> 32;
cl = (regs->regs[19]) & 0xffffffff;
printk("R16 : %08Lx%08Lx R17 : %08Lx%08Lx R19 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[20]) >> 32;
al = (regs->regs[20]) & 0xffffffff;
bh = (regs->regs[21]) >> 32;
bl = (regs->regs[21]) & 0xffffffff;
ch = (regs->regs[22]) >> 32;
cl = (regs->regs[22]) & 0xffffffff;
printk("R20 : %08Lx%08Lx R21 : %08Lx%08Lx R22 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[23]) >> 32;
al = (regs->regs[23]) & 0xffffffff;
bh = (regs->regs[24]) >> 32;
bl = (regs->regs[24]) & 0xffffffff;
ch = (regs->regs[25]) >> 32;
cl = (regs->regs[25]) & 0xffffffff;
printk("R23 : %08Lx%08Lx R24 : %08Lx%08Lx R25 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[26]) >> 32;
al = (regs->regs[26]) & 0xffffffff;
bh = (regs->regs[27]) >> 32;
bl = (regs->regs[27]) & 0xffffffff;
ch = (regs->regs[28]) >> 32;
cl = (regs->regs[28]) & 0xffffffff;
printk("R26 : %08Lx%08Lx R27 : %08Lx%08Lx R28 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[29]) >> 32;
al = (regs->regs[29]) & 0xffffffff;
bh = (regs->regs[30]) >> 32;
bl = (regs->regs[30]) & 0xffffffff;
ch = (regs->regs[31]) >> 32;
cl = (regs->regs[31]) & 0xffffffff;
printk("R29 : %08Lx%08Lx R30 : %08Lx%08Lx R31 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[32]) >> 32;
al = (regs->regs[32]) & 0xffffffff;
bh = (regs->regs[33]) >> 32;
bl = (regs->regs[33]) & 0xffffffff;
ch = (regs->regs[34]) >> 32;
cl = (regs->regs[34]) & 0xffffffff;
printk("R32 : %08Lx%08Lx R33 : %08Lx%08Lx R34 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[35]) >> 32;
al = (regs->regs[35]) & 0xffffffff;
bh = (regs->regs[36]) >> 32;
bl = (regs->regs[36]) & 0xffffffff;
ch = (regs->regs[37]) >> 32;
cl = (regs->regs[37]) & 0xffffffff;
printk("R35 : %08Lx%08Lx R36 : %08Lx%08Lx R37 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[38]) >> 32;
al = (regs->regs[38]) & 0xffffffff;
bh = (regs->regs[39]) >> 32;
bl = (regs->regs[39]) & 0xffffffff;
ch = (regs->regs[40]) >> 32;
cl = (regs->regs[40]) & 0xffffffff;
printk("R38 : %08Lx%08Lx R39 : %08Lx%08Lx R40 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[41]) >> 32;
al = (regs->regs[41]) & 0xffffffff;
bh = (regs->regs[42]) >> 32;
bl = (regs->regs[42]) & 0xffffffff;
ch = (regs->regs[43]) >> 32;
cl = (regs->regs[43]) & 0xffffffff;
printk("R41 : %08Lx%08Lx R42 : %08Lx%08Lx R43 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[44]) >> 32;
al = (regs->regs[44]) & 0xffffffff;
bh = (regs->regs[45]) >> 32;
bl = (regs->regs[45]) & 0xffffffff;
ch = (regs->regs[46]) >> 32;
cl = (regs->regs[46]) & 0xffffffff;
printk("R44 : %08Lx%08Lx R45 : %08Lx%08Lx R46 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[47]) >> 32;
al = (regs->regs[47]) & 0xffffffff;
bh = (regs->regs[48]) >> 32;
bl = (regs->regs[48]) & 0xffffffff;
ch = (regs->regs[49]) >> 32;
cl = (regs->regs[49]) & 0xffffffff;
printk("R47 : %08Lx%08Lx R48 : %08Lx%08Lx R49 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[50]) >> 32;
al = (regs->regs[50]) & 0xffffffff;
bh = (regs->regs[51]) >> 32;
bl = (regs->regs[51]) & 0xffffffff;
ch = (regs->regs[52]) >> 32;
cl = (regs->regs[52]) & 0xffffffff;
printk("R50 : %08Lx%08Lx R51 : %08Lx%08Lx R52 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[53]) >> 32;
al = (regs->regs[53]) & 0xffffffff;
bh = (regs->regs[54]) >> 32;
bl = (regs->regs[54]) & 0xffffffff;
ch = (regs->regs[55]) >> 32;
cl = (regs->regs[55]) & 0xffffffff;
printk("R53 : %08Lx%08Lx R54 : %08Lx%08Lx R55 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[56]) >> 32;
al = (regs->regs[56]) & 0xffffffff;
bh = (regs->regs[57]) >> 32;
bl = (regs->regs[57]) & 0xffffffff;
ch = (regs->regs[58]) >> 32;
cl = (regs->regs[58]) & 0xffffffff;
printk("R56 : %08Lx%08Lx R57 : %08Lx%08Lx R58 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[59]) >> 32;
al = (regs->regs[59]) & 0xffffffff;
bh = (regs->regs[60]) >> 32;
bl = (regs->regs[60]) & 0xffffffff;
ch = (regs->regs[61]) >> 32;
cl = (regs->regs[61]) & 0xffffffff;
printk("R59 : %08Lx%08Lx R60 : %08Lx%08Lx R61 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->regs[62]) >> 32;
al = (regs->regs[62]) & 0xffffffff;
bh = (regs->tregs[0]) >> 32;
bl = (regs->tregs[0]) & 0xffffffff;
ch = (regs->tregs[1]) >> 32;
cl = (regs->tregs[1]) & 0xffffffff;
printk("R62 : %08Lx%08Lx T0 : %08Lx%08Lx T1 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[2]) >> 32;
al = (regs->tregs[2]) & 0xffffffff;
bh = (regs->tregs[3]) >> 32;
bl = (regs->tregs[3]) & 0xffffffff;
ch = (regs->tregs[4]) >> 32;
cl = (regs->tregs[4]) & 0xffffffff;
printk("T2 : %08Lx%08Lx T3 : %08Lx%08Lx T4 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
ah = (regs->tregs[5]) >> 32;
al = (regs->tregs[5]) & 0xffffffff;
bh = (regs->tregs[6]) >> 32;
bl = (regs->tregs[6]) & 0xffffffff;
ch = (regs->tregs[7]) >> 32;
cl = (regs->tregs[7]) & 0xffffffff;
printk("T5 : %08Lx%08Lx T6 : %08Lx%08Lx T7 : %08Lx%08Lx\n",
ah, al, bh, bl, ch, cl);
/*
* If we're in kernel mode, dump the stack too..
*/
if (!user_mode(regs)) {
void show_stack(struct task_struct *tsk, unsigned long *sp);
unsigned long sp = regs->regs[15] & 0xffffffff;
struct task_struct *tsk = get_current();
tsk->thread.kregs = regs;
show_stack(tsk, (unsigned long *)sp);
}
}
/*
* Free current thread data structures etc..
*/
void exit_thread(struct task_struct *tsk)
{
/*
* See arch/sparc/kernel/process.c for the precedent for doing
* this -- RPC.
*
* The SH-5 FPU save/restore approach relies on
* last_task_used_math pointing to a live task_struct. When
* another task tries to use the FPU for the 1st time, the FPUDIS
* trap handling (see arch/sh/kernel/cpu/sh5/fpu.c) will save the
* existing FPU state to the FP regs field within
* last_task_used_math before re-loading the new task's FPU state
* (or initialising it if the FPU has been used before). So if
* last_task_used_math is stale, and its page has already been
* re-allocated for another use, the consequences are rather
* grim. Unless we null it here, there is no other path through
* which it would get safely nulled.
*/
#ifdef CONFIG_SH_FPU
if (last_task_used_math == tsk)
last_task_used_math = NULL;
#endif
}
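The comment above describes a lazy FPU ownership hand-off; the following is a rough sketch of that pattern only (the real handler is the FPUDIS trap code in arch/sh/kernel/cpu/sh5/fpu.c, not this function):

/* Rough sketch only: the hand-off the comment above relies on. */
static void sketch_fpu_handoff(struct task_struct *next)
{
	if (last_task_used_math) {
		enable_fpu();
		save_fpu(last_task_used_math);	/* park the old owner's state */
		disable_fpu();
	}
	/* ... reload (or initialise) next's FPU state here ... */
	last_task_used_math = next;		/* record the new owner */
}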
void flush_thread(void)
{
/* Called by fs/exec.c (setup_new_exec) to remove traces of a
* previously running executable. */
#ifdef CONFIG_SH_FPU
if (last_task_used_math == current) {
last_task_used_math = NULL;
}
/* Force FPU state to be reinitialised after exec */
clear_used_math();
#endif
/* if we are a kernel thread, about to change to user thread,
* update kreg
*/
if (current->thread.kregs == &fake_swapper_regs) {
current->thread.kregs =
((struct pt_regs *)(THREAD_SIZE + (unsigned long) current) - 1);
current->thread.uregs = current->thread.kregs;
}
}
void release_thread(struct task_struct *dead_task)
{
/* do nothing */
}
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
#ifdef CONFIG_SH_FPU
int fpvalid;
struct task_struct *tsk = current;
fpvalid = !!tsk_used_math(tsk);
if (fpvalid) {
if (current == last_task_used_math) {
enable_fpu();
save_fpu(tsk);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
memcpy(fpu, &tsk->thread.xstate->hardfpu, sizeof(*fpu));
}
return fpvalid;
#else
return 0; /* Task didn't use the fpu at all. */
#endif
}
EXPORT_SYMBOL(dump_fpu);
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
struct pt_regs *childregs;
#ifdef CONFIG_SH_FPU
/* can't happen for a kernel thread */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
current_pt_regs()->sr |= SR_FD;
}
#endif
/* Copy from sh version */
childregs = (struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1;
p->thread.sp = (unsigned long) childregs;
if (unlikely(p->flags & PF_KTHREAD)) {
memset(childregs, 0, sizeof(struct pt_regs));
childregs->regs[2] = (unsigned long)arg;
childregs->regs[3] = (unsigned long)usp;
childregs->sr = (1 << 30); /* not user_mode */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.pc = (unsigned long) ret_from_kernel_thread;
return 0;
}
*childregs = *current_pt_regs();
/*
* Sign extend the edited stack.
* Note that thread.pc and thread.sp will stay
* 32-bit wide and context switch must take care
* of NEFF sign extension.
*/
if (usp)
childregs->regs[15] = neff_sign_extend(usp);
p->thread.uregs = childregs;
childregs->regs[9] = 0; /* Set return value for child */
childregs->sr |= SR_FD; /* Invalidate FPU flag */
p->thread.pc = (unsigned long) ret_from_fork;
return 0;
}
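For reference, a minimal sketch of the sign extension neff_sign_extend() is assumed to perform here, with NEFF == 32 as elsewhere in this port (the real definition lives in the sh64 processor headers):

/* Sketch, assuming NEFF == 32: replicate bit 31 of the effective
 * address through the upper 32 bits of the 64-bit register. */
static inline unsigned long long sketch_neff_sign_extend(unsigned long long val)
{
	return (unsigned long long)(long long)(int)(val & 0xffffffffULL);
}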
#ifdef CONFIG_FRAME_POINTER
static int in_sh64_switch_to(unsigned long pc)
{
extern char __sh64_switch_to_end;
/* For a sleeping task, the PC is somewhere in the middle of the function,
so we don't have to worry about masking the LSB off */
return (pc >= (unsigned long) sh64_switch_to) &&
(pc < (unsigned long) &__sh64_switch_to_end);
}
#endif
unsigned long get_wchan(struct task_struct *p)
{
unsigned long pc;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
/*
* The same comment as on the Alpha applies here, too ...
*/
pc = thread_saved_pc(p);
#ifdef CONFIG_FRAME_POINTER
if (in_sh64_switch_to(pc)) {
unsigned long schedule_fp;
unsigned long sh64_switch_to_fp;
unsigned long schedule_caller_pc;
sh64_switch_to_fp = (long) p->thread.sp;
/* r14 is saved at offset 4 in the sh64_switch_to frame */
schedule_fp = *(unsigned long *) (long)(sh64_switch_to_fp + 4);
/* and the caller of 'schedule' is (currently!) saved at offset 24
in the frame of schedule (from disasm) */
schedule_caller_pc = *(unsigned long *) (long)(schedule_fp + 24);
return schedule_caller_pc;
}
#endif
return pc;
}
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/ptrace_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
*
* Started from SH3/4 version:
* SuperH version: Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
*
* Original x86 implementation:
* By Ross Biro 1/23/92
* edited by Linus Torvalds
*/
#include <linux/kernel.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/tracehook.h>
#include <linux/elf.h>
#include <linux/regset.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/syscalls.h>
#include <asm/fpu.h>
#include <asm/traps.h>
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>
/* This mask defines the bits of the SR which the user is not allowed to
change, which are everything except S, Q, M, PR, SZ, FR. */
#define SR_MASK (0xffff8cfd)
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/*
* This routine will get a word from the user area in the process kernel stack.
*/
static inline int get_stack_long(struct task_struct *task, int offset)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
return (*((int *)stack));
}
static inline unsigned long
get_fpu_long(struct task_struct *task, unsigned long addr)
{
unsigned long tmp;
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
if (addr == offsetof(struct user_fpu_struct, fpscr)) {
tmp = FPSCR_INIT;
} else {
tmp = 0xffffffffUL; /* matches initial value in fpu.c */
}
return tmp;
}
if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
tmp = ((long *)task->thread.xstate)[addr / sizeof(unsigned long)];
return tmp;
}
/*
* This routine will put a word into the user area in the process kernel stack.
*/
static inline int put_stack_long(struct task_struct *task, int offset,
unsigned long data)
{
unsigned char *stack;
stack = (unsigned char *)(task->thread.uregs);
stack += offset;
*(unsigned long *) stack = data;
return 0;
}
static inline int
put_fpu_long(struct task_struct *task, unsigned long addr, unsigned long data)
{
struct pt_regs *regs;
regs = (struct pt_regs*)((unsigned char *)task + THREAD_SIZE) - 1;
if (!tsk_used_math(task)) {
init_fpu(task);
} else if (last_task_used_math == task) {
enable_fpu();
save_fpu(task);
disable_fpu();
last_task_used_math = 0;
regs->sr |= SR_FD;
}
((long *)task->thread.xstate)[addr / sizeof(unsigned long)] = data;
return 0;
}
void user_enable_single_step(struct task_struct *child)
{
struct pt_regs *regs = child->thread.uregs;
regs->sr |= SR_SSTEP; /* auto-resetting upon exception */
set_tsk_thread_flag(child, TIF_SINGLESTEP);
}
void user_disable_single_step(struct task_struct *child)
{
struct pt_regs *regs = child->thread.uregs;
regs->sr &= ~SR_SSTEP;
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}
static int genregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
const struct pt_regs *regs = task_pt_regs(target);
int ret;
/* PC, SR, SYSCALL */
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&regs->pc,
0, 3 * sizeof(unsigned long long));
/* R1 -> R63 */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->regs,
offsetof(struct pt_regs, regs[0]),
63 * sizeof(unsigned long long));
/* TR0 -> TR7 */
if (!ret)
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
regs->tregs,
offsetof(struct pt_regs, tregs[0]),
8 * sizeof(unsigned long long));
if (!ret)
ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
static int genregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
struct pt_regs *regs = task_pt_regs(target);
int ret;
/* PC, SR, SYSCALL */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&regs->pc,
0, 3 * sizeof(unsigned long long));
/* R1 -> R63 */
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->regs,
offsetof(struct pt_regs, regs[0]),
63 * sizeof(unsigned long long));
/* TR0 -> TR7 */
if (!ret && count > 0)
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
regs->tregs,
offsetof(struct pt_regs, tregs[0]),
8 * sizeof(unsigned long long));
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
sizeof(struct pt_regs), -1);
return ret;
}
#ifdef CONFIG_SH_FPU
int fpregs_get(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_set(struct task_struct *target,
const struct user_regset *regset,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
int ret;
ret = init_fpu(target);
if (ret)
return ret;
set_stopped_child_used_math(target);
return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&target->thread.xstate->hardfpu, 0, -1);
}
static int fpregs_active(struct task_struct *target,
const struct user_regset *regset)
{
return tsk_used_math(target) ? regset->n : 0;
}
#endif
const struct pt_regs_offset regoffset_table[] = {
REG_OFFSET_NAME(pc),
REG_OFFSET_NAME(sr),
REG_OFFSET_NAME(syscall_nr),
REGS_OFFSET_NAME(0),
REGS_OFFSET_NAME(1),
REGS_OFFSET_NAME(2),
REGS_OFFSET_NAME(3),
REGS_OFFSET_NAME(4),
REGS_OFFSET_NAME(5),
REGS_OFFSET_NAME(6),
REGS_OFFSET_NAME(7),
REGS_OFFSET_NAME(8),
REGS_OFFSET_NAME(9),
REGS_OFFSET_NAME(10),
REGS_OFFSET_NAME(11),
REGS_OFFSET_NAME(12),
REGS_OFFSET_NAME(13),
REGS_OFFSET_NAME(14),
REGS_OFFSET_NAME(15),
REGS_OFFSET_NAME(16),
REGS_OFFSET_NAME(17),
REGS_OFFSET_NAME(18),
REGS_OFFSET_NAME(19),
REGS_OFFSET_NAME(20),
REGS_OFFSET_NAME(21),
REGS_OFFSET_NAME(22),
REGS_OFFSET_NAME(23),
REGS_OFFSET_NAME(24),
REGS_OFFSET_NAME(25),
REGS_OFFSET_NAME(26),
REGS_OFFSET_NAME(27),
REGS_OFFSET_NAME(28),
REGS_OFFSET_NAME(29),
REGS_OFFSET_NAME(30),
REGS_OFFSET_NAME(31),
REGS_OFFSET_NAME(32),
REGS_OFFSET_NAME(33),
REGS_OFFSET_NAME(34),
REGS_OFFSET_NAME(35),
REGS_OFFSET_NAME(36),
REGS_OFFSET_NAME(37),
REGS_OFFSET_NAME(38),
REGS_OFFSET_NAME(39),
REGS_OFFSET_NAME(40),
REGS_OFFSET_NAME(41),
REGS_OFFSET_NAME(42),
REGS_OFFSET_NAME(43),
REGS_OFFSET_NAME(44),
REGS_OFFSET_NAME(45),
REGS_OFFSET_NAME(46),
REGS_OFFSET_NAME(47),
REGS_OFFSET_NAME(48),
REGS_OFFSET_NAME(49),
REGS_OFFSET_NAME(50),
REGS_OFFSET_NAME(51),
REGS_OFFSET_NAME(52),
REGS_OFFSET_NAME(53),
REGS_OFFSET_NAME(54),
REGS_OFFSET_NAME(55),
REGS_OFFSET_NAME(56),
REGS_OFFSET_NAME(57),
REGS_OFFSET_NAME(58),
REGS_OFFSET_NAME(59),
REGS_OFFSET_NAME(60),
REGS_OFFSET_NAME(61),
REGS_OFFSET_NAME(62),
REGS_OFFSET_NAME(63),
TREGS_OFFSET_NAME(0),
TREGS_OFFSET_NAME(1),
TREGS_OFFSET_NAME(2),
TREGS_OFFSET_NAME(3),
TREGS_OFFSET_NAME(4),
TREGS_OFFSET_NAME(5),
TREGS_OFFSET_NAME(6),
TREGS_OFFSET_NAME(7),
REG_OFFSET_END,
};
/*
* These are our native regset flavours.
*/
enum sh_regset {
REGSET_GENERAL,
#ifdef CONFIG_SH_FPU
REGSET_FPU,
#endif
};
static const struct user_regset sh_regsets[] = {
/*
* Format is:
* PC, SR, SYSCALL,
* R1 --> R63,
* TR0 --> TR7,
*/
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.n = ELF_NGREG,
.size = sizeof(long long),
.align = sizeof(long long),
.get = genregs_get,
.set = genregs_set,
},
#ifdef CONFIG_SH_FPU
[REGSET_FPU] = {
.core_note_type = NT_PRFPREG,
.n = sizeof(struct user_fpu_struct) /
sizeof(long long),
.size = sizeof(long long),
.align = sizeof(long long),
.get = fpregs_get,
.set = fpregs_set,
.active = fpregs_active,
},
#endif
};
static const struct user_regset_view user_sh64_native_view = {
.name = "sh64",
.e_machine = EM_SH,
.regsets = sh_regsets,
.n = ARRAY_SIZE(sh_regsets),
};
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
return &user_sh64_native_view;
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
int ret;
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
unsigned long tmp;
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs))
tmp = get_stack_long(child, addr);
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
tmp = get_fpu_long(child, index);
} else if (addr == offsetof(struct user, u_fpvalid)) {
tmp = !!tsk_used_math(child);
} else {
break;
}
ret = put_user(tmp, datap);
break;
}
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area. We must
disallow any changes to certain SR bits or u_fpvalid, since
this could crash the kernel or result in a security
loophole. */
ret = -EIO;
if ((addr & 3) || addr < 0)
break;
if (addr < sizeof(struct pt_regs)) {
/* Ignore change of top 32 bits of SR */
if (addr == offsetof (struct pt_regs, sr)+4)
{
ret = 0;
break;
}
/* If lower 32 bits of SR, ignore non-user bits */
if (addr == offsetof (struct pt_regs, sr))
{
long cursr = get_stack_long(child, addr);
data &= ~(SR_MASK);
data |= (cursr & SR_MASK);
}
ret = put_stack_long(child, addr, data);
}
else if ((addr >= offsetof(struct user, fpu)) &&
(addr < offsetof(struct user, u_fpvalid))) {
unsigned long index;
ret = init_fpu(child);
if (ret)
break;
index = addr - offsetof(struct user, fpu);
ret = put_fpu_long(child, index, data);
}
break;
case PTRACE_GETREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
case PTRACE_SETREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_GENERAL,
0, sizeof(struct pt_regs),
datap);
#ifdef CONFIG_SH_FPU
case PTRACE_GETFPREGS:
return copy_regset_to_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
case PTRACE_SETFPREGS:
return copy_regset_from_user(child, &user_sh64_native_view,
REGSET_FPU,
0, sizeof(struct user_fpu_struct),
datap);
#endif
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
}
asmlinkage int sh64_ptrace(long request, long pid,
unsigned long addr, unsigned long data)
{
#define WPC_DBRMODE 0x0d104008
static unsigned long first_call;
if (!test_and_set_bit(0, &first_call)) {
/* Set WPC.DBRMODE to 0. This makes all debug events get
* delivered through RESVEC, i.e. into the handlers in entry.S.
* (If the kernel was downloaded using a remote gdb, WPC.DBRMODE
* would normally be left set to 1, which makes debug events get
* delivered through DBRVEC, i.e. into the remote gdb's
* handlers. This prevents ptrace from getting them and confuses
* the remote gdb.) */
printk("DBRMODE set to 0 to permit native debugging\n");
poke_real_address_q(WPC_DBRMODE, 0);
}
return sys_ptrace(request, pid, addr, data);
}
asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
{
long long ret = 0;
secure_computing_strict(regs->regs[9]);
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
/*
* Tracing decided this syscall should not happen.
* We'll return a bogus call number to get an ENOSYS
* error, but leave the original number in regs->regs[0].
*/
ret = -1LL;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[9]);
audit_syscall_entry(regs->regs[1], regs->regs[2], regs->regs[3],
regs->regs[4], regs->regs[5]);
return ret ?: regs->regs[9];
}
asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
{
int step;
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_exit(regs, regs->regs[9]);
step = test_thread_flag(TIF_SINGLESTEP);
if (step || test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, step);
}
/* Called with interrupts disabled */
asmlinkage void do_single_step(unsigned long long vec, struct pt_regs *regs)
{
/* This is called after a single step exception (DEBUGSS).
There is no need to change the PC, as it is a post-execution
exception and entry.S does not do anything to the PC for DEBUGSS.
We need to clear the Single Step setting in SR to avoid
continually stepping. */
local_irq_enable();
regs->sr &= ~SR_SSTEP;
force_sig(SIGTRAP);
}
/* Called with interrupts disabled */
BUILD_TRAP_HANDLER(breakpoint)
{
TRAP_HANDLER_DECL;
/* We need to forward step the PC, to counteract the backstep done
in signal.c. */
local_irq_enable();
force_sig(SIGTRAP);
regs->pc += 4;
}
/*
* Called by kernel/ptrace.c when detaching..
*
* Make sure single step bits etc are not set.
*/
void ptrace_disable(struct task_struct *child)
{
user_disable_single_step(child);
}
......@@ -4,9 +4,7 @@
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
#ifdef CONFIG_SUPERH32
#include <asm/watchdog.h>
#endif
#include <asm/addrspace.h>
#include <asm/reboot.h>
#include <asm/tlbflush.h>
......@@ -15,13 +13,11 @@
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
#ifdef CONFIG_SUPERH32
static void watchdog_trigger_immediate(void)
{
sh_wdt_write_cnt(0xFF);
sh_wdt_write_csr(0xC2);
}
#endif
static void native_machine_restart(char * __unused)
{
......@@ -33,10 +29,8 @@ static void native_machine_restart(char * __unused)
/* Address error with SR.BL=1 first. */
trigger_address_error();
#ifdef CONFIG_SUPERH32
/* If that fails or is unsupported, go for the watchdog next. */
watchdog_trigger_immediate();
#endif
/*
* Give up and sleep.
......
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/sh_ksyms_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
*/
#include <linux/rwsem.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/user.h>
#include <linux/elfcore.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/interrupt.h>
#include <linux/screen_info.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/checksum.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/irq.h>
EXPORT_SYMBOL(__put_user_asm_b);
EXPORT_SYMBOL(__put_user_asm_w);
EXPORT_SYMBOL(__put_user_asm_l);
EXPORT_SYMBOL(__put_user_asm_q);
EXPORT_SYMBOL(__get_user_asm_b);
EXPORT_SYMBOL(__get_user_asm_w);
EXPORT_SYMBOL(__get_user_asm_l);
EXPORT_SYMBOL(__get_user_asm_q);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__const_udelay);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strcpy);
/* Ugh. These come in from libgcc.a at link time. */
#define DECLARE_EXPORT(name) extern void name(void);EXPORT_SYMBOL(name)
DECLARE_EXPORT(__sdivsi3);
DECLARE_EXPORT(__sdivsi3_1);
DECLARE_EXPORT(__sdivsi3_2);
DECLARE_EXPORT(__udivsi3);
DECLARE_EXPORT(__div_table);
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/signal_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 - 2008 Paul Mundt
* Copyright (C) 2004 Richard Curnow
*/
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tracehook.h>
#include <asm/ucontext.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/fpu.h>
#define REG_RET 9
#define REG_ARG1 2
#define REG_ARG2 3
#define REG_ARG3 4
#define REG_SP 15
#define REG_PR 18
#define REF_REG_RET regs->regs[REG_RET]
#define REF_REG_SP regs->regs[REG_SP]
#define DEREF_REG_PR regs->regs[REG_PR]
#define DEBUG_SIG 0
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs);
static inline void
handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
{
/* If we're not from a syscall, bail out */
if (regs->syscall_nr < 0)
return;
/* check for system call restart.. */
switch (regs->regs[REG_RET]) {
case -ERESTART_RESTARTBLOCK:
case -ERESTARTNOHAND:
no_system_call_restart:
regs->regs[REG_RET] = -EINTR;
break;
case -ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
/* fallthrough */
case -ERESTARTNOINTR:
/* Decode syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
break;
}
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*
* Note that we go through the signals twice: once to check the signals that
* the kernel can handle, and then we build all the user-level signal handling
* stack-frames in one go after that.
*/
static void do_signal(struct pt_regs *regs)
{
struct ksignal ksig;
/*
* We want the common case to go fast, which
* is why we may in certain cases get here from
* kernel mode. Just return without doing anything
* if so.
*/
if (!user_mode(regs))
return;
if (get_signal(&ksig)) {
handle_syscall_restart(regs, &ksig.ka.sa);
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
return;
}
/* Did we come from a system call? */
if (regs->syscall_nr >= 0) {
/* Restart the system call - no handlers present */
switch (regs->regs[REG_RET]) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
/* Decode Syscall # */
regs->regs[REG_RET] = regs->syscall_nr;
regs->pc -= 4;
break;
case -ERESTART_RESTARTBLOCK:
regs->regs[REG_RET] = __NR_restart_syscall;
regs->pc -= 4;
break;
}
}
/* No signal to deliver -- put the saved sigmask back */
restore_saved_sigmask();
}
/*
* Do a signal return; undo the signal stack.
*/
struct sigframe {
struct sigcontext sc;
unsigned long extramask[_NSIG_WORDS-1];
long long retcode[2];
};
struct rt_sigframe {
struct siginfo __user *pinfo;
void *puc;
struct siginfo info;
struct ucontext uc;
long long retcode[2];
};
#ifdef CONFIG_SH_FPU
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
err |= __get_user (fpvalid, &sc->sc_fpvalid);
conditional_used_math(fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_from_user(&current->thread.xstate->hardfpu, &sc->sc_fpregs[0],
(sizeof(long long) * 32) + (sizeof(int) * 1));
return err;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
int err = 0;
int fpvalid;
fpvalid = !!used_math();
err |= __put_user(fpvalid, &sc->sc_fpvalid);
if (! fpvalid)
return err;
if (current == last_task_used_math) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
err |= __copy_to_user(&sc->sc_fpregs[0], &current->thread.xstate->hardfpu,
(sizeof(long long) * 32) + (sizeof(int) * 1));
clear_used_math();
return err;
}
#else
static inline int
restore_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
return 0;
}
static inline int
setup_sigcontext_fpu(struct pt_regs *regs, struct sigcontext __user *sc)
{
return 0;
}
#endif
static int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, long long *r2_p)
{
unsigned int err = 0;
unsigned long long current_sr, new_sr;
#define SR_MASK 0xffff8cfd
#define COPY(x) err |= __get_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
/* Prevent the signal handler manipulating SR in a way that can
crash the kernel. i.e. only allow S, Q, M, PR, SZ, FR to be
modified */
current_sr = regs->sr;
err |= __get_user(new_sr, &sc->sc_sr);
regs->sr &= SR_MASK;
regs->sr |= (new_sr & ~SR_MASK);
COPY(pc);
#undef COPY
/* Must do this last in case it sets regs->sr.fd (i.e. after rest of sr
* has been restored above.) */
err |= restore_sigcontext_fpu(regs, sc);
regs->syscall_nr = -1; /* disable syscall checks */
err |= __get_user(*r2_p, &sc->sc_regs[REG_RET]);
return err;
}
asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct sigframe __user *frame = (struct sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.oldmask)
|| (_NSIG_WORDS > 1
&& __copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->sc, &ret))
goto badframe;
regs->pc -= 4;
return (int) ret;
badframe:
force_sig(SIGSEGV);
return 0;
}
asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
unsigned long r4, unsigned long r5,
unsigned long r6, unsigned long r7,
struct pt_regs * regs)
{
struct rt_sigframe __user *frame = (struct rt_sigframe __user *) (long) REF_REG_SP;
sigset_t set;
long long ret;
/* Always make any pending restarted system calls return -EINTR */
current->restart_block.fn = do_no_restart_syscall;
if (!access_ok(frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
set_current_blocked(&set);
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
goto badframe;
regs->pc -= 4;
if (restore_altstack(&frame->uc.uc_stack))
goto badframe;
return (int) ret;
badframe:
force_sig(SIGSEGV);
return 0;
}
/*
* Set up a signal frame.
*/
static int
setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
unsigned long mask)
{
int err = 0;
/* Do this first, otherwise if this sets sr->fd, that value isn't preserved. */
err |= setup_sigcontext_fpu(regs, sc);
#define COPY(x) err |= __put_user(regs->x, &sc->sc_##x)
COPY(regs[0]); COPY(regs[1]); COPY(regs[2]); COPY(regs[3]);
COPY(regs[4]); COPY(regs[5]); COPY(regs[6]); COPY(regs[7]);
COPY(regs[8]); COPY(regs[9]); COPY(regs[10]); COPY(regs[11]);
COPY(regs[12]); COPY(regs[13]); COPY(regs[14]); COPY(regs[15]);
COPY(regs[16]); COPY(regs[17]); COPY(regs[18]); COPY(regs[19]);
COPY(regs[20]); COPY(regs[21]); COPY(regs[22]); COPY(regs[23]);
COPY(regs[24]); COPY(regs[25]); COPY(regs[26]); COPY(regs[27]);
COPY(regs[28]); COPY(regs[29]); COPY(regs[30]); COPY(regs[31]);
COPY(regs[32]); COPY(regs[33]); COPY(regs[34]); COPY(regs[35]);
COPY(regs[36]); COPY(regs[37]); COPY(regs[38]); COPY(regs[39]);
COPY(regs[40]); COPY(regs[41]); COPY(regs[42]); COPY(regs[43]);
COPY(regs[44]); COPY(regs[45]); COPY(regs[46]); COPY(regs[47]);
COPY(regs[48]); COPY(regs[49]); COPY(regs[50]); COPY(regs[51]);
COPY(regs[52]); COPY(regs[53]); COPY(regs[54]); COPY(regs[55]);
COPY(regs[56]); COPY(regs[57]); COPY(regs[58]); COPY(regs[59]);
COPY(regs[60]); COPY(regs[61]); COPY(regs[62]);
COPY(tregs[0]); COPY(tregs[1]); COPY(tregs[2]); COPY(tregs[3]);
COPY(tregs[4]); COPY(tregs[5]); COPY(tregs[6]); COPY(tregs[7]);
COPY(sr); COPY(pc);
#undef COPY
err |= __put_user(mask, &sc->oldmask);
return err;
}
/*
* Determine which stack to use..
*/
static inline void __user *
get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size)
{
if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp))
sp = current->sas_ss_sp + current->sas_ss_size;
return (void __user *)((sp - frame_size) & -8ul);
}
void sa_default_restorer(void); /* See comments below */
void sa_default_rt_restorer(void); /* See comments below */
static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
int err = 0, sig = ksig->sig;
int signal;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= setup_sigcontext(&frame->sc, regs, set->sig[0]);
/* Give up early, as i386 does, in case */
if (err)
return -EFAULT;
if (_NSIG_WORDS > 1) {
err |= __copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask)); }
/* Give up early, as i386 does, in case */
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
ksig->ka.sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_restorer & (~1)), 16) != 0)
return -EFAULT;
/* Cohere the trampoline with the I-cache. */
flush_cache_sigtramp(DEREF_REG_PR-1);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
/* FIXME:
The glibc profiling support for SH-5 needs to be passed a sigcontext
so it can retrieve the PC. At some point during 2003 the glibc
support was changed to receive the sigcontext through the 2nd
argument, but there are still versions of libc.so in use that use
the 3rd argument. Until libc.so is stabilised, pass the sigcontext
through both 2nd and 3rd arguments.
*/
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->sc;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
/* Broken %016Lx */
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
return 0;
}
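The retcode handling above leans on the SHmedia bit-0 convention spelled out in the comment; a minimal illustration follows (hypothetical helpers, not part of the removed file):

/* Illustration only: SHmedia branch targets carry the ISA mode in bit 0. */
static unsigned long sketch_shmedia_target(unsigned long code_bytes)
{
	return code_bytes | 0x1;	/* jump here to execute as SHmedia */
}

static unsigned long sketch_shmedia_bytes(unsigned long target)
{
	return target & ~1UL;		/* where the instructions actually live */
}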
static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int err = 0, sig = ksig->sig;
frame = get_sigframe(&ksig->ka, regs->regs[REG_SP], sizeof(*frame));
if (!access_ok(frame, sizeof(*frame)))
return -EFAULT;
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, &ksig->info);
/* Give up early, as i386 does, in case */
if (err)
return -EFAULT;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(0, &frame->uc.uc_link);
err |= __save_altstack(&frame->uc.uc_stack, regs->regs[REG_SP]);
err |= setup_sigcontext(&frame->uc.uc_mcontext,
regs, set->sig[0]);
err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
/* Give up early, as i386 does, in case */
if (err)
return -EFAULT;
/* Set up to return from userspace. If provided, use a stub
already in userspace. */
if (ksig->ka.sa.sa_flags & SA_RESTORER) {
/*
* On SH5 all edited pointers are subject to NEFF
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
ksig->ka.sa.sa_restorer | 0x1);
} else {
/*
* Different approach on SH5.
* . Endianness independent asm code gets placed in entry.S .
* This is limited to four ASM instructions corresponding
* to two long longs in size.
* . err checking is done on the else branch only
* . flush_icache_range() is called upon __put_user() only
* . all edited pointers are subject to NEFF
* . being code, linker turns ShMedia bit on, always
* dereference index -1.
*/
DEREF_REG_PR = neff_sign_extend((unsigned long)
frame->retcode | 0x01);
if (__copy_to_user(frame->retcode,
(void *)((unsigned long)sa_default_rt_restorer & (~1)), 16) != 0)
return -EFAULT;
/* Cohere the trampoline with the I-cache. */
flush_icache_range(DEREF_REG_PR-1, DEREF_REG_PR-1+15);
}
/*
* Set up registers for signal handler.
* All edited pointers are subject to NEFF.
*/
regs->regs[REG_SP] = neff_sign_extend((unsigned long)frame);
regs->regs[REG_ARG1] = sig; /* Arg for signal handler */
regs->regs[REG_ARG2] = (unsigned long long)(unsigned long)(signed long)&frame->info;
regs->regs[REG_ARG3] = (unsigned long long)(unsigned long)(signed long)&frame->uc.uc_mcontext;
regs->pc = neff_sign_extend((unsigned long)ksig->ka.sa.sa_handler);
pr_debug("SIG deliver (#%d,%s:%d): sp=%p pc=%08Lx%08Lx link=%08Lx%08Lx\n",
sig, current->comm, current->pid, frame,
regs->pc >> 32, regs->pc & 0xffffffff,
DEREF_REG_PR >> 32, DEREF_REG_PR & 0xffffffff);
return 0;
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
sigset_t *oldset = sigmask_to_save();
int ret;
/* Set up the stack frame */
if (ksig->ka.sa.sa_flags & SA_SIGINFO)
ret = setup_rt_frame(ksig, oldset, regs);
else
ret = setup_frame(ksig, oldset, regs);
signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}
/* SPDX-License-Identifier: GPL-2.0
*
* arch/sh/kernel/syscalls_64.S
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2004 - 2007 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/sys.h>
.section .data, "aw"
.balign 32
/*
* System calls jump table
*/
.globl sys_call_table
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sh64_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall /* sys_olduname */
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_ni_syscall /* sys_oldselect */
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm */
.long sys_socketcall /* Obsolete implementation of socket syscall */
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */ /* iopl */
.long sys_vhangup
.long sys_ni_syscall /* idle */
.long sys_ni_syscall /* vm86old */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc /* Obsolete ipc syscall implementation */
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* x86: sys_modify_ldt */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130: old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_ni_syscall /* vm86 */
.long sys_ni_syscall /* old "query_module" */
.long sys_poll
.long sys_ni_syscall /* was nfsservctl */
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
.long sys_chown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* getpmsg */
.long sys_ni_syscall /* putpmsg */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_lchown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_chown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_mincore
.long sys_madvise
/* Broken-out socket family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_socket /* 220 */
.long sys_bind
.long sys_connect
.long sys_listen
.long sys_accept
.long sys_getsockname /* 225 */
.long sys_getpeername
.long sys_socketpair
.long sys_send
.long sys_sendto
.long sys_recv /* 230*/
.long sys_recvfrom
.long sys_shutdown
.long sys_setsockopt
.long sys_getsockopt
.long sys_sendmsg /* 235 */
.long sys_recvmsg
/* Broken-out IPC family (maintain backwards compatibility in syscall
numbering with 2.4) */
.long sys_semop
.long sys_semget
.long sys_semctl
.long sys_msgsnd /* 240 */
.long sys_msgrcv
.long sys_msgget
.long sys_msgctl
.long sys_shmat
.long sys_shmdt /* 245 */
.long sys_shmget
.long sys_shmctl
/* Rest of syscalls listed in 2.4 i386 unistd.h */
.long sys_getdents64
.long sys_fcntl64
.long sys_ni_syscall /* 250 reserved for TUX */
.long sys_ni_syscall /* Reserved for Security */
.long sys_gettid
.long sys_readahead
.long sys_setxattr
.long sys_lsetxattr /* 255 */
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr /* 260 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr /* 265 */
.long sys_tkill
.long sys_sendfile64
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity /* 270 */
.long sys_ni_syscall /* reserved for set_thread_area */
.long sys_ni_syscall /* reserved for get_thread_area */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents /* 275 */
.long sys_io_submit
.long sys_io_cancel
.long sys_fadvise64
.long sys_ni_syscall
.long sys_exit_group /* 280 */
/* Rest of new 2.6 syscalls */
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages /* 285 */
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun /* 290 */
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep /* 295 */
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill
.long sys_utimes
.long sys_fadvise64_64 /* 300 */
.long sys_ni_syscall /* Reserved for vserver */
.long sys_ni_syscall /* Reserved for mbind */
.long sys_ni_syscall /* get_mempolicy */
.long sys_ni_syscall /* set_mempolicy */
.long sys_mq_open /* 305 */
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify
.long sys_mq_getsetattr /* 310 */
.long sys_ni_syscall /* Reserved for kexec */
.long sys_waitid
.long sys_add_key
.long sys_request_key
.long sys_keyctl /* 315 */
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch
.long sys_inotify_rm_watch /* 320 */
.long sys_ni_syscall
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 325 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 330 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 335 */
.long sys_pselect6
.long sys_ppoll
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 340 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 345 */
.long sys_getcpu
.long sys_epoll_pwait
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create /* 350 */
.long sys_eventfd
.long sys_fallocate
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4 /* 355 */
.long sys_eventfd2
.long sys_epoll_create1
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1 /* 360 */
.long sys_preadv
.long sys_pwritev
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_recvmmsg /* 365 */
.long sys_accept4
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
.long sys_name_to_handle_at /* 370 */
.long sys_open_by_handle_at
.long sys_clock_adjtime
.long sys_syncfs
.long sys_sendmmsg
.long sys_setns /* 375 */
.long sys_process_vm_readv
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
.long sys_sched_getattr /* 380 */
.long sys_sched_setattr
.long sys_renameat2
.long sys_seccomp
.long sys_getrandom
.long sys_memfd_create /* 385 */
.long sys_bpf
.long sys_execveat
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2 /* 390 */
.long sys_copy_file_range
.long sys_preadv2
.long sys_pwritev2
// SPDX-License-Identifier: GPL-2.0
/*
* arch/sh/kernel/traps_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
* Copyright (C) 2003, 2004 Richard Curnow
*/
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>
static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
int get_user_error;
unsigned long aligned_pc;
insn_size_t opcode;
if ((pc & 3) == 1) {
/* SHmedia */
aligned_pc = pc & ~3;
if (from_user_mode) {
if (!access_ok(aligned_pc, sizeof(insn_size_t))) {
get_user_error = -EFAULT;
} else {
get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
*result_opcode = opcode;
}
return get_user_error;
} else {
/* If the fault was in the kernel, we can either read
* this directly, or if not, we fault.
*/
*result_opcode = *(insn_size_t *)aligned_pc;
return 0;
}
} else if ((pc & 1) == 0) {
/* SHcompact */
/* TODO : provide handling for this. We don't really support
user-mode SHcompact yet, and for a kernel fault, this would
have to come from a module built for SHcompact. */
return -EFAULT;
} else {
/* misaligned */
return -EFAULT;
}
}
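A minimal sketch of the PC low-bit convention decoded above (illustration only, not part of the removed file):

/* Illustration of the encoding handled by read_opcode():
 *   (pc & 3) == 1  ->  SHmedia, 32-bit opcode fetched from pc & ~3
 *   (pc & 1) == 0  ->  SHcompact, 16-bit opcodes
 *   otherwise      ->  misaligned PC, treated as a fault
 */
static int sketch_pc_is_shmedia(unsigned long pc)
{
	return (pc & 3) == 1;
}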
static int address_is_sign_extended(__u64 a)
{
__u64 b;
#if (NEFF == 32)
b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}
/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
__u64 *address)
{
__u64 base_address, addr;
int basereg;
switch (1 << width_shift) {
case 1: inc_unaligned_byte_access(); break;
case 2: inc_unaligned_word_access(); break;
case 4: inc_unaligned_dword_access(); break;
case 8: inc_unaligned_multi_access(); break;
}
basereg = (opcode >> 20) & 0x3f;
base_address = regs->regs[basereg];
if (displacement_not_indexed) {
__s64 displacement;
displacement = (opcode >> 10) & 0x3ff;
displacement = sign_extend64(displacement, 9);
addr = (__u64)((__s64)base_address + (displacement << width_shift));
} else {
__u64 offset;
int offsetreg;
offsetreg = (opcode >> 10) & 0x3f;
offset = regs->regs[offsetreg];
addr = base_address + offset;
}
/* Check sign extended */
if (!address_is_sign_extended(addr))
return -1;
/* Check accessible. For misaligned access in the kernel, assume the
address is always accessible (and if not, just fault when the
load/store gets done.) */
if (user_mode(regs)) {
inc_unaligned_user_access();
if (addr >= TASK_SIZE)
return -1;
} else
inc_unaligned_kernel_access();
*address = addr;
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
unaligned_fixups_notify(current, opcode, regs);
return 0;
}
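/*
 * Illustrative sketch, not part of the original file: how the displacement
 * form handled by generate_and_check_address() above turns opcode fields
 * into an effective address. The field positions (10-bit displacement in
 * bits 19:10, scaled by the access width) are taken from the extraction
 * code above; the sample opcode value is made up.
 */
#include <stdint.h>
#include <assert.h>
static uint64_t shmedia_disp_address(uint32_t opcode, uint64_t base_value,
				     int width_shift)
{
	int64_t disp = (opcode >> 10) & 0x3ff;
	if (disp & 0x200)		/* sign-extend from bit 9 */
		disp -= 0x400;
	/* Scale by the access width, as the code above does with a shift. */
	return base_value + (uint64_t)(disp * (1 << width_shift));
}
static void shmedia_disp_address_example(void)
{
	/* Displacement of -1 on an 8-byte access: base - 8. */
	assert(shmedia_disp_address(0x3ffu << 10, 0x1000, 3) == 0xff8);
}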
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
q[0] = p[0];
q[1] = p[1];
if (do_sign_extend) {
*result = (__u64)(__s64) *(short *) &x;
} else {
*result = (__u64) x;
}
}
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
unsigned short x;
unsigned char *p, *q;
p = (unsigned char *) (int) address;
q = (unsigned char *) &x;
x = (__u16) value;
p[0] = q[0];
p[1] = q[1];
}
static int misaligned_load(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_sign_extend)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
switch (width_shift) {
case 1:
if (do_sign_extend) {
regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
} else {
regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
}
break;
case 2:
regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
break;
case 3:
regs->regs[destreg] = buffer;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 lo, hi;
switch (width_shift) {
case 1:
misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
break;
case 2:
asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
case 3:
asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
regs->regs[destreg] = lo | hi;
break;
default:
printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
static int misaligned_store(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
switch (width_shift) {
case 1:
*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
break;
case 2:
*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
break;
case 3:
buffer = regs->regs[srcreg];
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
} else {
/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
__u64 val = regs->regs[srcreg];
switch (width_shift) {
case 1:
misaligned_kernel_word_store(address, val);
break;
case 2:
asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
break;
case 3:
asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
break;
default:
printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
}
return 0;
}
/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
error. */
static int misaligned_fpu_load(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int destreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
destreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
__u32 buflo, bufhi;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
return -1; /* fault */
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
buflo = *(__u32*) &buffer;
bufhi = *(1 + (__u32*) &buffer);
switch (width_shift) {
case 2:
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
break;
case 3:
if (do_paired_load) {
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fpu_store(struct pt_regs *regs,
insn_size_t opcode,
int displacement_not_indexed,
int width_shift,
int do_paired_load)
{
/* Return -1 for a fault, 0 for OK */
int error;
int srcreg;
__u64 address;
error = generate_and_check_address(regs, opcode,
displacement_not_indexed, width_shift, &address);
if (error < 0)
return error;
srcreg = (opcode >> 4) & 0x3f;
if (user_mode(regs)) {
__u64 buffer;
/* Initialise these to NaNs. */
__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
if (!access_ok((unsigned long) address, 1UL<<width_shift)) {
return -1;
}
/* 'current' may be the current owner of the FPU state, so
context switch the registers into memory so they can be
indexed by register number. */
if (last_task_used_math == current) {
enable_fpu();
save_fpu(current);
disable_fpu();
last_task_used_math = NULL;
regs->sr |= SR_FD;
}
switch (width_shift) {
case 2:
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
break;
case 3:
if (do_paired_load) {
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
}
break;
default:
printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
width_shift, (unsigned long) regs->pc);
break;
}
*(__u32*) &buffer = buflo;
*(1 + (__u32*) &buffer) = bufhi;
if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
return -1; /* fault */
}
return 0;
} else {
die ("Misaligned FPU load inside kernel", regs, 0);
return -1;
}
}
static int misaligned_fixup(struct pt_regs *regs)
{
insn_size_t opcode;
int error;
int major, minor;
unsigned int user_action;
user_action = unaligned_user_action();
if (!(user_action & UM_FIXUP))
return -1;
error = read_opcode(regs->pc, &opcode, user_mode(regs));
if (error < 0) {
return error;
}
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
switch (major) {
case (0x84>>2): /* LD.W */
error = misaligned_load(regs, opcode, 1, 1, 1);
break;
case (0xb0>>2): /* LD.UW */
error = misaligned_load(regs, opcode, 1, 1, 0);
break;
case (0x88>>2): /* LD.L */
error = misaligned_load(regs, opcode, 1, 2, 1);
break;
case (0x8c>>2): /* LD.Q */
error = misaligned_load(regs, opcode, 1, 3, 0);
break;
case (0xa4>>2): /* ST.W */
error = misaligned_store(regs, opcode, 1, 1);
break;
case (0xa8>>2): /* ST.L */
error = misaligned_store(regs, opcode, 1, 2);
break;
case (0xac>>2): /* ST.Q */
error = misaligned_store(regs, opcode, 1, 3);
break;
case (0x40>>2): /* indexed loads */
switch (minor) {
case 0x1: /* LDX.W */
error = misaligned_load(regs, opcode, 0, 1, 1);
break;
case 0x5: /* LDX.UW */
error = misaligned_load(regs, opcode, 0, 1, 0);
break;
case 0x2: /* LDX.L */
error = misaligned_load(regs, opcode, 0, 2, 1);
break;
case 0x3: /* LDX.Q */
error = misaligned_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0x60>>2): /* indexed stores */
switch (minor) {
case 0x1: /* STX.W */
error = misaligned_store(regs, opcode, 0, 1);
break;
case 0x2: /* STX.L */
error = misaligned_store(regs, opcode, 0, 2);
break;
case 0x3: /* STX.Q */
error = misaligned_store(regs, opcode, 0, 3);
break;
default:
error = -1;
break;
}
break;
case (0x94>>2): /* FLD.S */
error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
break;
case (0x98>>2): /* FLD.P */
error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
break;
case (0x9c>>2): /* FLD.D */
error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
break;
case (0x1c>>2): /* floating indexed loads */
switch (minor) {
case 0x8: /* FLDX.S */
error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FLDX.P */
error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FLDX.D */
error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
case (0xb4>>2): /* FST.S */
error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
break;
case (0xb8>>2): /* FST.P */
error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
break;
case (0xbc>>2): /* FST.D */
error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
break;
case (0x3c>>2): /* floating indexed stores */
switch (minor) {
case 0x8: /* FSTX.S */
error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
break;
case 0xd: /* FSTX.P */
error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
break;
case 0x9: /* FSTX.D */
error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
break;
default:
error = -1;
break;
}
break;
default:
/* Fault */
error = -1;
break;
}
if (error < 0) {
return error;
} else {
regs->pc += 4; /* Skip the instruction that's just been emulated */
return 0;
}
}
static void do_unhandled_exception(int signr, char *str, unsigned long error,
struct pt_regs *regs)
{
if (user_mode(regs))
force_sig(signr);
die_if_no_fixup(str, regs, error);
}
#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
do_unhandled_exception(signr, str, error_code, regs); \
}
DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)
#if defined(CONFIG_SH64_ID2815_WORKAROUND)
#define OPCODE_INVALID 0
#define OPCODE_USER_VALID 1
#define OPCODE_PRIV_VALID 2
/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG 3
/* Table of valid opcodes for SHmedia mode.
Form a 10-bit value by concatenating the major/minor opcodes i.e.
opcode[31:26,19:16]. The 6 MSBs of this value index into the following
array. The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
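/*
 * Illustrative sketch, not part of the original file: how an opcode is
 * looked up in shmedia_opcode_table[] above. The major opcode (bits 31:26)
 * picks the table entry and the minor opcode (bits 19:16) picks a 2-bit
 * field inside it, yielding one of the OPCODE_* states.
 */
#include <stdint.h>
static unsigned int shmedia_opcode_state(const unsigned long table[64],
					 uint32_t opcode)
{
	unsigned int major = (opcode >> 26) & 0x3f;	/* table index */
	unsigned int minor = (opcode >> 16) & 0xf;	/* bit-pair selector */
	return (unsigned int)(table[major] >> (minor << 1)) & 0x3;
}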
/* Workaround SH5-101 cut2 silicon defect #2815 :
in some situations, inter-mode branches from SHcompact -> SHmedia
which should take ITLBMISS or EXECPROT exceptions at the target
falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
unsigned long pc, aligned_pc;
unsigned long index, shift;
unsigned long major, minor, combined;
unsigned long reserved_field;
int opcode_state;
int get_user_error;
int signr = SIGILL;
char *exception_name = "reserved_instruction";
pc = regs->pc;
/* SHcompact is not handled */
if (unlikely((pc & 3) == 0))
goto out;
/* SHmedia : check for defect. This requires executable vmas
to be readable too. */
aligned_pc = pc & ~3;
if (!access_ok(aligned_pc, sizeof(insn_size_t)))
get_user_error = -EFAULT;
else
get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
if (get_user_error < 0) {
/*
* Error trying to read opcode. This typically means a
* real fault, not a RESINST any more. So change the
* codes.
*/
exception_name = "address error (exec)";
signr = SIGSEGV;
goto out;
}
/* These bits are currently reserved as zero in all valid opcodes */
reserved_field = opcode & 0xf;
if (unlikely(reserved_field))
goto out; /* invalid opcode */
major = (opcode >> 26) & 0x3f;
minor = (opcode >> 16) & 0xf;
combined = (major << 4) | minor;
index = major;
shift = minor << 1;
opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
switch (opcode_state) {
case OPCODE_INVALID:
/* Trap. */
break;
case OPCODE_USER_VALID:
/*
* Restart the instruction: the branch to the instruction
* will now be from an RTE not from SHcompact so the
* silicon defect won't be triggered.
*/
return;
case OPCODE_PRIV_VALID:
if (!user_mode(regs)) {
/*
* Should only ever get here if a module has
* SHcompact code inside it. If so, the same fix
* up is needed.
*/
return; /* same reason */
}
/*
* Otherwise, user mode trying to execute a privileged
* instruction - fall through to trap.
*/
break;
case OPCODE_CTRL_REG:
/* If in privileged mode, return as above. */
if (!user_mode(regs))
return;
/* In user mode ... */
if (combined == 0x9f) { /* GETCON */
unsigned long regno = (opcode >> 20) & 0x3f;
if (regno >= 62)
return;
/* reserved/privileged control register => trap */
} else if (combined == 0x1bf) { /* PUTCON */
unsigned long regno = (opcode >> 4) & 0x3f;
if (regno >= 62)
return;
/* reserved/privileged control register => trap */
}
break;
default:
/* Fall through to trap. */
break;
}
out:
do_unhandled_exception(signr, exception_name, error_code, regs);
}
#else /* CONFIG_SH64_ID2815_WORKAROUND */
/* If the workaround isn't needed, this is just a straightforward reserved
instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)
#endif /* CONFIG_SH64_ID2815_WORKAROUND */
/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
die_if_kernel("exception", regs, ex);
}
asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
/* Syscall debug */
printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);
die_if_kernel("unknown trapa", regs, scId);
return -ENOSYS;
}
/* Implement misaligned load/store handling for kernel (and optionally for user
mode too). Limitation: only SHmedia mode code is handled - there is no
handling at all for misaligned accesses occurring in SHcompact code yet. */
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0)
do_unhandled_exception(SIGSEGV, "address error(load)",
error_code, regs);
}
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
if (misaligned_fixup(regs) < 0)
do_unhandled_exception(SIGSEGV, "address error(store)",
error_code, regs);
}
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
u64 peek_real_address_q(u64 addr);
u64 poke_real_address_q(u64 addr, u64 val);
unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
unsigned long long exp_cause;
/* It's not worth ioremapping the debug module registers for the amount
of access we make to them - just go direct to their physical
addresses. */
exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
if (exp_cause & ~4)
printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
(unsigned long)(exp_cause & 0xffffffff));
show_state();
/* Clear all DEBUGINT causes */
poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}
void per_cpu_trap_init(void)
{
/* Nothing to do for now, VBR initialization later. */
}
......@@ -3,14 +3,7 @@
* ld script to make SuperH Linux kernel
* Written by Niibe Yutaka and Paul Mundt
*/
#ifdef CONFIG_SUPERH64
#define LOAD_OFFSET PAGE_OFFSET
OUTPUT_ARCH(sh:sh5)
#else
#define LOAD_OFFSET 0
OUTPUT_ARCH(sh)
#endif
#include <asm/thread_info.h>
#include <asm/cache.h>
#include <asm/vmlinux.lds.h>
......@@ -28,14 +21,13 @@ SECTIONS
_text = .; /* Text and read-only data */
.empty_zero_page : AT(ADDR(.empty_zero_page) - LOAD_OFFSET) {
.empty_zero_page : AT(ADDR(.empty_zero_page)) {
*(.empty_zero_page)
} = 0
.text : AT(ADDR(.text) - LOAD_OFFSET) {
.text : AT(ADDR(.text)) {
HEAD_TEXT
TEXT_TEXT
EXTRA_TEXT
SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT
......@@ -62,7 +54,7 @@ SECTIONS
INIT_DATA_SECTION(16)
. = ALIGN(4);
.machvec.init : AT(ADDR(.machvec.init) - LOAD_OFFSET) {
.machvec.init : AT(ADDR(.machvec.init)) {
__machvec_start = .;
*(.machvec.init)
__machvec_end = .;
......@@ -74,8 +66,8 @@ SECTIONS
* .exit.text is discarded at runtime, not link time, to deal with
* references from __bug_table
*/
.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { EXIT_DATA }
.exit.text : AT(ADDR(.exit.text)) { EXIT_TEXT }
.exit.data : AT(ADDR(.exit.data)) { EXIT_DATA }
. = ALIGN(PAGE_SIZE);
__init_end = .;
......
#
# Makefile for the SH-5 specific library files..
#
# Copyright (C) 2000, 2001 Paolo Alberelli
# Copyright (C) 2003 - 2008 Paul Mundt
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Panic should really be compiled as PIC
lib-y := udelay.o panic.o memcpy.o memset.o \
copy_user_memcpy.o copy_page.o strcpy.o strlen.o
# Extracted from libgcc
lib-y += udivsi3.o udivdi3.o sdivsi3.o
/*
Copyright 2003 Richard Curnow, SuperH (UK) Ltd.
This file is subject to the terms and conditions of the GNU General Public
License. See the file "COPYING" in the main directory of this archive
for more details.
Tight version of memcpy for the case of just copying a page.
Prefetch strategy empirically optimised against RTL simulations
of SH5-101 cut2 eval chip with Cayman board DDR memory.
Parameters:
r2 : destination effective address (start of page)
r3 : source effective address (start of page)
Always copies 4096 bytes.
Points to review.
* Currently the prefetch is 4 lines ahead and the alloco is 2 lines ahead.
It seems like the prefetch needs to be at least 4 lines ahead to get
the data into the cache in time, and the allocos contend with outstanding
prefetches for the same cache set, so it's better to have the numbers
different.
*/
.section .text..SHmedia32,"ax"
.little
.balign 8
.global copy_page
copy_page:
/* Copy 4096 bytes worth of data from r3 to r2.
Do prefetches 4 lines ahead.
Do alloco 2 lines ahead */
pta 1f, tr1
pta 2f, tr2
pta 3f, tr3
ptabs r18, tr0
#if 0
/* TAKum03020 */
ld.q r3, 0x00, r63
ld.q r3, 0x20, r63
ld.q r3, 0x40, r63
ld.q r3, 0x60, r63
#endif
alloco r2, 0x00
synco ! TAKum03020
alloco r2, 0x20
synco ! TAKum03020
movi 3968, r6
add r2, r6, r6
addi r6, 64, r7
addi r7, 64, r8
sub r3, r2, r60
addi r60, 8, r61
addi r61, 8, r62
addi r62, 8, r23
addi r60, 0x80, r22
/* Minimal code size. The extra branches inside the loop don't cost much
because they overlap with the time spent waiting for prefetches to
complete. */
1:
#if 0
/* TAKum03020 */
bge/u r2, r6, tr2 ! skip prefetch for last 4 lines
ldx.q r2, r22, r63 ! prefetch 4 lines hence
#endif
2:
bge/u r2, r7, tr3 ! skip alloco for last 2 lines
alloco r2, 0x40 ! alloc destination line 2 lines ahead
synco ! TAKum03020
3:
ldx.q r2, r60, r36
ldx.q r2, r61, r37
ldx.q r2, r62, r38
ldx.q r2, r23, r39
st.q r2, 0, r36
st.q r2, 8, r37
st.q r2, 16, r38
st.q r2, 24, r39
addi r2, 32, r2
bgt/l r8, r2, tr1
blink tr0, r63 ! return
! SPDX-License-Identifier: GPL-2.0
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG2: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to be copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cycles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
/* Imported into Linux kernel by Richard Curnow. This is used to implement the
__copy_user function in the general case, so it has to be a distinct
function from intra-kernel memcpy to allow for exception fix-ups in the
event that the user pointer is bad somewhere in the copy (e.g. due to
running off the end of the vma).
Note, this algorithm will be slightly wasteful in the case where the source
and destination pointers are equally aligned, because the stlo/sthi pairs
could then be merged back into single stores. If there are a lot of cache
misses, this is probably offset by the stall lengths on the preloads.
*/
/* NOTE : Prefetches removed and allocos guarded by synco to avoid TAKum03020
* erratum. The first two prefetches are nop-ed out to avoid upsetting the
* instruction counts used in the jump address calculation.
* */
.section .text..SHmedia32,"ax"
.little
.balign 32
.global copy_user_memcpy
.global copy_user_memcpy_end
copy_user_memcpy:
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
nop ! ld.b r3,0,r63 ! TAKum03020
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
/* Rearranged to make cut2 safe */
.balign 8
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
.balign 8
L1: /* 0 byte memcpy */
nop
blink tr1,r63
nop
nop
nop
nop
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
nop ! ld.b r2,0,r63 ! TAKum03020
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
! ld.b r2, 0, r63 ! TAKum03020
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 ! could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
! ldx.q r22, r36, r63 ! TAKum03020
alloco r22, 32
synco
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
copy_user_memcpy_end:
nop
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
!
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
! SH5 code Copyright 2002 SuperH Ltd.
!
! Entry: ARG0: destination pointer
! ARG1: source pointer
! ARG2: byte count
!
! Exit: RESULT: destination pointer
! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatenate bytes
! into a longword on the SH, so this does a longword read and small
! writes.
!
! This implementation makes two assumptions about how it is called:
!
! 1.: If the byte count is nonzero, the address of the last byte to be
! copied is unsigned greater than the address of the first byte to
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to be copied, the rest of the word can be read
! without side effects.
! This could be easily changed by increasing the minimum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cycles on average.
! For SHmedia, the assumption is that any quadword can be read in its
! entirety if at least one byte is included in the copy.
!
.section .text..SHmedia32,"ax"
.globl memcpy
.type memcpy, @function
.align 5
memcpy:
#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
ld.b r3,0,r63
pta/l Large,tr0
movi 25,r0
bgeu/u r4,r0,tr0
nsb r4,r0
shlli r0,5,r0
movi (L1-L0+63*32 + 1) & 0xffff,r1
sub r1, r0, r0
L0: ptrel r0,tr0
add r2,r4,r5
ptabs r18,tr1
add r3,r4,r6
blink tr0,r63
/* Rearranged to make cut2 safe */
.balign 8
L4_7: /* 4..7 byte memcpy cntd. */
stlo.l r2, 0, r0
or r6, r7, r6
sthi.l r5, -1, r6
stlo.l r5, -4, r6
blink tr1,r63
.balign 8
L1: /* 0 byte memcpy */
nop
blink tr1,r63
nop
nop
nop
nop
L2_3: /* 2 or 3 byte memcpy cntd. */
st.b r5,-1,r6
blink tr1,r63
/* 1 byte memcpy */
ld.b r3,0,r0
st.b r2,0,r0
blink tr1,r63
L8_15: /* 8..15 byte memcpy cntd. */
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
/* 2 or 3 byte memcpy */
ld.b r3,0,r0
ld.b r2,0,r63
ld.b r3,1,r1
st.b r2,0,r0
pta/l L2_3,tr0
ld.b r6,-1,r6
st.b r2,1,r1
blink tr0, r63
/* 4 .. 7 byte memcpy */
LDUAL (r3, 0, r0, r1)
pta L4_7, tr0
ldlo.l r6, -4, r7
or r0, r1, r0
sthi.l r2, 3, r0
ldhi.l r6, -1, r6
blink tr0, r63
/* 8 .. 15 byte memcpy */
LDUAQ (r3, 0, r0, r1)
pta L8_15, tr0
ldlo.q r6, -8, r7
or r0, r1, r0
sthi.q r2, 7, r0
ldhi.q r6, -1, r6
blink tr0, r63
/* 16 .. 24 byte memcpy */
LDUAQ (r3, 0, r0, r1)
LDUAQ (r3, 8, r8, r9)
or r0, r1, r0
sthi.q r2, 7, r0
or r8, r9, r8
sthi.q r2, 15, r8
ldlo.q r6, -8, r7
ldhi.q r6, -1, r6
stlo.q r2, 8, r8
stlo.q r2, 0, r0
or r6, r7, r6
sthi.q r5, -1, r6
stlo.q r5, -8, r6
blink tr1,r63
Large:
ld.b r2, 0, r63
pta/l Loop_ua, tr1
ori r3, -8, r7
sub r2, r7, r22
sub r3, r2, r6
add r2, r4, r5
ldlo.q r3, 0, r0
addi r5, -16, r5
movi 64+8, r27 // could subtract r7 from that.
stlo.q r2, 0, r0
sthi.q r2, 7, r0
ldx.q r22, r6, r0
bgtu/l r27, r4, tr1
addi r5, -48, r27
pta/l Loop_line, tr0
addi r6, 64, r36
addi r6, -24, r19
addi r6, -16, r20
addi r6, -8, r21
Loop_line:
ldx.q r22, r36, r63
alloco r22, 32
addi r22, 32, r22
ldx.q r22, r19, r23
sthi.q r22, -25, r0
ldx.q r22, r20, r24
ldx.q r22, r21, r25
stlo.q r22, -32, r0
ldx.q r22, r6, r0
sthi.q r22, -17, r23
sthi.q r22, -9, r24
sthi.q r22, -1, r25
stlo.q r22, -24, r23
stlo.q r22, -16, r24
stlo.q r22, -8, r25
bgeu r27, r22, tr0
Loop_ua:
addi r22, 8, r22
sthi.q r22, -1, r0
stlo.q r22, -8, r0
ldx.q r22, r6, r0
bgtu/l r5, r22, tr1
add r3, r4, r7
ldlo.q r7, -8, r1
sthi.q r22, 7, r0
ldhi.q r7, -1, r7
ptabs r18,tr1
stlo.q r22, 0, r0
or r1, r7, r1
sthi.q r5, 15, r1
stlo.q r5, 8, r1
blink tr1, r63
.size memcpy,.-memcpy
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
!
! Fast SH memset
!
! by Toshiyasu Morita (tm@netcom.com)
!
! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
! Copyright 2002 SuperH Ltd.
!
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define SHHI shlld
#define SHLO shlrd
#else
#define SHHI shlrd
#define SHLO shlld
#endif
.section .text..SHmedia32,"ax"
.globl memset
.type memset, @function
.align 5
memset:
pta/l multiquad, tr0
andi r2, 7, r22
ptabs r18, tr2
mshflo.b r3,r3,r3
add r4, r22, r23
mperm.w r3, r63, r3 // Fill pattern now in every byte of r3
movi 8, r9
bgtu/u r23, r9, tr0 // multiquad
beqi/u r4, 0, tr2 // Return with size 0 - ensures no mem accesses
ldlo.q r2, 0, r7
shlli r4, 2, r4
movi -1, r8
SHHI r8, r4, r8
SHHI r8, r4, r8
mcmv r7, r8, r3
stlo.q r2, 0, r3
blink tr2, r63
multiquad:
pta/l lastquad, tr0
stlo.q r2, 0, r3
shlri r23, 3, r24
add r2, r4, r5
beqi/u r24, 1, tr0 // lastquad
pta/l loop, tr1
sub r2, r22, r25
andi r5, -8, r20 // calculate end address and
addi r20, -7*8, r8 // loop end address; This might overflow, so we need
// to use a different test before we start the loop
bge/u r24, r9, tr1 // loop
st.q r25, 8, r3
st.q r20, -8, r3
shlri r24, 1, r24
beqi/u r24, 1, tr0 // lastquad
st.q r25, 16, r3
st.q r20, -16, r3
beqi/u r24, 2, tr0 // lastquad
st.q r25, 24, r3
st.q r20, -24, r3
lastquad:
sthi.q r5, -1, r3
blink tr2,r63
loop:
!!! alloco r25, 32 // QQQ comment out for short-term fix to SHUK #3895.
// QQQ commenting out is logically correct, but sub-optimal
// QQQ Sean McGoogan - 4th April 2003.
st.q r25, 8, r3
st.q r25, 16, r3
st.q r25, 24, r3
st.q r25, 32, r3
addi r25, 32, r25
bgeu/l r8, r25, tr1 // loop
st.q r20, -40, r3
st.q r20, -32, r3
st.q r20, -24, r3
st.q r20, -16, r3
st.q r20, -8, r3
sthi.q r5, -1, r3
blink tr2,r63
.size memset,.-memset
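/*
 * Illustrative sketch, not part of the original file: the byte-replication
 * step of the SHmedia memset above, written in C. mshflo.b/mperm.w build a
 * quadword whose every byte is the fill value; mcmv then merges it into an
 * existing quadword under a byte mask for the partial head/tail stores.
 */
#include <stdint.h>
static uint64_t replicate_fill_byte(int c)
{
	/* e.g. c == 0xab gives 0xabababababababab */
	return (uint64_t)(uint8_t)c * 0x0101010101010101ULL;
}
static uint64_t merge_under_mask(uint64_t old, uint64_t fill, uint64_t byte_mask)
{
	/* Model of mcmv: keep 'old' where the mask is clear, 'fill' where set. */
	return (old & ~byte_mask) | (fill & byte_mask);
}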
/*
* Copyright (C) 2003 Richard Curnow, SuperH UK Limited
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
void
panic_handler(unsigned long panicPC, unsigned long panicSSR,
unsigned long panicEXPEVT)
{
/* Never return from the panic handler */
for (;;) ;
}
/* SPDX-License-Identifier: GPL-2.0 */
.global __sdivsi3
.global __sdivsi3_1
.global __sdivsi3_2
.section .text..SHmedia32,"ax"
.align 2
/* inputs: r4,r5 */
/* clobbered: r1,r18,r19,r20,r21,r25,tr0 */
/* result in r0 */
__sdivsi3:
__sdivsi3_1:
ptb __div_table,tr0
gettr tr0,r20
__sdivsi3_2:
nsb r5, r1
shlld r5, r1, r25 /* normalize; [-2 ..1, 1..2) in s2.62 */
shari r25, 58, r21 /* extract 5(6) bit index (s2.4 with hole -1..1) */
/* bubble */
ldx.ub r20, r21, r19 /* u0.8 */
shari r25, 32, r25 /* normalize to s2.30 */
shlli r21, 1, r21
muls.l r25, r19, r19 /* s2.38 */
ldx.w r20, r21, r21 /* s2.14 */
ptabs r18, tr0
shari r19, 24, r19 /* truncate to s2.14 */
sub r21, r19, r19 /* some 11 bit inverse in s1.14 */
muls.l r19, r19, r21 /* u0.28 */
sub r63, r1, r1
addi r1, 92, r1
muls.l r25, r21, r18 /* s2.58 */
shlli r19, 45, r19 /* multiply by two and convert to s2.58 */
/* bubble */
sub r19, r18, r18
shari r18, 28, r18 /* some 22 bit inverse in s1.30 */
muls.l r18, r25, r0 /* s2.60 */
muls.l r18, r4, r25 /* s32.30 */
/* bubble */
shari r0, 16, r19 /* s-16.44 */
muls.l r19, r18, r19 /* s-16.74 */
shari r25, 63, r0
shari r4, 14, r18 /* s19.-14 */
shari r19, 30, r19 /* s-16.44 */
muls.l r19, r18, r19 /* s15.30 */
xor r21, r0, r21 /* You could also use the constant 1 << 27. */
add r21, r25, r21
sub r21, r19, r21
shard r21, r1, r21
sub r21, r0, r0
blink tr0, r63
/* This table has been generated by divtab.c .
Defects for bias -330:
Max defect: 6.081536e-07 at -1.000000e+00
Min defect: 2.849516e-08 at 1.030651e+00
Max 2nd step defect: 9.606539e-12 at -1.000000e+00
Min 2nd step defect: 0.000000e+00 at 0.000000e+00
Defect at 1: 1.238659e-07
Defect at -2: 1.061708e-07 */
.balign 2
.type __div_table,@object
.size __div_table,128
/* negative division constants */
.word -16638
.word -17135
.word -17737
.word -18433
.word -19103
.word -19751
.word -20583
.word -21383
.word -22343
.word -23353
.word -24407
.word -25582
.word -26863
.word -28382
.word -29965
.word -31800
/* negative division factors */
.byte 66
.byte 70
.byte 75
.byte 81
.byte 87
.byte 93
.byte 101
.byte 109
.byte 119
.byte 130
.byte 142
.byte 156
.byte 172
.byte 192
.byte 214
.byte 241
.skip 16
.global __div_table
__div_table:
.skip 16
/* positive division factors */
.byte 241
.byte 214
.byte 192
.byte 172
.byte 156
.byte 142
.byte 130
.byte 119
.byte 109
.byte 101
.byte 93
.byte 87
.byte 81
.byte 75
.byte 70
.byte 66
/* positive division constants */
.word 31801
.word 29966
.word 28383
.word 26864
.word 25583
.word 24408
.word 23354
.word 22344
.word 21384
.word 20584
.word 19752
.word 19104
.word 18434
.word 17738
.word 17136
.word 16639
/* SPDX-License-Identifier: GPL-2.0 */
/* Cloned and hacked for uClibc by Paul Mundt, December 2003 */
/* Modified by SuperH, Inc. September 2003 */
! Entry: arg0: destination
! arg1: source
! Exit: result: destination
!
! SH5 code Copyright 2002 SuperH Ltd.
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define SHHI shlld
#define SHLO shlrd
#else
#define SHHI shlrd
#define SHLO shlld
#endif
.section .text..SHmedia32,"ax"
.globl strcpy
.type strcpy, @function
.align 5
strcpy:
pta/l shortstring,tr1
ldlo.q r3,0,r4
ptabs r18,tr4
shlli r3,3,r7
addi r2, 8, r0
mcmpeq.b r4,r63,r6
SHHI r6,r7,r6
bnei/u r6,0,tr1 // shortstring
pta/l no_lddst, tr2
ori r3,-8,r23
sub r2, r23, r0
sub r3, r2, r21
addi r21, 8, r20
ldx.q r0, r21, r5
pta/l loop, tr0
ori r2,-8,r22
mcmpeq.b r5, r63, r6
bgt/u r22, r23, tr2 // no_lddst
// r22 < r23 : Need to do a load from the destination.
// r22 == r23 : Doesn't actually need to load from destination,
// but still can be handled here.
ldlo.q r2, 0, r9
movi -1, r8
SHLO r8, r7, r8
mcmv r4, r8, r9
stlo.q r2, 0, r9
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
no_lddst:
// r22 > r23: note that for r22 == r23 the sthi.q would clobber
// bytes before the destination region.
stlo.q r2, 0, r4
SHHI r4, r7, r4
sthi.q r0, -1, r4
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
shortstring:
#if __BYTE_ORDER != __LITTLE_ENDIAN
pta/l shortstring2,tr1
byterev r4,r4
#endif
shortstring2:
st.b r0,-8,r4
andi r4,0xff,r5
shlri r4,8,r4
addi r0,1,r0
bnei/l r5,0,tr1
blink tr4,r63 // return
.balign 8
loop:
stlo.q r0, 0, r5
ldx.q r0, r20, r4
addi r0, 16, r0
sthi.q r0, -9, r5
mcmpeq.b r4, r63, r6
bnei/u r6, 0, tr1 // shortstring
ldx.q r0, r21, r5
stlo.q r0, -8, r4
sthi.q r0, -1, r4
mcmpeq.b r5, r63, r6
beqi/l r6, 0, tr0 // loop
add r5, r63, r4
addi r0, 8, r0
blink tr1, r63 // shortstring
.size strcpy,.-strcpy
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Simplistic strlen() implementation for SHmedia.
*
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
*/
.section .text..SHmedia32,"ax"
.globl strlen
.type strlen,@function
.balign 16
strlen:
ptabs r18, tr4
/*
* Note: We could easily deal with the NULL case here with a simple
* sanity check, though it seems that the behavior we want is to fault
* in the event that r2 == NULL, so we don't bother.
*/
/* beqi r2, 0, tr4 */ ! Sanity check
movi -1, r0
pta/l loop, tr0
loop:
ld.b r2, 0, r1
addi r2, 1, r2
addi r0, 1, r0
bnei/l r1, 0, tr0
or r0, r63, r2
blink tr4, r63
.size strlen,.-strlen
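/*
 * Illustrative sketch, not part of the original file: the same simplistic
 * byte-at-a-time algorithm as the SHmedia strlen above, in C. The count
 * starts at -1 so the increment for the terminating NUL byte leaves it
 * equal to the string length.
 */
#include <stddef.h>
static size_t simple_strlen(const char *s)
{
	size_t n = (size_t)-1;
	char c;
	do {
		c = *s++;
		n++;
	} while (c != '\0');
	return n;
}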
/*
* arch/sh/lib64/udelay.c
*
* Delay routines, using a pre-computed "loops_per_jiffy" value.
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003, 2004 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/sched.h>
#include <asm/param.h>
/*
* Use only for very small delays (< 1 msec).
*
* The active part of our cycle counter is only 32-bits wide, and
* we're treating the difference between two marks as signed. On
* a 1GHz box, that's about 2 seconds.
*/
void __delay(unsigned long loops)
{
long long dummy;
__asm__ __volatile__("gettr tr0, %1\n\t"
"pta $+4, tr0\n\t"
"addi %0, -1, %0\n\t"
"bne %0, r63, tr0\n\t"
"ptabs %1, tr0\n\t":"=r"(loops),
"=r"(dummy)
:"0"(loops));
}
void __const_udelay(unsigned long xloops)
{
__delay(xloops * (HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy));
}
void __udelay(unsigned long usecs)
{
__const_udelay(usecs * 0x000010c6); /* 2**32 / 1000000 */
}
void __ndelay(unsigned long nsecs)
{
__const_udelay(nsecs * 0x00000005);
}
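/*
 * Illustrative check, not part of the original file: the scaling constant
 * used by __udelay() above. 0x000010c6 is 2^32 / 10^6 rounded down, so the
 * usecs * 0x000010c6 product is the microsecond count expressed as a
 * 2^32-scaled fraction of a second, ready to be scaled by
 * HZ * loops_per_jiffy in __const_udelay().
 */
#include <assert.h>
#include <stdint.h>
static void udelay_constant_selftest(void)
{
	assert((uint32_t)((1ULL << 32) / 1000000) == 0x000010c6);
}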
/* SPDX-License-Identifier: GPL-2.0 */
.section .text..SHmedia32,"ax"
.align 2
.global __udivdi3
__udivdi3:
shlri r3,1,r4
nsb r4,r22
shlld r3,r22,r6
shlri r6,49,r5
movi 0xffffffffffffbaf1,r21 /* .l shift count 17. */
sub r21,r5,r1
mmulfx.w r1,r1,r4
mshflo.w r1,r63,r1
sub r63,r22,r20 // r63 == 64 % 64
mmulfx.w r5,r4,r4
pta large_divisor,tr0
addi r20,32,r9
msub.w r1,r4,r1
madd.w r1,r1,r1
mmulfx.w r1,r1,r4
shlri r6,32,r7
bgt/u r9,r63,tr0 // large_divisor
mmulfx.w r5,r4,r4
shlri r2,32+14,r19
addi r22,-31,r0
msub.w r1,r4,r1
mulu.l r1,r7,r4
addi r1,-3,r5
mulu.l r5,r19,r5
sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
the case may be, %0000000000000000 000.11111111111, still */
muls.l r1,r4,r4 /* leaving at least one sign bit. */
mulu.l r5,r3,r8
mshalds.l r1,r21,r1
shari r4,26,r4
shlld r8,r0,r8
add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
sub r2,r8,r2
/* Can do second step of 64 : 32 div now, using r1 and the rest in r2. */
shlri r2,22,r21
mulu.l r21,r1,r21
shlld r5,r0,r8
addi r20,30-22,r0
shlrd r21,r0,r21
mulu.l r21,r3,r5
add r8,r21,r8
mcmpgt.l r21,r63,r21 // See Note 1
addi r20,30,r0
mshfhi.l r63,r21,r21
sub r2,r5,r2
andc r2,r21,r2
/* small divisor: need a third divide step */
mulu.l r2,r1,r7
ptabs r18,tr0
addi r2,1,r2
shlrd r7,r0,r7
mulu.l r7,r3,r5
add r8,r7,r8
sub r2,r3,r2
cmpgt r2,r5,r5
add r8,r5,r2
/* could test r3 here to check for divide by zero. */
blink tr0,r63
large_divisor:
mmulfx.w r5,r4,r4
shlrd r2,r9,r25
shlri r25,32,r8
msub.w r1,r4,r1
mulu.l r1,r7,r4
addi r1,-3,r5
mulu.l r5,r8,r5
sub r63,r4,r4 // Negate to make sure r1 ends up <= 1/r2
shlri r4,2,r4 /* chop off leading %0000000000000000 001.00000000000 - or, as
the case may be, %0000000000000000 000.11111111111, still */
muls.l r1,r4,r4 /* leaving at least one sign bit. */
shlri r5,14-1,r8
mulu.l r8,r7,r5
mshalds.l r1,r21,r1
shari r4,26,r4
add r1,r4,r1 // 31 bit unsigned reciprocal now in r1 (msb equiv. 0.5)
sub r25,r5,r25
/* Can do second step of 64 : 32 div now, using r1 and the rest in r25. */
shlri r25,22,r21
mulu.l r21,r1,r21
pta no_lo_adj,tr0
addi r22,32,r0
shlri r21,40,r21
mulu.l r21,r7,r5
add r8,r21,r8
shlld r2,r0,r2
sub r25,r5,r25
bgtu/u r7,r25,tr0 // no_lo_adj
addi r8,1,r8
sub r25,r7,r25
no_lo_adj:
mextr4 r2,r25,r2
/* large_divisor: only needs a few adjustments. */
mulu.l r8,r6,r5
ptabs r18,tr0
/* bubble */
cmpgtu r5,r2,r5
sub r8,r5,r2
blink tr0,r63
/* Note 1: To shift the result of the second divide stage so that the result
always fits into 32 bits, yet we still reduce the rest sufficiently
would require a lot of instructions to do the shifts just right. Using
the full 64 bit shift result to multiply with the divisor would require
four extra instructions for the upper 32 bits (shift / mulu / shift / sub).
Fortunately, if the upper 32 bits of the shift result are nonzero, we
know that the rest after taking this partial result into account will
fit into 32 bits. So we just clear the upper 32 bits of the rest if the
upper 32 bits of the partial result are nonzero. */
/* SPDX-License-Identifier: GPL-2.0 */
.global __udivsi3
.section .text..SHmedia32,"ax"
.align 2
/*
inputs: r4,r5
clobbered: r18,r19,r20,r21,r22,r25,tr0
result in r0.
*/
__udivsi3:
addz.l r5,r63,r22
nsb r22,r0
shlld r22,r0,r25
shlri r25,48,r25
movi 0xffffffffffffbb0c,r20 /* shift count equiv 76 */
sub r20,r25,r21
mmulfx.w r21,r21,r19
mshflo.w r21,r63,r21
ptabs r18,tr0
mmulfx.w r25,r19,r19
sub r20,r0,r0
/* bubble */
msub.w r21,r19,r19
/*
* It would be nice for scheduling to do this add to r21 before
* the msub.w, but we need a different value for r19 to keep
* errors under control.
*/
addi r19,-2,r21
mulu.l r4,r21,r18
mmulfx.w r19,r19,r19
shlli r21,15,r21
shlrd r18,r0,r18
mulu.l r18,r22,r20
mmacnfx.wl r25,r19,r21
/* bubble */
sub r4,r20,r25
mulu.l r25,r21,r19
addi r0,14,r0
/* bubble */
shlrd r19,r0,r19
mulu.l r19,r22,r20
add r18,r19,r18
/* bubble */
sub.l r25,r20,r25
mulu.l r25,r21,r19
addz.l r25,r63,r25
sub r25,r22,r25
shlrd r19,r0,r19
mulu.l r19,r22,r20
addi r25,1,r25
add r18,r19,r18
cmpgt r25,r20,r25
add.l r18,r25,r0
blink tr0,r63
......@@ -15,8 +15,7 @@ config MMU
config PAGE_OFFSET
hex
default "0x80000000" if MMU && SUPERH32
default "0x20000000" if MMU && SUPERH64
default "0x80000000" if MMU
default "0x00000000"
config FORCE_MAX_ZONEORDER
......@@ -72,12 +71,11 @@ config MEMORY_SIZE
config 29BIT
def_bool !32BIT
depends on SUPERH32
select UNCACHED_MAPPING
config 32BIT
bool
default y if CPU_SH5 || !MMU
default !MMU
config PMB
bool "Support 32-bit physical addressing through PMB"
......@@ -152,7 +150,7 @@ config ARCH_MEMORY_PROBE
config IOREMAP_FIXED
def_bool y
depends on X2TLB || SUPERH64
depends on X2TLB
config UNCACHED_MAPPING
bool
......@@ -184,7 +182,7 @@ config PAGE_SIZE_16KB
config PAGE_SIZE_64KB
bool "64kB"
depends on !MMU || CPU_SH4 || CPU_SH5
depends on !MMU || CPU_SH4
help
This enables support for 64kB pages, possible on all SH-4
CPUs and later.
......@@ -216,10 +214,6 @@ config HUGETLB_PAGE_SIZE_64MB
bool "64MB"
depends on X2TLB
config HUGETLB_PAGE_SIZE_512MB
bool "512MB"
depends on CPU_SH5
endchoice
config SCHED_MC
......@@ -242,7 +236,7 @@ config SH7705_CACHE_32KB
choice
prompt "Cache mode"
default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
config CACHE_WRITEBACK
......
......@@ -10,15 +10,14 @@ cacheops-$(CONFIG_CPU_SUBTYPE_SH7619) := cache-sh2.o
cacheops-$(CONFIG_CPU_SH2A) := cache-sh2a.o
cacheops-$(CONFIG_CPU_SH3) := cache-sh3.o
cacheops-$(CONFIG_CPU_SH4) := cache-sh4.o flush-sh4.o
cacheops-$(CONFIG_CPU_SH5) := cache-sh5.o flush-sh4.o
cacheops-$(CONFIG_SH7705_CACHE_32KB) += cache-sh7705.o
cacheops-$(CONFIG_CPU_SHX3) += cache-shx3.o
obj-y += $(cacheops-y)
mmu-y := nommu.o extable_32.o
mmu-$(CONFIG_MMU) := extable_$(BITS).o fault.o ioremap.o kmap.o \
pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
mmu-$(CONFIG_MMU) := extable_32.o fault.o ioremap.o kmap.o \
pgtable.o tlbex_32.o tlbflush_32.o
obj-y += $(mmu-y)
......@@ -31,7 +30,6 @@ ifdef CONFIG_MMU
debugfs-$(CONFIG_CPU_SH4) += tlb-debugfs.o
tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-urb.o
tlb-$(CONFIG_CPU_SH5) := tlb-sh5.o
tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-urb.o
obj-y += $(tlb-y)
endif
......@@ -46,29 +44,4 @@ obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o
GCOV_PROFILE_pmb.o := n
# Special flags for tlbex_64.o. This puts restrictions on the number of
# caller-save registers that the compiler can target when building this file.
# This is required because the code is called from a context in entry.S where
# very few registers have been saved in the exception handler (for speed
# reasons).
# The caller save registers that have been saved and which can be used are
# r2,r3,r4,r5 : argument passing
# r15, r18 : SP and LINK
# tr0-4 : allow all caller-save TR's. The compiler seems to be able to make
# use of them, so it's probably beneficial to performance to save them
# and have them available for it.
#
# The resources not listed below are callee save, i.e. the compiler is free to
# use any of them and will spill them to the stack itself.
CFLAGS_tlbex_64.o += -ffixed-r7 \
-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
-ffixed-r41 -ffixed-r42 -ffixed-r43 \
-ffixed-r60 -ffixed-r61 -ffixed-r62 \
-fomit-frame-pointer
ccflags-y := -Werror
/*
* arch/sh/mm/cache-sh5.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2002 Benedict Gaster
* Copyright (C) 2003 Richard Curnow
* Copyright (C) 2003 - 2008 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
extern void __weak sh4__flush_region_init(void);
/* Wired TLB entry for the D-cache */
static unsigned long long dtlb_cache_slot;
/*
* The following group of functions deal with mapping and unmapping a
* temporary page into a DTLB slot that has been set aside for exclusive
* use.
*/
static inline void
sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
unsigned long paddr)
{
local_irq_disable();
sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
}
static inline void sh64_teardown_dtlb_cache_slot(void)
{
sh64_teardown_tlb_slot(dtlb_cache_slot);
local_irq_enable();
}
static inline void sh64_icache_inv_all(void)
{
unsigned long long addr, flag, data;
unsigned long flags;
addr = ICCR0;
flag = ICCR0_ICI;
data = 0;
/* Make this a critical section for safety (probably not strictly necessary.) */
local_irq_save(flags);
/* Without %1 it gets inexplicably wrong */
__asm__ __volatile__ (
"getcfg %3, 0, %0\n\t"
"or %0, %2, %0\n\t"
"putcfg %3, 0, %0\n\t"
"synci"
: "=&r" (data)
: "0" (data), "r" (flag), "r" (addr));
local_irq_restore(flags);
}
static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
{
/* Invalidate range of addresses [start,end] from the I-cache, where
* the addresses lie in the kernel superpage. */
unsigned long long ullend, addr, aligned_start;
aligned_start = (unsigned long long)(signed long long)(signed long) start;
addr = L1_CACHE_ALIGN(aligned_start);
ullend = (unsigned long long) (signed long long) (signed long) end;
while (addr <= ullend) {
__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
addr += L1_CACHE_BYTES;
}
}
static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
{
/* If we get called, we know that vma->vm_flags contains VM_EXEC.
Also, eaddr is page-aligned. */
unsigned int cpu = smp_processor_id();
unsigned long long addr, end_addr;
unsigned long flags = 0;
unsigned long running_asid, vma_asid;
addr = eaddr;
end_addr = addr + PAGE_SIZE;
/* Check whether we can use the current ASID for the I-cache
invalidation. For example, if we're called via
access_process_vm->flush_cache_page->here, (e.g. when reading from
/proc), 'running_asid' will be that of the reader, not of the
victim.
Also, note the risk that we might get pre-empted between the ASID
compare and blocking IRQs, and before we regain control, the
pid->ASID mapping changes. However, the whole cache will get
invalidated when the mapping is renewed, so the worst that can
happen is that the loop below ends up invalidating somebody else's
cache entries.
*/
running_asid = get_asid();
vma_asid = cpu_asid(cpu, vma->vm_mm);
if (running_asid != vma_asid) {
local_irq_save(flags);
switch_and_save_asid(vma_asid);
}
while (addr < end_addr) {
/* Worth unrolling a little */
__asm__ __volatile__("icbi %0, 0" : : "r" (addr));
__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
addr += 128;
}
if (running_asid != vma_asid) {
switch_and_save_asid(running_asid);
local_irq_restore(flags);
}
}
static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* Used for invalidating big chunks of I-cache, i.e. assume the range
is whole pages. If 'start' or 'end' is not page aligned, the code
is conservative and invalidates to the ends of the enclosing pages.
This is functionally OK, just a performance loss. */
/* See the comments below in sh64_dcache_purge_user_range() regarding
the choice of algorithm. However, for the I-cache option (2) isn't
available because there are no physical tags so aliases can't be
resolved. The icbi instruction has to be used through the user
mapping. Because icbi is cheaper than ocbp on a cache hit, it
would be cheaper to use the selective code for a large range than is
possible with the D-cache. Just assume 64 for now as a working
figure.
*/
int n_pages;
if (!mm)
return;
n_pages = ((end - start) >> PAGE_SHIFT);
if (n_pages >= 64) {
sh64_icache_inv_all();
} else {
unsigned long aligned_start;
unsigned long eaddr;
unsigned long after_last_page_start;
unsigned long mm_asid, current_asid;
unsigned long flags = 0;
mm_asid = cpu_asid(smp_processor_id(), mm);
current_asid = get_asid();
if (mm_asid != current_asid) {
/* Switch ASID and run the invalidate loop under cli */
local_irq_save(flags);
switch_and_save_asid(mm_asid);
}
aligned_start = start & PAGE_MASK;
after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
while (aligned_start < after_last_page_start) {
struct vm_area_struct *vma;
unsigned long vma_end;
vma = find_vma(mm, aligned_start);
if (!vma || (aligned_start <= vma->vm_end)) {
/* Avoid getting stuck in an error condition */
aligned_start += PAGE_SIZE;
continue;
}
vma_end = vma->vm_end;
if (vma->vm_flags & VM_EXEC) {
/* Executable */
eaddr = aligned_start;
while (eaddr < vma_end) {
sh64_icache_inv_user_page(vma, eaddr);
eaddr += PAGE_SIZE;
}
}
aligned_start = vma->vm_end; /* Skip to start of next region */
}
if (mm_asid != current_asid) {
switch_and_save_asid(current_asid);
local_irq_restore(flags);
}
}
}
static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
{
/* The icbi instruction never raises ITLBMISS. i.e. if there's not a
cache hit on the virtual tag the instruction ends there, without a
TLB lookup. */
unsigned long long aligned_start;
unsigned long long ull_end;
unsigned long long addr;
ull_end = end;
/* Just invalidate over the range using the natural addresses. TLB
miss handling will be OK (TBC). Since it's for the current process,
either we're already in the right ASID context, or the ASIDs have
been recycled since we were last active in which case we might just
invalidate another process's I-cache entries: no worries, just a
performance drop for him. */
aligned_start = L1_CACHE_ALIGN(start);
addr = aligned_start;
while (addr < ull_end) {
__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
__asm__ __volatile__ ("nop");
__asm__ __volatile__ ("nop");
addr += L1_CACHE_BYTES;
}
}
/* Buffer used as the target of alloco instructions to purge data from cache
sets by natural eviction. -- RPC */
#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
{
/* Purge all ways in a particular block of sets, specified by the base
set number and number of sets. Can handle wrap-around, if that's
needed. */
int dummy_buffer_base_set;
unsigned long long eaddr, eaddr0, eaddr1;
int j;
int set_offset;
dummy_buffer_base_set = ((int)&dummy_alloco_area &
cpu_data->dcache.entry_mask) >>
cpu_data->dcache.entry_shift;
set_offset = sets_to_purge_base - dummy_buffer_base_set;
for (j = 0; j < n_sets; j++, set_offset++) {
set_offset &= (cpu_data->dcache.sets - 1);
eaddr0 = (unsigned long long)dummy_alloco_area +
(set_offset << cpu_data->dcache.entry_shift);
/*
* Do one alloco which hits the required set per cache
* way. For write-back mode, this will purge the #ways
* resident lines. There's little point unrolling this
* loop because the allocos stall more if they're too
* close together.
*/
eaddr1 = eaddr0 + cpu_data->dcache.way_size *
cpu_data->dcache.ways;
for (eaddr = eaddr0; eaddr < eaddr1;
eaddr += cpu_data->dcache.way_size) {
__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
__asm__ __volatile__ ("synco"); /* TAKum03020 */
}
eaddr1 = eaddr0 + cpu_data->dcache.way_size *
cpu_data->dcache.ways;
for (eaddr = eaddr0; eaddr < eaddr1;
eaddr += cpu_data->dcache.way_size) {
/*
* Load from each address. Required because
* alloco is a NOP if the cache is write-through.
*/
if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
__raw_readb((unsigned long)eaddr);
}
}
/*
* Don't use OCBI to invalidate the lines. That costs cycles
* directly. If the dummy block is just left resident, it will
* naturally get evicted as required.
*/
}
/*
* Purge the entire contents of the dcache. The most efficient way to
* achieve this is to use alloco instructions on a region of unused
* memory equal in size to the cache, thereby causing the current
* contents to be discarded by natural eviction. The alternative, namely
* reading every tag, setting up a mapping for the corresponding page and
* doing an OCBP for the line, would be much more expensive.
*/
static void sh64_dcache_purge_all(void)
{
sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
}
/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
anything else in the kernel */
#define MAGIC_PAGE0_START 0xffffffffec000000ULL
/* Purge the physical page 'paddr' from the cache. It's known that any
* cache lines requiring attention have the same page colour as the
* address 'eaddr'.
*
* This relies on the fact that the D-cache matches on physical tags when
* no virtual tag matches. So we create an alias for the original page
* and purge through that. (Alternatively, we could have done this by
* switching ASID to match the original mapping and purged through that,
* but that involves ASID switching cost + probably a TLBMISS + refill
* anyway.)
*/
static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
unsigned long eaddr)
{
unsigned long long magic_page_start;
unsigned long long magic_eaddr, magic_eaddr_end;
magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
/* As long as the kernel is not pre-emptible, this doesn't need to be
under cli/sti. */
sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
magic_eaddr = magic_page_start;
magic_eaddr_end = magic_eaddr + PAGE_SIZE;
while (magic_eaddr < magic_eaddr_end) {
/* Little point in unrolling this loop - the OCBPs are blocking
and won't go any quicker (i.e. the loop overhead is parallel
to part of the OCBP execution.) */
__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
magic_eaddr += L1_CACHE_BYTES;
}
sh64_teardown_dtlb_cache_slot();
}
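/*
 * Illustrative sketch, not part of the original file: the alias-address
 * calculation used above. The temporary mapping must share the D-cache
 * synonym bits ("page colour") of the user address so the OCBPs walk the
 * same cache sets. The mask value below is an assumption for illustration
 * only; in the real code CACHE_OC_SYN_MASK supplies it.
 */
#include <stdint.h>
#define EXAMPLE_MAGIC_PAGE0_START	0xffffffffec000000ULL
#define EXAMPLE_CACHE_OC_SYN_MASK	0x3000ULL	/* assumed: 2 synonym bits */
static uint64_t purge_alias_for(uint64_t user_eaddr)
{
	return EXAMPLE_MAGIC_PAGE0_START + (user_eaddr & EXAMPLE_CACHE_OC_SYN_MASK);
}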
/*
* Purge a page given its physical start address, by creating a temporary
* 1 page mapping and purging across that. Even if we know the virtual
* address (& vma or mm) of the page, the method here is more elegant
* because it avoids issues of coping with page faults on the purge
* instructions (i.e. no special-case code required in the critical path
* in the TLB miss handling).
*/
static void sh64_dcache_purge_phy_page(unsigned long paddr)
{
unsigned long long eaddr_start, eaddr, eaddr_end;
int i;
/* As long as the kernel is not pre-emptible, this doesn't need to be
under cli/sti. */
eaddr_start = MAGIC_PAGE0_START;
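/* One pass per possible page colour (cache synonym) of this physical page. */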
for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
eaddr = eaddr_start;
eaddr_end = eaddr + PAGE_SIZE;
while (eaddr < eaddr_end) {
__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
eaddr += L1_CACHE_BYTES;
}
sh64_teardown_dtlb_cache_slot();
eaddr_start += PAGE_SIZE;
}
}
static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
unsigned long addr, unsigned long end)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
spinlock_t *ptl;
unsigned long paddr;
if (!mm)
return; /* No way to find physical address of page */
pgd = pgd_offset(mm, addr);
if (pgd_bad(*pgd))
return;
pud = pud_offset(pgd, addr);
if (pud_none(*pud) || pud_bad(*pud))
return;
pmd = pmd_offset(pud, addr);
if (pmd_none(*pmd) || pmd_bad(*pmd))
return;
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
do {
entry = *pte;
if (pte_none(entry) || !pte_present(entry))
continue;
paddr = pte_val(entry) & PAGE_MASK;
sh64_dcache_purge_coloured_phy_page(paddr, addr);
} while (pte++, addr += PAGE_SIZE, addr != end);
pte_unmap_unlock(pte - 1, ptl);
}
/*
* There are at least 5 choices for the implementation of this, with
* pros (+), cons(-), comments(*):
*
* 1. ocbp each line in the range through the original user's ASID
* + no lines spuriously evicted
* - tlbmiss handling: must either handle faults on demand (=> extra
* special-case code in the tlbmiss critical path) or map the page in
* advance (=> flush_tlb_range in advance to avoid multiple hits)
* - ASID switching
* - expensive for large ranges
*
* 2. temporarily map each page in the range to a special effective
* address and ocbp through the temporary mapping; relies on the
* fact that SH-5 OCB* always do TLB lookup and match on ptags (they
* never look at the etags)
* + no spurious evictions
* - expensive for large ranges
* * surely cheaper than (1)
*
* 3. walk all the lines in the cache, check the tags, if a match
* occurs create a page mapping to ocbp the line through
* + no spurious evictions
* - tag inspection overhead (especially for small ranges)
* - potential cost of setting up/tearing down page mapping for
* every line that matches the range
* * cost partly independent of range size
*
* 4. walk all the lines in the cache, check the tags, if a match
* occurs use 4 * alloco to purge the line (+3 other probably
* innocent victims) by natural eviction
* + no tlb mapping overheads
* - spurious evictions
* - tag inspection overhead
*
* 5. implement like flush_cache_all
* + no tag inspection overhead
* - spurious evictions
* - bad for small ranges
*
* (1) can be ruled out as more expensive than (2). (2) appears best
* for small ranges. The choice between (3), (4) and (5) for large
* ranges and the range size for the large/small boundary need
* benchmarking to determine.
*
* For now use approach (2) for small ranges and (5) for large ones.
*/
static void sh64_dcache_purge_user_range(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
int n_pages = ((end - start) >> PAGE_SHIFT);
if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
sh64_dcache_purge_all();
} else {
/* Small range, covered by a single page table page */
start &= PAGE_MASK; /* should already be so */
end = PAGE_ALIGN(end); /* should already be so */
sh64_dcache_purge_user_pages(mm, start, end);
}
}
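/*
 * A minimal stand-alone sketch of the PMD-boundary half of the test above,
 * assuming a 4 KiB page and an illustrative 4 MiB PMD span purely for
 * demonstration; the real PMD_MASK comes from the sh64 pgtable headers.
 */
#include <stdio.h>

/* Illustrative values only; the kernel takes these from <asm/pgtable.h>. */
#define EXAMPLE_PMD_SHIFT	22
#define EXAMPLE_PMD_MASK	(~((1UL << EXAMPLE_PMD_SHIFT) - 1))

/*
 * Non-zero iff 'start' and the last byte of the range sit under different
 * page-table pages, mirroring the check in sh64_dcache_purge_user_range().
 */
static int crosses_pmd(unsigned long start, unsigned long end)
{
	return ((start ^ (end - 1)) & EXAMPLE_PMD_MASK) != 0;
}

int main(void)
{
	printf("%d\n", crosses_pmd(0x00400000UL, 0x00401000UL)); /* 0: one PMD */
	printf("%d\n", crosses_pmd(0x007ff000UL, 0x00801000UL)); /* 1: two PMDs */
	return 0;
}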
/*
* Invalidate the entire contents of both caches, after writing back to
* memory any dirty data from the D-cache.
*/
static void sh5_flush_cache_all(void *unused)
{
sh64_dcache_purge_all();
sh64_icache_inv_all();
}
/*
* Invalidate an entire user-address space from both caches, after
* writing back dirty data (e.g. for shared mmap etc).
*
* This could be coded selectively by inspecting all the tags then
* doing 4*alloco on any set containing a match (as for
* flush_cache_range), but fork/exit/execve (where this is called from)
* are expensive anyway.
*
* Have to do a purge here, despite the comments re I-cache below.
* There could be odd-coloured dirty data associated with the mm still
* in the cache - if this gets written out through natural eviction
* after the kernel has reused the page there will be chaos.
*
* The mm being torn down won't ever be active again, so any Icache
* lines tagged with its ASID won't be visible for the rest of the
* lifetime of this ASID cycle. Before the ASID gets reused, there
* will be a flush_cache_all. Hence we don't need to touch the
* I-cache. This is similar to the lack of action needed in
* flush_tlb_mm - see fault.c.
*/
static void sh5_flush_cache_mm(void *unused)
{
sh64_dcache_purge_all();
}
/*
* Invalidate (from both caches) the range [start,end) of virtual
* addresses from the user address space specified by mm, after writing
* back any dirty data.
*
* Note, 'end' is 1 byte beyond the end of the range to flush.
*/
static void sh5_flush_cache_range(void *args)
{
struct flusher_data *data = args;
struct vm_area_struct *vma;
unsigned long start, end;
vma = data->vma;
start = data->addr1;
end = data->addr2;
sh64_dcache_purge_user_range(vma->vm_mm, start, end);
sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
}
/*
* Invalidate any entries in either cache for the vma within the user
* address space vma->vm_mm for the page starting at virtual address
* 'eaddr'. This seems to be used primarily in breaking COW. Note,
* the I-cache must be searched too in case the page in question is
* both writable and being executed from (e.g. stack trampolines.)
*
* Note, this is called with pte lock held.
*/
static void sh5_flush_cache_page(void *args)
{
struct flusher_data *data = args;
struct vm_area_struct *vma;
unsigned long eaddr, pfn;
vma = data->vma;
eaddr = data->addr1;
pfn = data->addr2;
sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
if (vma->vm_flags & VM_EXEC)
sh64_icache_inv_user_page(vma, eaddr);
}
static void sh5_flush_dcache_page(void *page)
{
sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
wmb();
}
/*
* Flush the range [start,end] of kernel virtual address space from
* the I-cache. The corresponding range must be purged from the
* D-cache also because the SH-5 doesn't have cache snooping between
* the caches. The addresses will be visible through the superpage
* mapping, therefore it's guaranteed that there are no cache entries for
* the range in cache sets of the wrong colour.
*/
static void sh5_flush_icache_range(void *args)
{
struct flusher_data *data = args;
unsigned long start, end;
start = data->addr1;
end = data->addr2;
__flush_purge_region((void *)start, end);
wmb();
sh64_icache_inv_kernel_range(start, end);
}
/*
* For the address range [start,end), write back the data from the
* D-cache and invalidate the corresponding region of the I-cache for the
* current process. Used to flush signal trampolines on the stack to
* make them executable.
*/
static void sh5_flush_cache_sigtramp(void *vaddr)
{
unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
__flush_wback_region(vaddr, L1_CACHE_BYTES);
wmb();
sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
}
void __init sh5_cache_init(void)
{
local_flush_cache_all = sh5_flush_cache_all;
local_flush_cache_mm = sh5_flush_cache_mm;
local_flush_cache_dup_mm = sh5_flush_cache_mm;
local_flush_cache_page = sh5_flush_cache_page;
local_flush_cache_range = sh5_flush_cache_range;
local_flush_dcache_page = sh5_flush_dcache_page;
local_flush_icache_range = sh5_flush_icache_range;
local_flush_cache_sigtramp = sh5_flush_cache_sigtramp;
/* Reserve a slot for dcache colouring in the DTLB */
dtlb_cache_slot = sh64_get_wired_dtlb_entry();
sh4__flush_region_init();
}
......@@ -355,12 +355,6 @@ void __init cpu_cache_init(void)
}
}
if (boot_cpu_data.family == CPU_FAMILY_SH5) {
extern void __weak sh5_cache_init(void);
sh5_cache_init();
}
skip:
emit_cache_params();
}
/*
* arch/sh/mm/extable_64.c
*
* Copyright (C) 2003 Richard Curnow
* Copyright (C) 2003, 2004 Paul Mundt
*
* Cloned from the 2.5 SH version..
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/bsearch.h>
#include <linux/rwsem.h>
#include <linux/extable.h>
#include <linux/uaccess.h>
extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
extern void __copy_user_fixup(void);
static const struct exception_table_entry __copy_user_fixup_ex = {
.fixup = (unsigned long)&__copy_user_fixup,
};
/*
* Some functions that may trap due to a bad user-mode address have too
* many loads and stores in them to make it at all practical to label
* each one and put them all in the main exception table.
*
* In particular, the fast memcpy routine is like this. Its fix-up is
* just to fall back to a slow byte-at-a-time copy, which is handled the
* conventional way. So it's functionally OK to just handle any trap
* occurring in the fast memcpy with that fixup.
*/
static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
{
if ((addr >= (unsigned long)&copy_user_memcpy) &&
(addr <= (unsigned long)&copy_user_memcpy_end))
return &__copy_user_fixup_ex;
return NULL;
}
static int cmp_ex_search(const void *key, const void *elt)
{
const struct exception_table_entry *_elt = elt;
unsigned long _key = *(unsigned long *)key;
/* avoid overflow */
if (_key > _elt->insn)
return 1;
if (_key < _elt->insn)
return -1;
return 0;
}
/* Simple binary search */
const struct exception_table_entry *
search_extable(const struct exception_table_entry *base,
const size_t num,
unsigned long value)
{
const struct exception_table_entry *mid;
mid = check_exception_ranges(value);
if (mid)
return mid;
return bsearch(&value, base, num,
sizeof(struct exception_table_entry), cmp_ex_search);
}
int fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
fixup = search_exception_tables(regs->pc);
if (fixup) {
regs->pc = fixup->fixup;
return 1;
}
return 0;
}
/*
* arch/sh/mm/tlb-sh5.c
*
* Copyright (C) 2003 Paul Mundt <lethal@linux-sh.org>
* Copyright (C) 2003 Richard Curnow <richard.curnow@superh.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
/**
* sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
*/
int sh64_tlb_init(void)
{
/* Assign some sane DTLB defaults */
cpu_data->dtlb.entries = 64;
cpu_data->dtlb.step = 0x10;
cpu_data->dtlb.first = DTLB_FIXED | cpu_data->dtlb.step;
cpu_data->dtlb.next = cpu_data->dtlb.first;
cpu_data->dtlb.last = DTLB_FIXED |
((cpu_data->dtlb.entries - 1) *
cpu_data->dtlb.step);
/* And again for the ITLB */
cpu_data->itlb.entries = 64;
cpu_data->itlb.step = 0x10;
cpu_data->itlb.first = ITLB_FIXED | cpu_data->itlb.step;
cpu_data->itlb.next = cpu_data->itlb.first;
cpu_data->itlb.last = ITLB_FIXED |
((cpu_data->itlb.entries - 1) *
cpu_data->itlb.step);
return 0;
}
/**
* sh64_next_free_dtlb_entry - Find the next available DTLB entry
*/
unsigned long long sh64_next_free_dtlb_entry(void)
{
return cpu_data->dtlb.next;
}
/**
* sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
*/
unsigned long long sh64_get_wired_dtlb_entry(void)
{
unsigned long long entry = sh64_next_free_dtlb_entry();
cpu_data->dtlb.first += cpu_data->dtlb.step;
cpu_data->dtlb.next += cpu_data->dtlb.step;
return entry;
}
/**
* sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
*
* @entry: Address of TLB slot.
*
* Works like a stack, last one to allocate must be first one to free.
*/
int sh64_put_wired_dtlb_entry(unsigned long long entry)
{
__flush_tlb_slot(entry);
/*
* We don't do any particularly useful tracking of wired entries,
* so this approach works like a stack .. last one to be allocated
* has to be the first one to be freed.
*
* We could potentially load wired entries into a list and work on
* rebalancing the list periodically (which also entails moving the
* contents of a TLB entry) .. though I have a feeling that this is
* more trouble than it's worth.
*/
/*
* Entry must be valid .. we don't want any ITLB addresses!
*/
if (entry <= DTLB_FIXED)
return -EINVAL;
/*
* Next, check if we're within range to be freed (i.e., this must be the
* entry beneath the first 'free' entry!).
*/
if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
return -EINVAL;
/* If we are, then bring this entry back into the list */
cpu_data->dtlb.first -= cpu_data->dtlb.step;
cpu_data->dtlb.next = entry;
return 0;
}
/**
* sh64_setup_tlb_slot - Load up a translation in a wired slot.
*
* @config_addr: Address of TLB slot.
* @eaddr: Virtual address.
* @asid: Address Space Identifier.
* @paddr: Physical address.
*
* Load up a virtual<->physical translation for @eaddr<->@paddr in the
* pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
*/
void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
unsigned long asid, unsigned long paddr)
{
unsigned long long pteh, ptel;
pteh = neff_sign_extend(eaddr);
pteh &= PAGE_MASK;
pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
ptel = neff_sign_extend(paddr);
ptel &= PAGE_MASK;
ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
asm volatile("putcfg %0, 1, %1\n\t"
"putcfg %0, 0, %2\n"
: : "r" (config_addr), "r" (ptel), "r" (pteh));
}
/**
* sh64_teardown_tlb_slot - Tear down a translation.
*
* @config_addr: Address of TLB slot.
*
* Tear down any existing mapping in the TLB slot @config_addr.
*/
void sh64_teardown_tlb_slot(unsigned long long config_addr)
__attribute__ ((alias("__flush_tlb_slot")));
static int dtlb_entry;
static unsigned long long dtlb_entries[64];
void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
{
unsigned long long entry;
unsigned long paddr, flags;
BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
local_irq_save(flags);
entry = sh64_get_wired_dtlb_entry();
dtlb_entries[dtlb_entry++] = entry;
paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
paddr &= ~PAGE_MASK;
sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
local_irq_restore(flags);
}
void tlb_unwire_entry(void)
{
unsigned long long entry;
unsigned long flags;
BUG_ON(!dtlb_entry);
local_irq_save(flags);
entry = dtlb_entries[--dtlb_entry];	/* LIFO: most recently wired slot */
sh64_teardown_tlb_slot(entry);
sh64_put_wired_dtlb_entry(entry);
local_irq_restore(flags);
}
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
unsigned long long ptel;
unsigned long long pteh=0;
struct tlb_info *tlbp;
unsigned long long next;
unsigned int fault_code = get_thread_fault_code();
/* Get PTEL first */
ptel = pte.pte_low;
/*
* Set PTEH register
*/
pteh = neff_sign_extend(address & MMU_VPN_MASK);
/* Set the ASID. */
pteh |= get_asid() << PTEH_ASID_SHIFT;
pteh |= PTEH_VALID;
/* Set PTEL register, set_pte has performed the sign extension */
ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
if (fault_code & FAULT_CODE_ITLB)
tlbp = &cpu_data->itlb;
else
tlbp = &cpu_data->dtlb;
next = tlbp->next;
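/*
 * Round-robin replacement: evict whatever occupies the next victim slot,
 * load the new translation into it, then advance the pointer (wrapping
 * back to 'first' once 'last' is passed).
 */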
__flush_tlb_slot(next);
asm volatile ("putcfg %0,1,%2\n\n\t"
"putcfg %0,0,%1\n"
: : "r" (next), "r" (pteh), "r" (ptel) );
next += TLB_STEP;
if (next > tlbp->last)
next = tlbp->first;
tlbp->next = next;
}
/*
* The SH64 TLB miss.
*
* Original code from fault.c
* Copyright (C) 2000, 2001 Paolo Alberelli
*
* Fast PTE->TLB refill path
* Copyright (C) 2003 Richard.Curnow@superh.com
*
* IMPORTANT NOTES :
* The do_fast_page_fault function is called from a context in entry.S
* where very few registers have been saved. In particular, the code in
* this file must be compiled not to use ANY caller-save registers that
* are not part of the restricted save set. Also, it means that code in
* this file must not make calls to functions elsewhere in the kernel, or
* else the excepting context will see corruption in its caller-save
* registers. Plus, the entry.S save area is non-reentrant, so this code
* has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
* on any exception.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
static int handle_tlbmiss(unsigned long long protection_flags,
unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
pte_t entry;
if (is_vmalloc_addr((void *)address)) {
pgd = pgd_offset_k(address);
} else {
if (unlikely(address >= TASK_SIZE || !current->mm))
return 1;
pgd = pgd_offset(current->mm, address);
}
pud = pud_offset(pgd, address);
if (pud_none(*pud) || !pud_present(*pud))
return 1;
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd) || !pmd_present(*pmd))
return 1;
pte = pte_offset_kernel(pmd, address);
entry = *pte;
if (pte_none(entry) || !pte_present(entry))
return 1;
/*
* If the page doesn't have sufficient protection bits set to
* service the kind of fault being handled, there's not much
* point doing the TLB refill. Punt the fault to the general
* handler.
*/
if ((pte_val(entry) & protection_flags) != protection_flags)
return 1;
update_mmu_cache(NULL, address, pte);
return 0;
}
/*
* Put all this information into one structure so that everything is just
* arithmetic relative to a single base address. This reduces the number
* of movi/shori pairs needed just to load addresses of static data.
*/
struct expevt_lookup {
unsigned short protection_flags[8];
unsigned char is_text_access[8];
unsigned char is_write_access[8];
};
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)
/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
.is_text_access = {1, 1, 0, 0, 0, 0, 0, 0}
};
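/* .is_write_access is left zero-initialized and unused in the code below;
   write faults are instead flagged via expevt_to_fault_code() (EXPEVT ==
   0x060). */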
static inline unsigned int
expevt_to_fault_code(unsigned long expevt)
{
if (expevt == 0xa40)
return FAULT_CODE_ITLB;
else if (expevt == 0x060)
return FAULT_CODE_WRITE;
return 0;
}
/*
This routine handles page faults that can be serviced just by refilling a
TLB entry from an existing page table entry. (This case represents a very
large majority of page faults.) Return 0 if the fault was successfully
handled by refilling the TLB. Return 1 if it could not be handled; this
leads into the general fault handling in fault.c, which deals with mapping
file-backed pages, stack growth, segmentation faults, swapping etc.
*/
asmlinkage int __kprobes
do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
unsigned long address)
{
unsigned long long protection_flags;
unsigned long long index;
unsigned long long expevt4;
unsigned int fault_code;
/* The next few lines implement a way of hashing EXPEVT into a
* small array index which can be used to lookup parameters
* specific to the type of TLBMISS being handled.
*
* Note:
* ITLBMISS has EXPEVT==0xa40
* RTLBMISS has EXPEVT==0x040
* WTLBMISS has EXPEVT==0x060
*/
expevt4 = (expevt >> 4);
/* TODO : xor ssr_md into this expression too. Then we can check
* that PRU is set when it needs to be. */
index = expevt4 ^ (expevt4 >> 5);
index &= 7;
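/*
 * Worked example of the hash above, using the EXPEVT codes listed in the
 * comment:
 *   ITLBMISS 0xa40: 0xa4 ^ (0xa4 >> 5) = 0xa1, & 7 = 1 -> PRX (text access)
 *   RTLBMISS 0x040: 0x04 ^ (0x04 >> 5) = 0x04, & 7 = 4 -> PRR
 *   WTLBMISS 0x060: 0x06 ^ (0x06 >> 5) = 0x06, & 7 = 6 -> PRW
 */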
fault_code = expevt_to_fault_code(expevt);
protection_flags = expevt_lookup_table.protection_flags[index];
if (expevt_lookup_table.is_text_access[index])
fault_code |= FAULT_CODE_ITLB;
if (!ssr_md)
fault_code |= FAULT_CODE_USER;
set_thread_fault_code(fault_code);
return handle_tlbmiss(protection_flags, address);
}
/*
* arch/sh/mm/tlb-flush_64.c
*
* Copyright (C) 2000, 2001 Paolo Alberelli
* Copyright (C) 2003 Richard Curnow (/proc/tlb, bug fixes)
* Copyright (C) 2003 - 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/signal.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/interrupt.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
unsigned long long match, pteh=0, lpage;
unsigned long tlb;
/*
* Sign-extend based on neff.
*/
lpage = neff_sign_extend(page);
match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
match |= lpage;
for_each_itlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
if (pteh == match) {
__flush_tlb_slot(tlb);
break;
}
}
for_each_dtlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
if (pteh == match) {
__flush_tlb_slot(tlb);
break;
}
}
}
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
unsigned long flags;
if (vma->vm_mm) {
page &= PAGE_MASK;
local_irq_save(flags);
local_flush_tlb_one(get_asid(), page);
local_irq_restore(flags);
}
}
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned long flags;
unsigned long long match, pteh=0, pteh_epn, pteh_low;
unsigned long tlb;
unsigned int cpu = smp_processor_id();
struct mm_struct *mm;
mm = vma->vm_mm;
if (cpu_context(cpu, mm) == NO_CONTEXT)
return;
local_irq_save(flags);
start &= PAGE_MASK;
end &= PAGE_MASK;
match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
/* Flush ITLB */
for_each_itlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
pteh_epn = pteh & PAGE_MASK;
pteh_low = pteh & ~PAGE_MASK;
if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
__flush_tlb_slot(tlb);
}
/* Flush DTLB */
for_each_dtlb_entry(tlb) {
asm volatile ("getcfg %1, 0, %0"
: "=r" (pteh)
: "r" (tlb) );
pteh_epn = pteh & PAGE_MASK;
pteh_low = pteh & ~PAGE_MASK;
if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
__flush_tlb_slot(tlb);
}
local_irq_restore(flags);
}
void local_flush_tlb_mm(struct mm_struct *mm)
{
unsigned long flags;
unsigned int cpu = smp_processor_id();
if (cpu_context(cpu, mm) == NO_CONTEXT)
return;
local_irq_save(flags);
cpu_context(cpu, mm) = NO_CONTEXT;
if (mm == current->mm)
activate_context(mm, cpu);
local_irq_restore(flags);
}
void local_flush_tlb_all(void)
{
/* Invalidate all, including shared pages, excluding fixed TLBs */
unsigned long flags, tlb;
local_irq_save(flags);
/* Flush each ITLB entry */
for_each_itlb_entry(tlb)
__flush_tlb_slot(tlb);
/* Flush each DTLB entry */
for_each_dtlb_entry(tlb)
__flush_tlb_slot(tlb);
local_irq_restore(flags);
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
/* FIXME: Optimize this later.. */
flush_tlb_all();
}
void __flush_tlb_global(void)
{
flush_tlb_all();
}
......@@ -1516,7 +1516,7 @@ config RTC_DRV_GENERIC
tristate "Generic RTC support"
# Please consider writing a new RTC driver instead of using the generic
# RTC abstraction
depends on PARISC || M68K || PPC || SUPERH32 || COMPILE_TEST
depends on PARISC || M68K || PPC || SUPERH || COMPILE_TEST
help
Say Y or M here to enable RTC support on systems using the generic
RTC abstraction. If you do not know what you are doing, you should
......
......@@ -39,7 +39,7 @@ config ARCH_BINFMT_ELF_STATE
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y if !BINFMT_ELF
depends on (ARM || (SUPERH32 && !MMU) || C6X)
depends on (ARM || (SUPERH && !MMU) || C6X)
select ELFCORE
help
ELF FDPIC binaries are based on ELF, but allow the individual load
......
......@@ -81,9 +81,6 @@ arch/ia64/include/uapi/asm/cmpxchg.h:CONFIG_IA64_DEBUG_CMPXCHG
arch/m68k/include/uapi/asm/ptrace.h:CONFIG_COLDFIRE
arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_NO
arch/nios2/include/uapi/asm/swab.h:CONFIG_NIOS2_CI_SWAB_SUPPORT
arch/sh/include/uapi/asm/ptrace.h:CONFIG_CPU_SH5
arch/sh/include/uapi/asm/sigcontext.h:CONFIG_CPU_SH5
arch/sh/include/uapi/asm/stat.h:CONFIG_CPU_SH5
arch/x86/include/uapi/asm/auxvec.h:CONFIG_IA32_EMULATION
arch/x86/include/uapi/asm/auxvec.h:CONFIG_X86_64
arch/x86/include/uapi/asm/mman.h:CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
......
......@@ -22,7 +22,7 @@
* Historically we have only done this type of barrier for the MMUCR, but
* it's also necessary for the CCR, so we make it generic here instead.
*/
#if defined(__SH4A__) || defined(__SH5__)
#if defined(__SH4A__)
#define mb() __asm__ __volatile__ ("synco": : :"memory")
#define rmb() mb()
#define wmb() mb()
......