Commit 2671fe5e authored by Linus Torvalds

Merge tag 'mips_5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Thomas Bogendoerfer:

 - added support for Nintendo N64

 - added support for Realtek RTL83XX SoCs

 - kaslr support for Loongson64

 - first steps to get rid of set_fs()

 - DMA runtime coherent/non-coherent selection cleanup (see the sketch after this list)

 - cleanups and fixes
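
To illustrate the DMA selection cleanup noted above (a minimal sketch, not code from the series): platforms now set a single runtime flag, dma_default_coherent, during early setup instead of juggling coherentio/hw_coherentio. The probe routine below is a hypothetical stand-in; the real Alchemy detection logic appears in the au1000 hunk further down.

    /*
     * Sketch only: board_probe_dma_coherent() is a made-up stand-in for
     * platform-specific detection; dma_default_coherent comes from
     * <linux/dma-map-ops.h>, as lifted into common code by this series.
     */
    #include <linux/init.h>
    #include <linux/dma-map-ops.h>

    static bool __init board_probe_dma_coherent(void)
    {
            return false;   /* placeholder: e.g. check the SoC/CPU revision */
    }

    void __init plat_mem_setup(void)
    {
            /* one global default, consumed by the generic DMA mapping code */
            dma_default_coherent = board_probe_dma_coherent();
    }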

* tag 'mips_5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (98 commits)
  Revert "MIPS: Add basic support for ptrace single step"
  vmlinux.lds.h: catch more UBSAN symbols into .data
  MIPS: kernel: Drop kgdb_call_nmi_hook
  MAINTAINERS: Add git tree for KVM/mips
  MIPS: Use common way to parse elfcorehdr
  MIPS: Simplify EVA cache handling
  Revert "MIPS: kernel: {ftrace,kgdb}: Set correct address limit for cache flushes"
  MIPS: remove CONFIG_DMA_PERDEV_COHERENT
  MIPS: remove CONFIG_DMA_MAYBE_COHERENT
  driver core: lift dma_default_coherent into common code
  MIPS: refactor the runtime coherent vs noncoherent DMA indicators
  MIPS/alchemy: factor out the DMA coherent setup
  MIPS/malta: simplify plat_setup_iocoherency
  MIPS: Add basic support for ptrace single step
  MAINTAINERS: replace non-matching patterns for loongson{2,3}
  MIPS: Make check condition for SDBBP consistent with EJTAG spec
  mips: Replace lkml.org links with lore
  Revert "MIPS: microMIPS: Fix the judgment of mm_jr16_op and mm_jalr_op"
  MIPS: crash_dump.c: Simplify copy_oldmem_page()
  Revert "mips: Manually call fdt_init_reserved_mem() method"
  ...
parents b811b410 b0c2793b
......@@ -202,6 +202,7 @@ Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
Lukasz Luba <lukasz.luba@arm.com> <l.luba@partner.samsung.com>
Maciej W. Rozycki <macro@mips.com> <macro@imgtec.com>
Maciej W. Rozycki <macro@orcam.me.uk> <macro@linux-mips.org>
Manivannan Sadhasivam <mani@kernel.org> <manivannanece23@gmail.com>
Manivannan Sadhasivam <mani@kernel.org> <manivannan.sadhasivam@linaro.org>
Marcin Nowakowski <marcin.nowakowski@mips.com> <marcin.nowakowski@imgtec.com>
......
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mips/lantiq/lantiq,cgu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lantiq Xway SoC series Clock Generation Unit (CGU)
maintainers:
  - John Crispin <john@phrozen.org>

properties:
  compatible:
    items:
      - enum:
          - lantiq,cgu-xway

  reg:
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    cgu@103000 {
        compatible = "lantiq,cgu-xway";
        reg = <0x103000 0x1000>;
    };
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mips/lantiq/lantiq,dma-xway.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lantiq Xway SoCs DMA Controller DT bindings
maintainers:
  - John Crispin <john@phrozen.org>

properties:
  compatible:
    items:
      - enum:
          - lantiq,dma-xway

  reg:
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    dma@e104100 {
        compatible = "lantiq,dma-xway";
        reg = <0xe104100 0x800>;
    };
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mips/lantiq/lantiq,ebu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lantiq Xway SoC series External Bus Unit (EBU)
maintainers:
  - John Crispin <john@phrozen.org>

properties:
  compatible:
    items:
      - enum:
          - lantiq,ebu-xway

  reg:
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    ebu@105300 {
        compatible = "lantiq,ebu-xway";
        reg = <0x105300 0x100>;
    };
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/mips/lantiq/lantiq,pmu.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Lantiq Xway SoC series Power Management Unit (PMU)
maintainers:
  - John Crispin <john@phrozen.org>

properties:
  compatible:
    items:
      - enum:
          - lantiq,pmu-xway

  reg:
    maxItems: 1

required:
  - compatible
  - reg

additionalProperties: false

examples:
  - |
    pmu@102000 {
        compatible = "lantiq,pmu-xway";
        reg = <0x102000 0x1000>;
    };
# SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/mips/realtek-rtl.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Realtek RTL83xx/93xx SoC series device tree bindings
maintainers:
  - Bert Vermeulen <bert@biot.com>
  - Sander Vanheule <sander@svanheule.net>

properties:
  $nodename:
    const: "/"

  compatible:
    oneOf:
      # RTL8382-based boards
      - items:
          - enum:
              - cisco,sg220-26
          - const: realtek,rtl8382-soc

additionalProperties: true
......@@ -231,6 +231,8 @@ patternProperties:
description: Computadora Industrial Abierta Argentina
"^cirrus,.*":
description: Cirrus Logic, Inc.
"^cisco,.*":
description: Cisco Systems, Inc.
"^cloudengines,.*":
description: Cloud Engines, Inc.
"^cnm,.*":
......
......@@ -4914,7 +4914,7 @@ F: Documentation/networking/decnet.rst
F: net/decnet/
DECSTATION PLATFORM SUPPORT
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
L: linux-mips@vger.kernel.org
S: Maintained
W: http://www.linux-mips.org/wiki/DECstation
......@@ -4923,12 +4923,12 @@ F: arch/mips/include/asm/dec/
F: arch/mips/include/asm/mach-dec/
DEFXX FDDI NETWORK DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/net/fddi/defxx.*
DEFZA FDDI NETWORK DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/net/fddi/defza.*
......@@ -6191,7 +6191,7 @@ F: include/linux/dim.h
F: lib/dim/
DZ DECSTATION DZ11 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/tty/serial/dz.*
......@@ -9709,6 +9709,7 @@ M: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
L: linux-mips@vger.kernel.org
L: kvm@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git
F: arch/mips/include/asm/kvm*
F: arch/mips/include/uapi/asm/kvm*
F: arch/mips/kvm/
......@@ -11878,8 +11879,7 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/include/asm/mach-loongson2ef/
F: arch/mips/loongson2ef/
F: drivers/*/*/*loongson2*
F: drivers/*/*loongson2*
F: drivers/cpufreq/loongson2_cpufreq.c
MIPS/LOONGSON64 ARCHITECTURE
M: Huacai Chen <chenhuacai@kernel.org>
......@@ -11888,8 +11888,6 @@ L: linux-mips@vger.kernel.org
S: Maintained
F: arch/mips/include/asm/mach-loongson64/
F: arch/mips/loongson64/
F: drivers/*/*/*loongson3*
F: drivers/*/*loongson3*
F: drivers/irqchip/irq-loongson*
F: drivers/platform/mips/cpu_hwmon.c
......@@ -14064,7 +14062,6 @@ L: linux-mips@vger.kernel.org
S: Odd Fixes
F: arch/mips/boot/dts/img/pistachio*
F: arch/mips/configs/pistachio*_defconfig
F: arch/mips/include/asm/mach-pistachio/
F: arch/mips/pistachio/
PKTCDVD DRIVER
......@@ -18082,7 +18079,7 @@ F: Documentation/networking/tuntap.rst
F: arch/um/os-Linux/drivers/
TURBOCHANNEL SUBSYSTEM
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
M: Ralf Baechle <ralf@linux-mips.org>
L: linux-mips@vger.kernel.org
S: Maintained
......@@ -19715,7 +19712,7 @@ F: Documentation/admin-guide/blockdev/zram.rst
F: drivers/block/zram/
ZS DECSTATION Z85C30 SERIAL DRIVER
M: "Maciej W. Rozycki" <macro@linux-mips.org>
M: "Maciej W. Rozycki" <macro@orcam.me.uk>
S: Maintained
F: drivers/tty/serial/zs.*
......
......@@ -18,6 +18,7 @@ platform-$(CONFIG_MACH_LOONGSON2EF) += loongson2ef/
platform-$(CONFIG_MACH_LOONGSON32) += loongson32/
platform-$(CONFIG_MACH_LOONGSON64) += loongson64/
platform-$(CONFIG_MIPS_MALTA) += mti-malta/
platform-$(CONFIG_MACH_NINTENDO64) += n64/
platform-$(CONFIG_NLM_COMMON) += netlogic/
platform-$(CONFIG_PIC32MZDA) += pic32/
platform-$(CONFIG_MACH_PISTACHIO) += pistachio/
......
......@@ -18,6 +18,7 @@ config MIPS
select ARCH_USE_QUEUED_SPINLOCKS
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_IPC_PARSE_VERSION
select ARCH_WANT_LD_ORPHAN_WARN
select BUILDTIME_TABLE_SORT
select CLONE_BACKWARDS
select CPU_NO_EFFICIENT_FFS if (TARGET_ISA_REV < 1)
......@@ -42,7 +43,7 @@ config MIPS
select HANDLE_DOMAIN_IRQ
select HAVE_ARCH_COMPILER_H
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_KGDB if MIPS_FP_SUPPORT
select HAVE_ARCH_MMAP_RND_BITS if MMU
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT
select HAVE_ARCH_SECCOMP_FILTER
......@@ -75,6 +76,8 @@ config MIPS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_NMI
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_SPARSE_SYSCALL_NR
......@@ -123,6 +126,7 @@ choice
config MIPS_GENERIC_KERNEL
bool "Generic board-agnostic MIPS kernel"
select ARCH_HAS_SETUP_DMA_OPS
select MIPS_GENERIC
select BOOT_RAW
select BUILTIN_DTB
......@@ -132,7 +136,7 @@ config MIPS_GENERIC_KERNEL
select CPU_MIPSR2_IRQ_EI
select CPU_MIPSR2_IRQ_VI
select CSRC_R4K
select DMA_PERDEV_COHERENT
select DMA_NONCOHERENT
select HAVE_PCI
select IRQ_MIPS_CPU
select MIPS_AUTO_PFN_OFFSET
......@@ -181,7 +185,7 @@ config MIPS_ALCHEMY
select CEVT_R4K
select CSRC_R4K
select IRQ_MIPS_CPU
select DMA_MAYBE_COHERENT # Au1000,1500,1100 aren't, rest is
select DMA_NONCOHERENT # Au1000,1500,1100 aren't, rest is
select MIPS_FIXUP_BIGPHYS_ADDR if PCI
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
......@@ -408,6 +412,7 @@ config MACH_JAZZ
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_100HZ
select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a family of machines based on the MIPS R4030 chipset which was
used by several vendors to build RISC/os and Windows NT workstations.
......@@ -491,8 +496,6 @@ config MACH_LOONGSON64
select SYS_SUPPORTS_ZBOOT
select SYS_SUPPORTS_RELOCATABLE
select ZONE_DMA32
select NUMA
select SMP
select COMMON_CLK
select USE_OF
select BUILTIN_DTB
......@@ -546,7 +549,7 @@ config MIPS_MALTA
select CLKSRC_MIPS_GIC
select COMMON_CLK
select CSRC_R4K
select DMA_MAYBE_COHERENT
select DMA_NONCOHERENT
select GENERIC_ISA_DMA
select HAVE_PCSPKR_PLATFORM
select HAVE_PCI
......@@ -608,6 +611,18 @@ config MACH_VR41XX
select SYS_SUPPORTS_MIPS16
select GPIOLIB
config MACH_NINTENDO64
bool "Nintendo 64 console"
select CEVT_R4K
select CSRC_R4K
select SYS_HAS_CPU_R4300
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_ZBOOT
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select DMA_NONCOHERENT
select IRQ_MIPS_CPU
config RALINK
bool "Ralink based machines"
select CEVT_R4K
......@@ -627,6 +642,27 @@ config RALINK
select ARCH_HAS_RESET_CONTROLLER
select RESET_CONTROLLER
config MACH_REALTEK_RTL
bool "Realtek RTL838x/RTL839x based machines"
select MIPS_GENERIC
select DMA_NONCOHERENT
select IRQ_MIPS_CPU
select CSRC_R4K
select CEVT_R4K
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_VPE_LOADER
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_EARLY_PRINTK_8250
select USE_GENERIC_EARLY_PRINTK_8250
select BOOT_RAW
select PINCTRL
select USE_OF
config SGI_IP22
bool "SGI IP22 (Indy/Indigo2)"
select ARC_MEMORY
......@@ -1127,11 +1163,6 @@ config FW_CFE
config ARCH_SUPPORTS_UPROBES
bool
config DMA_MAYBE_COHERENT
select ARCH_HAS_DMA_COHERENCE_H
select DMA_NONCOHERENT
bool
config DMA_PERDEV_COHERENT
bool
select ARCH_HAS_SETUP_DMA_OPS
......@@ -1258,9 +1289,6 @@ config SYS_SUPPORTS_HUGETLBFS
config MIPS_HUGE_TLB_SUPPORT
def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE
config IRQ_CPU_RM7K
bool
config IRQ_MSP_SLP
bool
......@@ -1664,6 +1692,15 @@ config CPU_VR41XX
kernel built with this option will not run on any other type of
processor or vice versa.
config CPU_R4300
bool "R4300"
depends on SYS_HAS_CPU_R4300
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_HAS_LOAD_STORE_LR
help
MIPS Technologies R4300-series processors.
config CPU_R4X00
bool "R4x00"
depends on SYS_HAS_CPU_R4X00
......@@ -1998,6 +2035,9 @@ config SYS_HAS_CPU_TX39XX
config SYS_HAS_CPU_VR41XX
bool
config SYS_HAS_CPU_R4300
bool
config SYS_HAS_CPU_R4X00
bool
......@@ -2758,6 +2798,7 @@ config ARCH_SPARSEMEM_ENABLE
config NUMA
bool "NUMA Support"
depends on SYS_SUPPORTS_NUMA
select SMP
help
Say Y to compile the kernel to support NUMA (Non-Uniform Memory
Access). This option improves performance on systems with more
......
......@@ -136,11 +136,31 @@ cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
#
cflags-y += -fno-stack-check
# binutils from v2.35, when built with --enable-mips-fix-loongson3-llsc=yes,
# supports an -mfix-loongson3-llsc flag which emits a sync prior to each ll
# instruction to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h
# for a description).
#
# We disable this in order to prevent the assembler meddling with the
# instruction that labels refer to, i.e. if we label an ll instruction:
#
# 1: ll v0, 0(a0)
#
# ...then with the assembler fix applied the label may actually point at a sync
# instruction inserted by the assembler, and if we were using the label in an
# exception table the table would no longer contain the address of the ll
# instruction.
#
# Avoid this by explicitly disabling that assembler behaviour.
#
cflags-y += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
#
# CPU-dependent compiler/assembler options for optimization.
#
cflags-$(CONFIG_CPU_R3000) += -march=r3000
cflags-$(CONFIG_CPU_TX39XX) += -march=r3900
cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap
cflags-$(CONFIG_CPU_VR41XX) += -march=r4100 -Wa,--trap
cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap
cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap
......
......@@ -143,7 +143,3 @@ int __init prom_get_ethernet_addr(char *ethernet_addr)
return 0;
}
void __init prom_free_prom_memory(void)
{
}
......@@ -28,8 +28,8 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/dma-coherence.h>
#include <asm/mipsregs.h>
#include <au1000.h>
......@@ -37,6 +37,23 @@
extern void __init board_setup(void);
extern void __init alchemy_set_lpj(void);
static bool alchemy_dma_coherent(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
return false;
case ALCHEMY_CPU_AU1200:
/* Au1200 AB USB does not support coherent memory */
if ((read_c0_prid() & PRID_REV_MASK) == 0)
return false;
return true;
default:
return true;
}
}
void __init plat_mem_setup(void)
{
alchemy_set_lpj();
......@@ -48,20 +65,7 @@ void __init plat_mem_setup(void)
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
hw_coherentio = 0;
coherentio = IO_COHERENCE_ENABLED;
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1000:
case ALCHEMY_CPU_AU1500:
case ALCHEMY_CPU_AU1100:
coherentio = IO_COHERENCE_DISABLED;
break;
case ALCHEMY_CPU_AU1200:
/* Au1200 AB USB does not support coherent memory */
if (0 == (read_c0_prid() & PRID_REV_MASK))
coherentio = IO_COHERENCE_DISABLED;
break;
}
dma_default_coherent = alchemy_dma_coherent();
board_setup(); /* board specific setup */
......
......@@ -49,8 +49,3 @@ void __init prom_meminit(void)
pages = memsize() >> PAGE_SHIFT;
memblock_add(PHYS_OFFSET, pages << PAGE_SHIFT);
}
void __init prom_free_prom_memory(void)
{
/* Nothing to free */
}
......@@ -20,7 +20,3 @@
void __init prom_init(void)
{
}
void __init prom_free_prom_memory(void)
{
}
......@@ -32,8 +32,3 @@ void __init prom_init(void)
}
#endif
}
void __init prom_free_prom_memory(void)
{
/* We do not have any prom memory to free */
}
......@@ -213,16 +213,17 @@ unsigned int get_c0_compare_int(void)
void __init plat_mem_setup(void)
{
unsigned long fdt_start;
void *dtb;
set_io_port_base(KSEG1);
/* Get the position of the FDT passed by the bootloader */
fdt_start = fw_getenvl("fdt_start");
if (fdt_start)
__dt_setup_arch((void *)KSEG0ADDR(fdt_start));
else if (fw_passed_dtb)
__dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
dtb = (void *)fw_getenvl("fdt_start");
if (dtb == NULL)
dtb = get_fdt();
if (dtb)
__dt_setup_arch((void *)KSEG0ADDR(dtb));
ath79_reset_base = ioremap(AR71XX_RESET_BASE,
AR71XX_RESET_SIZE);
......
......@@ -113,10 +113,6 @@ void __init prom_init(void)
setup_8250_early_printk_port(CKSEG1ADDR(BCM47XX_SERIAL_ADDR), 0, 0);
}
void __init prom_free_prom_memory(void)
{
}
#if defined(CONFIG_BCM47XX_BCMA) && defined(CONFIG_HIGHMEM)
#define EXTVBASE 0xc0000000
......
......@@ -94,7 +94,3 @@ void __init prom_init(void)
*/
}
}
void __init prom_free_prom_memory(void)
{
}
......@@ -129,10 +129,6 @@ void __init prom_init(void)
register_bmips_smp_ops();
}
void __init prom_free_prom_memory(void)
{
}
const char *get_system_type(void)
{
return "Generic BMIPS kernel";
......@@ -165,11 +161,10 @@ void __init plat_mem_setup(void)
/* intended to somewhat resemble ARM; see Documentation/arm/booting.rst */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
else if (fw_passed_dtb) /* UHI interface or appended dtb */
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
dtb = get_fdt();
if (!dtb)
panic("no dtb found");
__dt_setup_arch(dtb);
......
......@@ -37,6 +37,7 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
KCOV_INSTRUMENT := n
GCOV_PROFILE := n
UBSAN_SANITIZE := n
# decompressor objects (linked with vmlinuz)
vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
......
......@@ -15,10 +15,7 @@
#include <asm/asm.h>
#include <asm/regdef.h>
.set noreorder
.cprestore
LEAF(start)
start:
/* Save boot rom start args */
move s0, a0
move s1, a1
......@@ -29,27 +26,26 @@ start:
PTR_LA a0, _edata
PTR_LA a2, _end
1: sw zero, 0(a0)
addiu a0, a0, 4
bne a2, a0, 1b
addiu a0, a0, 4
PTR_LA a0, (.heap) /* heap address */
PTR_LA sp, (.stack + 8192) /* stack address */
PTR_LA ra, 2f
PTR_LA k0, decompress_kernel
jr k0
nop
PTR_LA t9, decompress_kernel
jalr t9
2:
move a0, s0
move a1, s1
move a2, s2
move a3, s3
PTR_LI k0, KERNEL_ENTRY
jr k0
nop
PTR_LI t9, KERNEL_ENTRY
jalr t9
3:
b 3b
nop
END(start)
.comm .heap,BOOT_HEAP_SIZE,4
......
......@@ -14,6 +14,7 @@ subdir-$(CONFIG_FIT_IMAGE_FDT_NI169445) += ni
subdir-$(CONFIG_MACH_PIC32) += pic32
subdir-$(CONFIG_ATH79) += qca
subdir-$(CONFIG_RALINK) += ralink
subdir-$(CONFIG_MACH_REALTEK_RTL) += realtek
subdir-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += xilfpga
obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
# SPDX-License-Identifier: GPL-2.0
dtb-y += cisco_sg220-26.dtb
// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
/dts-v1/;
#include "rtl83xx.dtsi"
#include "rtl838x.dtsi"
/ {
model = "Cisco SG220-26";
compatible = "cisco,sg220-26", "realtek,rtl8382-soc";
chosen {
stdout-path = "serial0:9600n8";
bootargs = "earlycon console=ttyS0,9600";
};
memory@0 {
device_type = "memory";
reg = <0x0 0x8000000>;
};
};
&uart0 {
status = "okay";
};
// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
/ {
cpus {
#address-cells = <1>;
#size-cells = <0>;
cpu@0 {
compatible = "mips,mips4KEc";
reg = <0>;
clocks = <&baseclk 0>;
clock-names = "cpu";
};
};
baseclk: baseclk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <500000000>;
};
};
// SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause
/ {
#address-cells = <1>;
#size-cells = <1>;
aliases {
serial0 = &uart0;
serial1 = &uart1;
};
cpuintc: cpuintc {
compatible = "mti,cpu-interrupt-controller";
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
};
soc: soc {
compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges = <0x0 0x18000000 0x10000>;
uart0: uart@2000 {
compatible = "ns16550a";
reg = <0x2000 0x100>;
clock-frequency = <200000000>;
interrupt-parent = <&cpuintc>;
interrupts = <31>;
reg-io-width = <1>;
reg-shift = <2>;
fifo-size = <1>;
no-loopback-test;
status = "disabled";
};
uart1: uart@2100 {
compatible = "ns16550a";
reg = <0x2100 0x100>;
clock-frequency = <200000000>;
interrupt-parent = <&cpuintc>;
interrupts = <30>;
reg-io-width = <1>;
reg-shift = <2>;
fifo-size = <1>;
no-loopback-test;
status = "disabled";
};
};
};
......@@ -1149,12 +1149,15 @@ void __init device_tree_init(void)
bool do_prune;
bool fill_mac;
if (fw_passed_dtb) {
fdt = (void *)fw_passed_dtb;
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
if (!fdt_check_header(&__appended_dtb)) {
fdt = &__appended_dtb;
do_prune = false;
fill_mac = true;
pr_info("Using appended Device Tree.\n");
} else if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
} else
#endif
if (octeon_bootinfo->minor_version >= 3 && octeon_bootinfo->fdt_addr) {
fdt = phys_to_virt(octeon_bootinfo->fdt_addr);
if (fdt_check_header(fdt))
panic("Corrupt Device Tree passed to kernel.");
......
......@@ -117,8 +117,3 @@ void __init prom_init(void)
setup_8250_early_printk_port(CKSEG1ADDR(0x1c800000), 0, 0);
}
void __init prom_free_prom_memory(void)
{
/* Nothing to do! */
}
......@@ -31,6 +31,8 @@ CONFIG_PERF_EVENTS=y
CONFIG_MACH_LOONGSON64=y
CONFIG_CPU_HAS_MSA=y
CONFIG_NR_CPUS=16
CONFIG_NUMA=y
CONFIG_SMP=y
CONFIG_HZ_256=y
CONFIG_KEXEC=y
CONFIG_MIPS32_O32=y
......
......@@ -173,7 +173,7 @@ void __weak __init prom_cleanup(void)
{
}
void __weak __init prom_free_prom_memory(void)
void __init prom_free_prom_memory(void)
{
int i;
......
......@@ -87,10 +87,6 @@ void *prom_get_hwconf(void)
return (void *)CKSEG1ADDR(hwconf);
}
void __init prom_free_prom_memory(void)
{
}
/*
* /proc/cpuinfo system type
*
......
......@@ -39,14 +39,13 @@ void __init *plat_get_fdt(void)
/* Already set up */
return (void *)fdt;
if (fw_passed_dtb && !fdt_check_header((void *)fw_passed_dtb)) {
fdt = (void *)get_fdt();
if (fdt && !fdt_check_header(fdt)) {
/*
* We have been provided with the appropriate device tree for
* the board. Make use of it & search for any machine struct
* based upon the root compatible string.
*/
fdt = (void *)fw_passed_dtb;
for_each_mips_machine(check_mach) {
match = mips_machine_is_compatible(check_mach, fdt);
if (match) {
......@@ -202,7 +201,3 @@ void __init arch_init_irq(void)
irqchip_init();
}
void __init prom_free_prom_memory(void)
{
}
......@@ -4,6 +4,10 @@ generated-y += syscall_table_32_o32.h
generated-y += syscall_table_64_n32.h
generated-y += syscall_table_64_n64.h
generated-y += syscall_table_64_o32.h
generated-y += unistd_nr_n32.h
generated-y += unistd_nr_n64.h
generated-y += unistd_nr_o32.h
generic-y += export.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
......
......@@ -20,10 +20,27 @@
#include <asm/sgidefs.h>
#include <asm/asm-eva.h>
#ifndef __VDSO__
/*
* Emit CFI data in .debug_frame sections, not .eh_frame sections.
* We don't do DWARF unwinding at runtime, so only the offline DWARF
* information is useful to anyone. Note we should change this if we
* ever decide to enable DWARF unwinding at runtime.
*/
#define CFI_SECTIONS .cfi_sections .debug_frame
#else
/*
* For the vDSO, emit both runtime unwind information and debug
* symbols for the .dbg file.
*/
#define CFI_SECTIONS
#endif
/*
* LEAF - declare leaf routine
*/
#define LEAF(symbol) \
CFI_SECTIONS; \
.globl symbol; \
.align 2; \
.type symbol, @function; \
......@@ -36,6 +53,7 @@ symbol: .frame sp, 0, ra; \
* NESTED - declare nested routine entry point
*/
#define NESTED(symbol, framesize, rpc) \
CFI_SECTIONS; \
.globl symbol; \
.align 2; \
.type symbol, @function; \
......
......@@ -248,7 +248,7 @@ static __inline__ int pfx##_sub_if_positive(type i, pfx##_t * v) \
* bltz that can branch to code outside of the LL/SC loop. As \
* such, we don't need to emit another barrier here. \
*/ \
if (!__SYNC_loongson3_war) \
if (__SYNC_loongson3_war == 0) \
smp_mb__after_atomic(); \
\
return result; \
......
......@@ -26,7 +26,7 @@
#include <asm/war.h>
#define __bit_op(mem, insn, inputs...) do { \
unsigned long temp; \
unsigned long __temp; \
\
asm volatile( \
" .set push \n" \
......@@ -37,13 +37,13 @@
" " __SC "%0, %1 \n" \
" " __SC_BEQZ "%0, 1b \n" \
" .set pop \n" \
: "=&r"(temp), "+" GCC_OFF_SMALL_ASM()(mem) \
: "=&r"(__temp), "+" GCC_OFF_SMALL_ASM()(mem) \
: inputs \
: __LLSC_CLOBBER); \
} while (0)
#define __test_bit_op(mem, ll_dst, insn, inputs...) ({ \
unsigned long orig, temp; \
unsigned long __orig, __temp; \
\
asm volatile( \
" .set push \n" \
......@@ -54,12 +54,12 @@
" " __SC "%1, %2 \n" \
" " __SC_BEQZ "%1, 1b \n" \
" .set pop \n" \
: "=&r"(orig), "=&r"(temp), \
: "=&r"(__orig), "=&r"(__temp), \
"+" GCC_OFF_SMALL_ASM()(mem) \
: inputs \
: __LLSC_CLOBBER); \
\
orig; \
__orig; \
})
/*
......@@ -435,7 +435,7 @@ static inline int fls(unsigned int x)
*
* This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
* differs in spirit from the below ffz (man ffs).
*/
static inline int ffs(int word)
{
......
......@@ -112,7 +112,27 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE];
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
#ifdef CONFIG_USE_OF
extern unsigned long fw_passed_dtb;
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
extern char __appended_dtb[];
static inline void *get_fdt(void)
{
if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) ||
IS_ENABLED(CONFIG_MIPS_ELF_APPENDED_DTB))
if (fdt_magic(&__appended_dtb) == FDT_MAGIC)
return &__appended_dtb;
if (fw_arg0 == -2) /* UHI interface */
return (void *)fw_arg1;
if (IS_ENABLED(CONFIG_BUILTIN_DTB))
if (&__dtb_start != &__dtb_end)
return &__dtb_start;
return NULL;
}
#endif
/*
......
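For orientation, a minimal sketch (an assumption, not code from the series) of how a platform's plat_mem_setup() consumes the new get_fdt() helper instead of open-coding appended/UHI/built-in DTB discovery; compare the ath79 and BMIPS hunks earlier in this diff.

    /*
     * Sketch only: the enclosing plat_mem_setup() and the KSEG0ADDR()
     * conversion follow the ath79 hunk above and are illustrative;
     * get_fdt() is the helper added in the setup.h hunk here.
     */
    void __init plat_mem_setup(void)
    {
            void *dtb = get_fdt();

            if (!dtb)
                    panic("no usable device tree found");

            __dt_setup_arch((void *)KSEG0ADDR(dtb));
    }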
......@@ -130,6 +130,8 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
__u32 len, __u8 proto,
__wsum sum)
{
unsigned long tmp = (__force unsigned long)sum;
__asm__(
" .set push # csum_tcpudp_nofold\n"
" .set noat \n"
......@@ -157,7 +159,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
" addu %0, $1 \n"
#endif
" .set pop"
: "=r" (sum)
: "=r" (tmp)
: "0" ((__force unsigned long)daddr),
"r" ((__force unsigned long)saddr),
#ifdef __MIPSEL__
......@@ -167,7 +169,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
#endif
"r" ((__force unsigned long)sum));
return sum;
return (__force __wsum)tmp;
}
#define csum_tcpudp_nofold csum_tcpudp_nofold
......
......@@ -99,7 +99,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
* contains a completion barrier prior to the LL, so we don't \
* need to emit an extra one here. \
*/ \
if (!__SYNC_loongson3_war) \
if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
__res = (__typeof__(*(ptr))) \
......@@ -191,7 +191,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* contains a completion barrier prior to the LL, so we don't \
* need to emit an extra one here. \
*/ \
if (!__SYNC_loongson3_war) \
if (__SYNC_loongson3_war == 0) \
smp_mb__before_llsc(); \
\
__res = cmpxchg_local((ptr), (old), (new)); \
......@@ -201,7 +201,7 @@ unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
* contains a completion barrier after the SC, so we don't \
* need to emit an extra one here. \
*/ \
if (!__SYNC_loongson3_war) \
if (__SYNC_loongson3_war == 0) \
smp_llsc_mb(); \
\
__res; \
......
......@@ -122,6 +122,11 @@ static inline int __pure __get_cpu_type(const int cpu_type)
case CPU_VR4181A:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R4300
case CPU_R4300:
case CPU_R4310:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R4X00
case CPU_R4000PC:
case CPU_R4000SC:
......
......@@ -302,7 +302,7 @@ enum cpu_type_enum {
/*
* R4000 class processors
*/
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200,
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R10000,
CPU_R12000, CPU_R14000, CPU_R16000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
......
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
*
*/
#ifndef __ASM_DMA_COHERENCE_H
#define __ASM_DMA_COHERENCE_H
enum coherent_io_user_state {
IO_COHERENCE_DEFAULT,
IO_COHERENCE_ENABLED,
IO_COHERENCE_DISABLED,
};
#if defined(CONFIG_DMA_PERDEV_COHERENT)
/* Don't provide (hw_)coherentio to avoid misuse */
#elif defined(CONFIG_DMA_MAYBE_COHERENT)
extern enum coherent_io_user_state coherentio;
extern int hw_coherentio;
static inline bool dev_is_dma_coherent(struct device *dev)
{
return coherentio == IO_COHERENCE_ENABLED ||
(coherentio == IO_COHERENCE_DEFAULT && hw_coherentio);
}
#else
#ifdef CONFIG_DMA_NONCOHERENT
#define coherentio IO_COHERENCE_DISABLED
#else
#define coherentio IO_COHERENCE_ENABLED
#endif
#define hw_coherentio 0
#endif /* CONFIG_DMA_MAYBE_COHERENT */
#endif
......@@ -65,11 +65,11 @@
#define I_FR_SFT 21
#define MIPSInst_FR(x) ((MIPSInst(x) & 0x03e00000) >> I_FR_SFT)
#define I_FMA_FUNC_SFT 2
#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x0000003c) >> I_FMA_FUNC_SFT)
#define I_FMA_FUNC_SFT 3
#define MIPSInst_FMA_FUNC(x) ((MIPSInst(x) & 0x00000038) >> I_FMA_FUNC_SFT)
#define I_FMA_FFMT_SFT 0
#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000003)
#define MIPSInst_FMA_FFMT(x) (MIPSInst(x) & 0x00000007)
typedef unsigned int mips_instruction;
......
......@@ -20,6 +20,7 @@
#define IRQ_STACK_SIZE THREAD_SIZE
#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
extern void __init init_IRQ(void);
extern void *irq_stack[NR_CPUS];
/*
......
......@@ -10,8 +10,6 @@
#define _ASM_IRQ_CPU_H
extern void mips_cpu_irq_init(void);
extern void rm7k_cpu_irq_init(void);
extern void rm9k_cpu_irq_init(void);
#ifdef CONFIG_IRQ_DOMAIN
struct device_node;
......
......@@ -28,12 +28,6 @@
#endif /* CONFIG_I8259 */
#endif
#ifdef CONFIG_IRQ_CPU_RM7K
#ifndef RM7K_CPU_IRQ_BASE
#define RM7K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+8)
#endif
#endif
#endif /* CONFIG_IRQ_MIPS_CPU */
#endif /* __ASM_MACH_GENERIC_IRQ_H */
......@@ -23,8 +23,8 @@ extern u32 memsize, highmemsize;
extern const struct plat_smp_ops loongson3_smp_ops;
/* loongson-specific command line, env and memory initialization */
extern void __init prom_init_memory(void);
extern void __init prom_init_env(void);
extern void __init szmem(unsigned int node);
extern void *loongson_fdt_blob;
/* irq operation functions */
......
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MACH_N64_IRQ_H
#define __ASM_MACH_N64_IRQ_H
#define NR_IRQS 8
#include <asm/mach-generic/irq.h>
#endif /* __ASM_MACH_N64_IRQ_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_MACH_N64_KMALLOC_H
#define __ASM_MACH_N64_KMALLOC_H
/* The default of 128 bytes wastes too much; use 32 (the largest cacheline, the I-cache's) */
#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
#endif /* __ASM_MACH_N64_KMALLOC_H */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Pistachio IRQ setup
*
* Copyright (C) 2014 Google, Inc.
*/
#ifndef __ASM_MACH_PISTACHIO_IRQ_H
#define __ASM_MACH_PISTACHIO_IRQ_H
#define NR_IRQS 256
#include <asm/mach-generic/irq.h>
#endif /* __ASM_MACH_PISTACHIO_IRQ_H */
......@@ -1085,6 +1085,10 @@
#define CVMVMCONF_RMMUSIZEM1_S 0
#define CVMVMCONF_RMMUSIZEM1 (_U64CAST_(0xff) << CVMVMCONF_RMMUSIZEM1_S)
/* Debug register field definitions */
#define MIPS_DEBUG_DBP_SHIFT 1
#define MIPS_DEBUG_DBP (_ULCAST_(1) << MIPS_DEBUG_DBP_SHIFT)
/*
* Coprocessor 1 (FPU) register names
*/
......
......@@ -282,7 +282,6 @@ union octeon_cvmemctl {
extern void octeon_check_cpu_bist(void);
int octeon_prune_device_tree(void);
extern const char __appended_dtb;
extern const char __dtb_octeon_3xxx_begin;
extern const char __dtb_octeon_68xx_begin;
......
......@@ -202,14 +202,13 @@ static inline unsigned long ___pa(unsigned long x)
/*
* RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
* (lmo) rsp. 8431fd094d625b94d364fe393076ccef88e6ce18 (kernel.org). The
* discussion can be found in lkml posting
* <a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com> which is
* archived at http://lists.linuxcoding.com/kernel/2006-q3/msg17360.html
* discussion can be found in
* https://lore.kernel.org/lkml/a2ebde260608230500o3407b108hc03debb9da6e62c@mail.gmail.com
*
* It is unclear if the miscompilations mentioned in
* http://lkml.org/lkml/2010/8/8/138 also affect MIPS so we keep this one
* until GCC 3.x has been retired before we can apply
* https://patchwork.linux-mips.org/patch/1541/
* https://lore.kernel.org/lkml/1281303490-390-1-git-send-email-namhyung@gmail.com
* also affect MIPS so we keep this one until GCC 3.x has been retired
* before we can apply https://patchwork.linux-mips.org/patch/1541/
*/
#ifndef __pa_symbol
......@@ -255,6 +254,12 @@ extern bool __virt_addr_valid(const volatile void *kaddr);
#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC
extern unsigned long __kaslr_offset;
static inline unsigned long kaslr_offset(void)
{
return __kaslr_offset;
}
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
......
......@@ -64,6 +64,7 @@ struct vm_area_struct;
#define __S111 __pgprot(0)
extern unsigned long _page_cachable_default;
extern void __update_cache(unsigned long address, pte_t pte);
/*
* ZERO_PAGE is a global shared page that is always zero; used
......@@ -94,31 +95,31 @@ extern void paging_init(void);
#define htw_stop() \
do { \
unsigned long flags; \
unsigned long __flags; \
\
if (cpu_has_htw) { \
local_irq_save(flags); \
local_irq_save(__flags); \
if(!raw_current_cpu_data.htw_seq++) { \
write_c0_pwctl(read_c0_pwctl() & \
~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
back_to_back_c0_hazard(); \
} \
local_irq_restore(flags); \
local_irq_restore(__flags); \
} \
} while(0)
#define htw_start() \
do { \
unsigned long flags; \
unsigned long __flags; \
\
if (cpu_has_htw) { \
local_irq_save(flags); \
local_irq_save(__flags); \
if (!--raw_current_cpu_data.htw_seq) { \
write_c0_pwctl(read_c0_pwctl() | \
(1 << MIPS_PWCTL_PWEN_SHIFT)); \
back_to_back_c0_hazard(); \
} \
local_irq_restore(flags); \
local_irq_restore(__flags); \
} \
} while(0)
......@@ -224,7 +225,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval)
{
extern void __update_cache(unsigned long address, pte_t pte);
if (!pte_present(pteval))
goto cache_sync_done;
......
......@@ -53,7 +53,7 @@ struct pt_regs {
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
return regs->regs[31];
return regs->regs[29];
}
static inline void instruction_pointer_set(struct pt_regs *regs,
......
......@@ -23,7 +23,6 @@
#include <asm/mipsmtregs.h>
#include <asm/mmzone.h>
#include <asm/unroll.h>
#include <linux/uaccess.h> /* for uaccess_kernel() */
extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);
......@@ -102,14 +101,17 @@ static inline void flush_scache_line(unsigned long addr)
cache_op(Hit_Writeback_Inv_SD, addr);
}
#define protected_cache_op(op,addr) \
#ifdef CONFIG_EVA
#define protected_cache_op(op, addr) \
({ \
int __err = 0; \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: cache %1, (%2) \n" \
" .set mips0 \n" \
" .set eva \n" \
"1: cachee %1, (%2) \n" \
"2: .insn \n" \
" .set pop \n" \
" .section .fixup,\"ax\" \n" \
......@@ -123,17 +125,16 @@ static inline void flush_scache_line(unsigned long addr)
: "i" (op), "r" (addr), "i" (-EFAULT)); \
__err; \
})
#else
#define protected_cachee_op(op,addr) \
#define protected_cache_op(op, addr) \
({ \
int __err = 0; \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
" .set mips0 \n" \
" .set eva \n" \
"1: cachee %1, (%2) \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
"1: cache %1, (%2) \n" \
"2: .insn \n" \
" .set pop \n" \
" .section .fixup,\"ax\" \n" \
......@@ -147,6 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
: "i" (op), "r" (addr), "i" (-EFAULT)); \
__err; \
})
#endif
/*
* The next two are for badland addresses like signal trampolines.
......@@ -158,11 +160,7 @@ static inline int protected_flush_icache_line(unsigned long addr)
return protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
default:
#ifdef CONFIG_EVA
return protected_cachee_op(Hit_Invalidate_I, addr);
#else
return protected_cache_op(Hit_Invalidate_I, addr);
#endif
}
}
......@@ -174,20 +172,12 @@ static inline int protected_flush_icache_line(unsigned long addr)
*/
static inline int protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
return protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
return protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}
static inline int protected_writeback_scache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
return protected_cachee_op(Hit_Writeback_Inv_SD, addr);
#else
return protected_cache_op(Hit_Writeback_Inv_SD, addr);
#endif
}
/*
......@@ -307,43 +297,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
} \
}
#ifndef CONFIG_EVA
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
#else
#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
unsigned long end) \
{ \
unsigned long lsize = cpu_##desc##_line_size(); \
unsigned long addr = start & ~(lsize - 1); \
unsigned long aend = (end - 1) & ~(lsize - 1); \
\
if (!uaccess_kernel()) { \
while (1) { \
protected_cachee_op(hitop, addr); \
if (addr == aend) \
break; \
addr += lsize; \
} \
} else { \
while (1) { \
protected_cache_op(hitop, addr); \
if (addr == aend) \
break; \
addr += lsize; \
} \
\
} \
}
__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
protected_, loongson2_)
......
......@@ -5,7 +5,7 @@
#if defined(CONFIG_MIPS_SPRAM)
extern __init void spram_config(void);
#else
static inline void spram_config(void) { };
static inline void spram_config(void) { }
#endif /* CONFIG_MIPS_SPRAM */
#endif /* _MIPS_SPRAM_H */
......@@ -24,6 +24,7 @@ extern void (*board_ebase_setup)(void);
extern void (*board_cache_error_setup)(void);
extern int register_nmi_notifier(struct notifier_block *nb);
extern char except_vec_nmi[];
#define nmi_notifier(fn, pri) \
({ \
......
......@@ -26,6 +26,8 @@
#define MODULE_PROC_FAMILY "TX39XX "
#elif defined CONFIG_CPU_VR41XX
#define MODULE_PROC_FAMILY "VR41XX "
#elif defined CONFIG_CPU_R4300
#define MODULE_PROC_FAMILY "R4300 "
#elif defined CONFIG_CPU_R4X00
#define MODULE_PROC_FAMILY "R4X00 "
#elif defined CONFIG_CPU_TX49XX
......
......@@ -26,7 +26,6 @@
#endif
#define MAX_VPES 16
#define VPE_PATH_MAX 256
static inline int aprp_cpu_index(void)
{
......@@ -62,7 +61,6 @@ struct vpe {
unsigned long len;
char *pbuffer;
unsigned long plen;
char cwd[VPE_PATH_MAX];
unsigned long __start;
......@@ -111,7 +109,6 @@ extern const struct file_operations vpe_fops;
int vpe_notify(int index, struct vpe_notifications *notify);
void *vpe_get_shared(int index);
char *vpe_getcwd(int index);
struct vpe *get_vpe(int minor);
struct tc *get_tc(int index);
......
......@@ -2,8 +2,5 @@
generated-y += unistd_n32.h
generated-y += unistd_n64.h
generated-y += unistd_o32.h
generated-y += unistd_nr_n32.h
generated-y += unistd_nr_n64.h
generated-y += unistd_nr_o32.h
generic-y += kvm_para.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef _ASM_MIPS_PERF_REGS_H
#define _ASM_MIPS_PERF_REGS_H
enum perf_event_mips_regs {
PERF_REG_MIPS_PC,
PERF_REG_MIPS_R1,
PERF_REG_MIPS_R2,
PERF_REG_MIPS_R3,
PERF_REG_MIPS_R4,
PERF_REG_MIPS_R5,
PERF_REG_MIPS_R6,
PERF_REG_MIPS_R7,
PERF_REG_MIPS_R8,
PERF_REG_MIPS_R9,
PERF_REG_MIPS_R10,
PERF_REG_MIPS_R11,
PERF_REG_MIPS_R12,
PERF_REG_MIPS_R13,
PERF_REG_MIPS_R14,
PERF_REG_MIPS_R15,
PERF_REG_MIPS_R16,
PERF_REG_MIPS_R17,
PERF_REG_MIPS_R18,
PERF_REG_MIPS_R19,
PERF_REG_MIPS_R20,
PERF_REG_MIPS_R21,
PERF_REG_MIPS_R22,
PERF_REG_MIPS_R23,
PERF_REG_MIPS_R24,
PERF_REG_MIPS_R25,
PERF_REG_MIPS_R26,
PERF_REG_MIPS_R27,
PERF_REG_MIPS_R28,
PERF_REG_MIPS_R29,
PERF_REG_MIPS_R30,
PERF_REG_MIPS_R31,
PERF_REG_MIPS_MAX = PERF_REG_MIPS_R31 + 1,
};
#endif /* _ASM_MIPS_PERF_REGS_H */
......@@ -3,7 +3,6 @@ config ACER_PICA_61
bool "Support for Acer PICA 1 chipset"
depends on MACH_JAZZ
select DMA_NONCOHERENT
select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with a R4400 133/150 MHz CPU. To compile a Linux
kernel that runs on these, say Y here. For details about Linux on
......@@ -15,7 +14,6 @@ config MIPS_MAGNUM_4000
depends on MACH_JAZZ
select DMA_NONCOHERENT
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with a R4000 100 MHz CPU. To compile a Linux
kernel that runs on these, say Y here. For details about Linux on
......@@ -26,7 +24,6 @@ config OLIVETTI_M700
bool "Support for Olivetti M700-10"
depends on MACH_JAZZ
select DMA_NONCOHERENT
select SYS_SUPPORTS_LITTLE_ENDIAN
help
This is a machine with a R4000 100 MHz CPU. To compile a Linux
kernel that runs on these, say Y here. For details about Linux on
......
......@@ -71,7 +71,6 @@ obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
......@@ -104,7 +103,7 @@ obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
obj-$(CONFIG_PERF_EVENTS) += perf_event.o
obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
......
......@@ -35,6 +35,11 @@ static int __init_cache_level(unsigned int cpu)
leaves += (c->icache.waysize) ? 2 : 1;
if (c->vcache.waysize) {
levels++;
leaves++;
}
if (c->scache.waysize) {
levels++;
leaves++;
......@@ -74,25 +79,36 @@ static int __populate_cache_leaves(unsigned int cpu)
struct cpuinfo_mips *c = &current_cpu_data;
struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
int level = 1;
if (c->icache.waysize) {
/* L1 caches are per core */
/* I/D caches are per core */
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
level++;
} else {
populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
level++;
}
if (c->vcache.waysize) {
/* Vcache is per core as well */
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
level++;
}
if (c->scache.waysize) {
/* L2 cache is per cluster */
/* Scache is per cluster */
fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
level++;
}
if (c->tcache.waysize)
populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
this_cpu_ci->cpu_map_populated = true;
......
......@@ -193,7 +193,7 @@ void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
cd->min_delta_ticks = 0xf;
cd->irq = irq;
cd->cpumask = cpumask_of(0),
cd->cpumask = cpumask_of(0);
clockevents_register_device(cd);
if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER,
"txx9tmr", &txx9_clock_event_device))
......
......@@ -91,7 +91,6 @@
.set pop
.endm
.section .text.cps-vec
.balign 0x1000
LEAF(mips_cps_core_entry)
......
......@@ -1154,6 +1154,15 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
break;
}
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
__cpu_name[cpu] = "R4300";
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
MIPS_CPU_LLSC;
c->tlbsize = 32;
break;
case PRID_IMP_R4600:
c->cputype = CPU_R4600;
__cpu_name[cpu] = "R4600";
......@@ -1830,16 +1839,17 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
*/
case PRID_COMP_INGENIC_D0:
c->isa_level &= ~MIPS_CPU_ISA_M32R2;
break;
fallthrough;
/*
* The config0 register in the XBurst CPUs with a processor ID of
* PRID_COMP_INGENIC_D1 has an abandoned huge page tlb mode, this
* mode is not compatible with the MIPS standard, it will cause
* tlbmiss and into an infinite loop (line 21 in the tlb-funcs.S)
* when starting the init process. After chip reset, the default
* is HPTLB mode, Write 0xa9000000 to cp0 register 5 sel 4 to
* switch back to VTLB mode to prevent getting stuck.
* PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned
* huge page TLB mode. This mode is not compatible with the MIPS
* standard: it will cause a TLB miss and fall into an infinite loop
* (line 21 in tlb-funcs.S) when starting the init process.
* After chip reset the default is HPTLB mode, so write 0xa9000000
* to cp0 register 5 sel 4 to switch back to VTLB mode and prevent
* getting stuck.
*/
case PRID_COMP_INGENIC_D1:
write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/crash_dump.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
static void *kdump_buf_page;
/**
* copy_oldmem_page - copy one page from "oldmem"
......@@ -19,10 +14,6 @@ static void *kdump_buf_page;
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel.
*
* Calling copy_to_user() in atomic context is not desirable. Hence first
* copying the data to a pre-allocated kernel page and then copying to user
* space in non-atomic context.
*/
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
......@@ -32,36 +23,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
if (!csize)
return 0;
vaddr = kmap_atomic_pfn(pfn);
vaddr = kmap_local_pfn(pfn);
if (!userbuf) {
memcpy(buf, (vaddr + offset), csize);
kunmap_atomic(vaddr);
memcpy(buf, vaddr + offset, csize);
} else {
if (!kdump_buf_page) {
pr_warn("Kdump: Kdump buffer page not allocated\n");
return -EFAULT;
}
copy_page(kdump_buf_page, vaddr);
kunmap_atomic(vaddr);
if (copy_to_user(buf, (kdump_buf_page + offset), csize))
return -EFAULT;
if (copy_to_user(buf, vaddr + offset, csize))
csize = -EFAULT;
}
return csize;
}
static int __init kdump_buf_page_init(void)
{
int ret = 0;
kunmap_local(vaddr);
kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!kdump_buf_page) {
pr_warn("Kdump: Failed to allocate kdump buffer page\n");
ret = -ENOMEM;
}
return ret;
return csize;
}
arch_initcall(kdump_buf_page_init);
......@@ -73,7 +73,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
mm_segment_t old_fs;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
......@@ -81,10 +80,7 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
if (unlikely(faulted))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
set_fs(old_fs);
return 0;
}
......
......@@ -349,8 +349,8 @@ NESTED(ejtag_debug_handler, PT_SIZE, sp)
MTC0 k0, CP0_DESAVE
mfc0 k0, CP0_DEBUG
sll k0, k0, 30 # Check for SDBBP.
bgez k0, ejtag_return
andi k0, k0, MIPS_DEBUG_DBP # Check for SDBBP.
beqz k0, ejtag_return
#ifdef CONFIG_SMP
1: PTR_LA k0, ejtag_debug_buffer_spinlock
......
......@@ -93,33 +93,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
#ifdef CONFIG_USE_OF
#if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \
defined(CONFIG_MIPS_ELF_APPENDED_DTB)
PTR_LA t2, __appended_dtb
#ifdef CONFIG_CPU_BIG_ENDIAN
li t1, 0xd00dfeed
#else /* !CONFIG_CPU_BIG_ENDIAN */
li t1, 0xedfe0dd0
#endif /* !CONFIG_CPU_BIG_ENDIAN */
lw t0, (t2)
beq t0, t1, dtb_found
#endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */
li t1, -2
move t2, a1
beq a0, t1, dtb_found
#ifdef CONFIG_BUILTIN_DTB
PTR_LA t2, __dtb_start
PTR_LA t1, __dtb_end
bne t1, t2, dtb_found
#endif /* CONFIG_BUILTIN_DTB */
li t2, 0
dtb_found:
#endif /* CONFIG_USE_OF */
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
......@@ -133,10 +106,6 @@ dtb_found:
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
#ifdef CONFIG_USE_OF
LONG_S t2, fw_passed_dtb
#endif
MTC0 zero, CP0_CONTEXT # clear context register
#ifdef CONFIG_64BIT
MTC0 zero, CP0_XCONTEXT
......
......@@ -151,6 +151,7 @@ void __init check_wait(void)
cpu_wait = r39xx_wait;
break;
case CPU_R4200:
/* case CPU_R4300: */
case CPU_R4600:
case CPU_R4640:
case CPU_R4650:
......
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Copyright (C) 2003 Ralf Baechle
*
* Handler for RM7000 extended interrupts. These are a non-standard
* feature so we handle them separately from standard interrupts.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
static inline void unmask_rm7k_irq(struct irq_data *d)
{
set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
static inline void mask_rm7k_irq(struct irq_data *d)
{
clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
}
static struct irq_chip rm7k_irq_controller = {
.name = "RM7000",
.irq_ack = mask_rm7k_irq,
.irq_mask = mask_rm7k_irq,
.irq_mask_ack = mask_rm7k_irq,
.irq_unmask = unmask_rm7k_irq,
.irq_eoi = unmask_rm7k_irq
};
void __init rm7k_cpu_irq_init(void)
{
int base = RM7K_CPU_IRQ_BASE;
int i;
clear_c0_intcontrol(0x00000f00); /* Mask all */
for (i = base; i < base + 4; i++)
irq_set_chip_and_handler(i, &rm7k_irq_controller,
handle_percpu_irq);
}
......@@ -32,7 +32,6 @@
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <linux/uaccess.h>
#include <asm/irq_regs.h>
static struct hard_trap_info {
......@@ -208,18 +207,6 @@ void arch_kgdb_breakpoint(void)
".set\treorder");
}
void kgdb_call_nmi_hook(void *ignored)
{
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
set_fs(old_fs);
}
static int compute_signal(int tt)
{
struct hard_trap_info *ht;
......@@ -302,7 +289,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
mm_segment_t old_fs;
#ifdef CONFIG_KPROBES
/*
......@@ -317,17 +303,11 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
if (user_mode(regs))
return NOTIFY_DONE;
/* Kernel mode. Set correct address limit */
old_fs = get_fs();
set_fs(KERNEL_DS);
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
set_fs(old_fs);
if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
return NOTIFY_DONE;
}
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
......@@ -337,7 +317,6 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
local_irq_enable();
__flush_cache_all();
set_fs(old_fs);
return NOTIFY_STOP;
}
......
......@@ -40,22 +40,13 @@ void *module_alloc(unsigned long size)
}
#endif
static int apply_r_mips_none(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
{
return 0;
}
static int apply_r_mips_32(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
{
*location = base + v;
return 0;
}
static int apply_r_mips_26(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_26(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
if (v % 4) {
pr_err("module %s: dangerous R_MIPS_26 relocation\n",
......@@ -75,8 +66,8 @@ static int apply_r_mips_26(struct module *me, u32 *location,
return 0;
}
static int apply_r_mips_hi16(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_hi16(struct module *me, u32 *location, Elf_Addr v,
bool rela)
{
struct mips_hi16 *n;
......@@ -217,26 +208,25 @@ static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
return 0;
}
static int apply_r_mips_pc16(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_pc16(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 16);
}
static int apply_r_mips_pc21(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_pc21(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 21);
}
static int apply_r_mips_pc26(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_pc26(struct module *me, u32 *location, u32 base,
Elf_Addr v)
{
return apply_r_mips_pc(me, location, base, v, 26);
}
static int apply_r_mips_64(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_64(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
......@@ -246,8 +236,7 @@ static int apply_r_mips_64(struct module *me, u32 *location,
return 0;
}
static int apply_r_mips_higher(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_higher(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
......@@ -258,8 +247,7 @@ static int apply_r_mips_higher(struct module *me, u32 *location,
return 0;
}
static int apply_r_mips_highest(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela)
static int apply_r_mips_highest(u32 *location, Elf_Addr v, bool rela)
{
if (WARN_ON(!rela))
return -EINVAL;
......@@ -272,12 +260,14 @@ static int apply_r_mips_highest(struct module *me, u32 *location,
/**
* reloc_handler() - Apply a particular relocation to a module
* @type: type of the relocation to apply
* @me: the module to apply the reloc to
* @location: the address at which the reloc is to be applied
* @base: the existing value at location for REL-style; 0 for RELA-style
* @v: the value of the reloc, with addend for RELA-style
* @rela: indication of whether this is a RELA (true) or REL (false) relocation
*
* Each implemented reloc_handler function applies a particular type of
* Each implemented relocation function applies a particular type of
* relocation to the module @me. Relocs that may be found in either REL or RELA
* variants can be handled by making use of the @base & @v parameters which are
* set to values which abstract the difference away from the particular reloc
......@@ -285,23 +275,40 @@ static int apply_r_mips_highest(struct module *me, u32 *location,
*
* Return: 0 upon success, else -ERRNO
*/
typedef int (*reloc_handler)(struct module *me, u32 *location,
u32 base, Elf_Addr v, bool rela);
/* The handlers for known reloc types */
static reloc_handler reloc_handlers[] = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32,
[R_MIPS_26] = apply_r_mips_26,
[R_MIPS_HI16] = apply_r_mips_hi16,
[R_MIPS_LO16] = apply_r_mips_lo16,
[R_MIPS_PC16] = apply_r_mips_pc16,
[R_MIPS_64] = apply_r_mips_64,
[R_MIPS_HIGHER] = apply_r_mips_higher,
[R_MIPS_HIGHEST] = apply_r_mips_highest,
[R_MIPS_PC21_S2] = apply_r_mips_pc21,
[R_MIPS_PC26_S2] = apply_r_mips_pc26,
};
static int reloc_handler(u32 type, struct module *me, u32 *location, u32 base,
Elf_Addr v, bool rela)
{
switch (type) {
case R_MIPS_NONE:
break;
case R_MIPS_32:
apply_r_mips_32(location, base, v);
break;
case R_MIPS_26:
return apply_r_mips_26(me, location, base, v);
case R_MIPS_HI16:
return apply_r_mips_hi16(me, location, v, rela);
case R_MIPS_LO16:
return apply_r_mips_lo16(me, location, base, v, rela);
case R_MIPS_PC16:
return apply_r_mips_pc16(me, location, base, v);
case R_MIPS_PC21_S2:
return apply_r_mips_pc21(me, location, base, v);
case R_MIPS_PC26_S2:
return apply_r_mips_pc26(me, location, base, v);
case R_MIPS_64:
return apply_r_mips_64(location, v, rela);
case R_MIPS_HIGHER:
return apply_r_mips_higher(location, v, rela);
case R_MIPS_HIGHEST:
return apply_r_mips_highest(location, v, rela);
default:
pr_err("%s: Unknown relocation type %u\n", me->name, type);
return -EINVAL;
}
return 0;
}
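A minimal sketch, assuming the surrounding module.c context, of how a caller derives the @base and @v values described in the kernel-doc above for REL versus RELA entries before dispatching to reloc_handler(); the helper name example_apply_one and its argument list are hypothetical, and R_MIPS_32 is used purely as a sample relocation type (the real call site is __apply_relocate() below):

/* Sketch only: deriving base/v for REL vs RELA before calling reloc_handler(). */
static int example_apply_one(struct module *me, u32 *location,
			     Elf_Sym *sym, Elf_Addr addend, bool rela)
{
	u32 base;
	Elf_Addr v;

	if (rela) {
		/* RELA: the addend is carried in the relocation record itself. */
		base = 0;
		v = sym->st_value + addend;
	} else {
		/* REL: the addend is the word already stored at the location. */
		base = *location;
		v = sym->st_value;
	}

	return reloc_handler(R_MIPS_32, me, location, base, v, rela);
}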
static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
......@@ -311,7 +318,6 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
Elf_Mips_Rel *rel;
Elf_Mips_Rela *rela;
} r;
reloc_handler handler;
Elf_Sym *sym;
u32 *location, base;
unsigned int i, type;
......@@ -343,17 +349,6 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
}
type = ELF_MIPS_R_TYPE(*r.rel);
if (type < ARRAY_SIZE(reloc_handlers))
handler = reloc_handlers[type];
else
handler = NULL;
if (!handler) {
pr_err("%s: Unknown relocation type %u\n",
me->name, type);
err = -EINVAL;
goto out;
}
if (rela) {
v = sym->st_value + r.rela->r_addend;
......@@ -365,7 +360,7 @@ static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
r.rel = &r.rel[1];
}
err = handler(me, location, base, v, rela);
err = reloc_handler(type, me, location, base, v, rela);
if (err)
goto out;
}
......
......@@ -1919,19 +1919,22 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
unsigned int raw_id = config & 0xff;
unsigned int base_id = raw_id & 0x7f;
unsigned int base_id = config & 0x7f;
unsigned int event_max;
raw_event.cntr_mask = CNTR_ALL;
raw_event.event_id = base_id;
if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
if (base_id > 0x42)
return ERR_PTR(-EOPNOTSUPP);
} else {
if (base_id > 0x3a)
return ERR_PTR(-EOPNOTSUPP);
if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
event_max = 0x5f;
else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
event_max = 0x42;
else
event_max = 0x3a;
if (base_id > event_max) {
return ERR_PTR(-EOPNOTSUPP);
}
switch (base_id) {
......@@ -1941,7 +1944,7 @@ static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
case 0x1f:
case 0x2f:
case 0x34:
case 0x3b ... 0x3f:
case 0x3e ... 0x3f:
return ERR_PTR(-EOPNOTSUPP);
default:
break;
......@@ -2077,6 +2080,7 @@ init_hw_perf_events(void)
case CPU_CAVIUM_OCTEON:
case CPU_CAVIUM_OCTEON_PLUS:
case CPU_CAVIUM_OCTEON2:
case CPU_CAVIUM_OCTEON3:
mipspmu.name = "octeon";
mipspmu.general_event_map = &octeon_event_map;
mipspmu.cache_event_map = &octeon_cache_map;
......
// SPDX-License-Identifier: GPL-2.0
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Some parts derived from x86 version of this file.
*
* Copyright (C) 2013 Cavium, Inc.
*/
#include <linux/perf_event.h>
#include <asm/ptrace.h>
#ifdef CONFIG_32BIT
u64 perf_reg_abi(struct task_struct *tsk)
{
return PERF_SAMPLE_REGS_ABI_32;
}
#else /* Must be CONFIG_64BIT */
u64 perf_reg_abi(struct task_struct *tsk)
{
if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
return PERF_SAMPLE_REGS_ABI_32;
else
return PERF_SAMPLE_REGS_ABI_64;
}
#endif /* CONFIG_32BIT */
int perf_reg_validate(u64 mask)
{
if (!mask)
return -EINVAL;
if (mask & ~((1ull << PERF_REG_MIPS_MAX) - 1))
return -EINVAL;
return 0;
}
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
long v;
switch (idx) {
case PERF_REG_MIPS_PC:
v = regs->cp0_epc;
break;
case PERF_REG_MIPS_R1 ... PERF_REG_MIPS_R25:
v = regs->regs[idx - PERF_REG_MIPS_R1 + 1];
break;
case PERF_REG_MIPS_R28 ... PERF_REG_MIPS_R31:
v = regs->regs[idx - PERF_REG_MIPS_R28 + 28];
break;
default:
WARN_ON_ONCE(1);
return 0;
}
return (s64)v; /* Sign extend if 32-bit. */
}
void perf_get_regs_user(struct perf_regs *regs_user,
struct pt_regs *regs)
{
regs_user->regs = task_pt_regs(current);
regs_user->abi = perf_reg_abi(current);
}
......@@ -9,50 +9,35 @@
* Copyright (C) 2004 Thiemo Seufer
* Copyright (C) 2013 Imagination Technologies Ltd.
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/export.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/personality.h>
#include <linux/sys.h>
#include <linux/init.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
#include <linux/kernel.h>
#include <linux/nmi.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/prctl.h>
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <asm/abi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/exec.h>
#include <asm/fpu.h>
#include <asm/inst.h>
#include <asm/irq.h>
#include <asm/mips-cps.h>
#include <asm/irq_regs.h>
#include <asm/isadep.h>
#include <asm/msa.h>
#include <asm/mips-cps.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
#include <asm/reg.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/elf.h>
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
#include <asm/irq_regs.h>
#include <asm/exec.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
......@@ -205,6 +190,36 @@ struct mips_frame_info {
#define J_TARGET(pc,target) \
(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
static inline int is_jr_ra_ins(union mips_instruction *ip)
{
#ifdef CONFIG_CPU_MICROMIPS
/*
* jr16 ra
* jr ra
*/
if (mm_insn_16bit(ip->word >> 16)) {
if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
ip->mm16_r5_format.rt == mm_jr16_op &&
ip->mm16_r5_format.imm == 31)
return 1;
return 0;
}
if (ip->r_format.opcode == mm_pool32a_op &&
ip->r_format.func == mm_pool32axf_op &&
((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
ip->r_format.rt == 31)
return 1;
return 0;
#else
if (ip->r_format.opcode == spec_op &&
ip->r_format.func == jr_op &&
ip->r_format.rs == 31)
return 1;
return 0;
#endif
}
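The classical (non-microMIPS) branch of is_jr_ra_ins() above matches opcode SPECIAL, funct jr and rs == 31; the canonical encoding of "jr ra" is 0x03e00008. A small illustrative check, assuming the surrounding process.c context and a !CONFIG_CPU_MICROMIPS build (the helper name is hypothetical):

/* Illustration only: 0x03e00008 = SPECIAL | rs=31 | funct=jr, i.e. "jr ra". */
static void __init example_check_jr_ra(void)
{
	union mips_instruction insn;

	insn.word = 0x03e00008;
	WARN_ON(!is_jr_ra_ins(&insn));	/* expected to hold on classical MIPS */
}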
static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
{
#ifdef CONFIG_CPU_MICROMIPS
......@@ -390,10 +405,8 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
union mips_instruction insn, *ip, *ip_end;
unsigned int last_insn_size = 0;
unsigned int i;
bool saw_jump = false;
info->pc_offset = -1;
......@@ -403,7 +416,9 @@ static int get_frame_info(struct mips_frame_info *info)
if (!ip)
goto err;
for (i = 0; i < max_insns; i++) {
ip_end = (void *)ip + (info->func_size ? info->func_size : 512);
while (ip < ip_end) {
ip = (void *)ip + last_insn_size;
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
......@@ -417,7 +432,9 @@ static int get_frame_info(struct mips_frame_info *info)
last_insn_size = 4;
}
if (!info->frame_size) {
if (is_jr_ra_ins(ip)) {
break;
} else if (!info->frame_size) {
is_sp_move_ins(&insn, &info->frame_size);
continue;
} else if (!saw_jump && is_jump_ins(ip)) {
......
......@@ -70,18 +70,14 @@ static void __init sync_icache(void *kbase, unsigned long kernel_length)
__sync();
}
static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset)
static void __init apply_r_mips_64_rel(u32 *loc_new, long offset)
{
*(u64 *)loc_new += offset;
return 0;
}
static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset)
static void __init apply_r_mips_32_rel(u32 *loc_new, long offset)
{
*loc_new += offset;
return 0;
}
static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
......@@ -114,7 +110,8 @@ static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
}
static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset)
static void __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new,
long offset)
{
unsigned long insn = *loc_orig;
unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */
......@@ -122,17 +119,33 @@ static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset
target += offset;
*loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
return 0;
}
static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = {
[R_MIPS_64] = apply_r_mips_64_rel,
[R_MIPS_32] = apply_r_mips_32_rel,
[R_MIPS_26] = apply_r_mips_26_rel,
[R_MIPS_HI16] = apply_r_mips_hi16_rel,
};
static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new,
long offset)
{
switch (type) {
case R_MIPS_64:
apply_r_mips_64_rel(loc_new, offset);
break;
case R_MIPS_32:
apply_r_mips_32_rel(loc_new, offset);
break;
case R_MIPS_26:
return apply_r_mips_26_rel(loc_orig, loc_new, offset);
case R_MIPS_HI16:
apply_r_mips_hi16_rel(loc_orig, loc_new, offset);
break;
default:
pr_err("Unhandled relocation type %d at 0x%pK\n", type,
loc_orig);
return -ENOEXEC;
}
return 0;
}
int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
static int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
{
u32 *r;
u32 *loc_orig;
......@@ -149,14 +162,7 @@ int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
loc_new = RELOCATED(loc_orig);
if (reloc_handlers_rel[type] == NULL) {
/* Unsupported relocation */
pr_err("Unhandled relocation type %d at 0x%pK\n",
type, loc_orig);
return -ENOEXEC;
}
res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
res = reloc_handler(type, loc_orig, loc_new, offset);
if (res)
return res;
}
......@@ -300,6 +306,13 @@ static inline int __init relocation_addr_valid(void *loc_new)
return 1;
}
static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
{
unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
*new_addr = (unsigned long)offset;
}
#if defined(CONFIG_USE_OF)
void __weak *plat_get_fdt(void)
{
......@@ -410,6 +423,9 @@ void *__init relocate_kernel(void)
/* Return the new kernel's entry point */
kernel_entry = RELOCATED(start_kernel);
/* An error may occur earlier, so update the KASLR offset only at the very end */
update_kaslr_offset(&__kaslr_offset, offset);
}
out:
return kernel_entry;
......@@ -418,15 +434,11 @@ void *__init relocate_kernel(void)
/*
* Show relocation information on panic.
*/
void show_kernel_relocation(const char *level)
static void show_kernel_relocation(const char *level)
{
unsigned long offset;
offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
if (__kaslr_offset > 0) {
printk(level);
pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);
pr_cont(" .text @ 0x%pK\n", _text);
pr_cont(" .data @ 0x%pK\n", _sdata);
pr_cont(" .bss @ 0x%pK\n", __bss_start);
......
......@@ -27,8 +27,8 @@
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>
#include <linux/dmi.h>
#include <linux/crash_dump.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
......@@ -37,7 +37,6 @@
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/dma-coherence.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
......@@ -84,6 +83,9 @@ static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };
unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);
static void *detect_magic __initdata = detect_memory_region;
#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
......@@ -404,34 +406,32 @@ static int __init early_parse_memmap(char *p)
}
early_param("memmap", early_parse_memmap);
#ifdef CONFIG_PROC_VMCORE
static unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
phys_addr_t start, end;
u64 i;
setup_elfcorehdr = memparse(p, &p);
for_each_mem_range(i, &start, &end) {
if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
/*
* Reserve from the elf core header to the end of
* the memory segment, that should all be kdump
* reserved memory.
*/
setup_elfcorehdr_size = end - setup_elfcorehdr;
break;
if (!elfcorehdr_size) {
for_each_mem_range(i, &start, &end) {
if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
/*
* Reserve from the elf core header to the end of
* the memory segment, that should all be kdump
* reserved memory.
*/
elfcorehdr_size = end - elfcorehdr_addr;
break;
}
}
}
/*
* If we don't find it in the memory map, then we shouldn't
* have to worry about it, as the new kernel won't use it.
*/
return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}
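As a worked illustration of the reservation logic above, with hypothetical addresses:

/*
 * Hypothetical example: elfcorehdr_addr = 0x0e000000 and the memblock range
 * containing it is [0x08000000, 0x10000000).  Since no explicit size was
 * passed, elfcorehdr_size = 0x10000000 - 0x0e000000 = 0x02000000 (32 MiB),
 * and memblock_reserve() keeps that tail of the segment for the kdump image.
 */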
#ifdef CONFIG_KEXEC
......@@ -653,13 +653,7 @@ static void __init arch_mem_init(char **cmdline_p)
*/
memblock_set_current_limit(PFN_PHYS(max_low_pfn));
#ifdef CONFIG_PROC_VMCORE
if (setup_elfcorehdr && setup_elfcorehdr_size) {
printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
setup_elfcorehdr, setup_elfcorehdr_size);
memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
}
#endif
mips_reserve_vmcore();
mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
......@@ -686,8 +680,6 @@ static void __init arch_mem_init(char **cmdline_p)
memblock_reserve(__pa_symbol(&__nosave_begin),
__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
fdt_init_reserved_mem();
early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}
......@@ -792,10 +784,6 @@ void __init setup_arch(char **cmdline_p)
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
......@@ -806,15 +794,10 @@ static int __init debugfs_mips(void)
arch_initcall(debugfs_mips);
#endif
#ifdef CONFIG_DMA_MAYBE_COHERENT
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio; /* Actual hardware supported DMA coherency setting. */
#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
coherentio = IO_COHERENCE_ENABLED;
dma_default_coherent = true;
pr_info("Hardware DMA cache coherency (command line)\n");
return 0;
}
......@@ -822,7 +805,7 @@ early_param("coherentio", setcoherentio);
static int __init setnocoherentio(char *str)
{
coherentio = IO_COHERENCE_DISABLED;
dma_default_coherent = true;
pr_info("Software DMA cache coherency (command line)\n");
return 0;
}
......
......@@ -451,9 +451,6 @@ static int cps_cpu_disable(void)
unsigned cpu = smp_processor_id();
struct core_boot_config *core_cfg;
if (!cpu)
return -EBUSY;
if (!cps_pm_support_state(CPS_PM_POWER_GATED))
return -EINVAL;
......
......@@ -59,7 +59,7 @@ static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);
/*
* A logcal cpu mask containing only one VPE per core to
* A logical cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
......@@ -510,8 +510,8 @@ static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
* address spaces, a new context is obtained on the current cpu, and tlb
* context on other cpus are invalidated to force a new context allocation
* at switch_mm time, should the mm ever be used on other cpus. For
* multithreaded address spaces, intercpu interrupts have to be sent.
* Another case where intercpu interrupts are required is when the target
* multithreaded address spaces, inter-CPU interrupts have to be sent.
* Another case where inter-CPU interrupts are required is when the target
* mm might be active on another cpu (eg debuggers doing the flushes on
* behalf of debuggees, kswapd stealing pages from another process etc).
* Kanoj 07/00.
......
......@@ -44,17 +44,17 @@ $(uapi)/unistd_o32.h: $(syscallo32) $(syshdr)
sysnr_pfx_unistd_nr_n32 := N32
sysnr_offset_unistd_nr_n32 := 6000
$(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr)
$(kapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr)
$(call if_changed,sysnr)
sysnr_pfx_unistd_nr_n64 := 64
sysnr_offset_unistd_nr_n64 := 5000
$(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr)
$(kapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr)
$(call if_changed,sysnr)
sysnr_pfx_unistd_nr_o32 := O32
sysnr_offset_unistd_nr_o32 := 4000
$(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr)
$(kapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr)
$(call if_changed,sysnr)
systbl_abi_syscall_table_32_o32 := 32_o32
......@@ -79,14 +79,14 @@ $(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl)
uapisyshdr-y += unistd_n32.h \
unistd_n64.h \
unistd_o32.h \
unistd_nr_n32.h \
unistd_nr_n64.h \
unistd_nr_o32.h
unistd_o32.h
kapisyshdr-y += syscall_table_32_o32.h \
syscall_table_64_n32.h \
syscall_table_64_n64.h \
syscall_table_64_o32.h
syscall_table_64_o32.h \
unistd_nr_n32.h \
unistd_nr_n64.h \
unistd_nr_o32.h
targets += $(uapisyshdr-y) $(kapisyshdr-y)
......
......@@ -66,9 +66,10 @@ SECTIONS
KPROBES_TEXT
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
*(.text.*)
*(.fixup)
*(.gnu.warning)
. = ALIGN(16);
*(.got) /* Global offset table */
} :text = 0
_etext = .; /* End of text section */
......@@ -90,6 +91,7 @@ SECTIONS
INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
PAGE_ALIGNED_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
DATA_DATA
......@@ -137,6 +139,11 @@ SECTIONS
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
#endif
.rel.dyn : ALIGN(8) {
*(.rel)
*(.rel*)
}
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
.appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
*(.appended_dtb)
......@@ -220,9 +227,9 @@ SECTIONS
/* ABI crap starts here */
*(.MIPS.abiflags)
*(.MIPS.options)
*(.gnu.attributes)
*(.options)
*(.pdr)
*(.reginfo)
*(.eh_frame)
}
}
......@@ -117,8 +117,8 @@ int __init vpe_module_init(void)
}
device_initialize(&vpe_device);
vpe_device.class = &vpe_class,
vpe_device.parent = NULL,
vpe_device.class = &vpe_class;
vpe_device.parent = NULL;
dev_set_name(&vpe_device, "vpe_sp");
vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
err = device_add(&vpe_device);
......
......@@ -365,8 +365,8 @@ int __init vpe_module_init(void)
}
device_initialize(&vpe_device);
vpe_device.class = &vpe_class,
vpe_device.parent = NULL,
vpe_device.class = &vpe_class;
vpe_device.parent = NULL;
dev_set_name(&vpe_device, "vpe1");
vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
err = device_add(&vpe_device);
......
......@@ -746,28 +746,12 @@ static int vpe_elfload(struct vpe *v)
return 0;
}
static int getcwd(char *buff, int size)
{
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_getcwd(buff, size);
set_fs(old_fs);
return ret;
}
/* checks VPE is unused and gets ready to load program */
static int vpe_open(struct inode *inode, struct file *filp)
{
enum vpe_state state;
struct vpe_notifications *notifier;
struct vpe *v;
int ret;
if (VPE_MODULE_MINOR != iminor(inode)) {
/* assume only 1 device at the moment. */
......@@ -803,12 +787,6 @@ static int vpe_open(struct inode *inode, struct file *filp)
v->plen = P_SIZE;
v->load_addr = NULL;
v->len = 0;
v->cwd[0] = 0;
ret = getcwd(v->cwd, VPE_PATH_MAX);
if (ret < 0)
pr_warn("VPE loader: open, getcwd returned %d\n", ret);
v->shared_ptr = NULL;
v->__start = 0;
......@@ -915,17 +893,6 @@ int vpe_notify(int index, struct vpe_notifications *notify)
}
EXPORT_SYMBOL(vpe_notify);
char *vpe_getcwd(int index)
{
struct vpe *v = get_vpe(index);
if (v == NULL)
return NULL;
return v->cwd;
}
EXPORT_SYMBOL(vpe_getcwd);
module_init(vpe_module_init);
module_exit(vpe_module_exit);
MODULE_DESCRIPTION("MIPS VPE Loader");
......
......@@ -148,7 +148,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
default:
/* Unsupported KVM type */
return -EINVAL;
};
}
/* Allocate page table to map GPA -> RPA */
kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
......
......@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
......@@ -302,7 +303,7 @@ static void ltq_hw_irq_handler(struct irq_desc *desc)
generic_handle_irq(irq_linear_revmap(ltq_domain, hwirq));
/* if this is an EBU irq, we need to ack it or get a deadlock */
if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
if (irq == LTQ_ICU_EBU_IRQ && !module && LTQ_EBU_PCC_ISTAT != 0)
ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
LTQ_EBU_PCC_ISTAT);
}
......@@ -422,12 +423,9 @@ unsigned int get_c0_compare_int(void)
return CP0_LEGACY_COMPARE_IRQ;
}
static const struct of_device_id of_irq_ids[] __initconst = {
{ .compatible = "lantiq,icu", .data = icu_of_init },
{},
};
IRQCHIP_DECLARE(lantiq_icu, "lantiq,icu", icu_of_init);
void __init arch_init_irq(void)
{
of_irq_init(of_irq_ids);
irqchip_init();
}
......@@ -44,10 +44,6 @@ int ltq_soc_type(void)
return soc_info.type;
}
void __init prom_free_prom_memory(void)
{
}
static void __init prom_init_cmdline(void)
{
int argc = fw_arg0;
......@@ -77,11 +73,8 @@ void __init plat_mem_setup(void)
set_io_port_base((unsigned long) KSEG1);
if (fw_passed_dtb) /* UHI interface */
dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
dtb = get_fdt();
if (dtb == NULL)
panic("no dtb found");
/*
......
......@@ -19,7 +19,6 @@ unsigned long __maybe_unused _loongson_addrwincfg_base;
static void __init mips_nmi_setup(void)
{
void *base;
extern char except_vec_nmi[];
base = (void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
......@@ -46,7 +45,3 @@ void __init prom_init(void)
prom_init_uart_base();
board_nmi_handler_setup = mips_nmi_setup;
}
void __init prom_free_prom_memory(void)
{
}
......@@ -41,14 +41,3 @@ void __init prom_init_memory(void)
memblock_add(LOONGSON_HIGHMEM_START, highmemsize << 20);
#endif /* !CONFIG_64BIT */
}
/* override of arch/mips/mm/cache.c: __uncached_access */
int __uncached_access(struct file *file, unsigned long addr)
{
if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory) ||
((addr >= LOONGSON_MMIO_MEM_START) &&
(addr < LOONGSON_MMIO_MEM_END));
}
......@@ -36,10 +36,6 @@ void __init prom_init(void)
setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
}
void __init prom_free_prom_memory(void)
{
}
void __init plat_mem_setup(void)
{
memblock_add(0x0, (memsize << 20));
......
......@@ -5,28 +5,6 @@
cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap
#
# Some versions of binutils, not currently mainline as of 2019/02/04, support
# an -mfix-loongson3-llsc flag which emits a sync prior to each ll instruction
# to work around a CPU bug (see __SYNC_loongson3_war in asm/sync.h for a
# description).
#
# We disable this in order to prevent the assembler meddling with the
# instruction that labels refer to, ie. if we label an ll instruction:
#
# 1: ll v0, 0(a0)
#
# ...then with the assembler fix applied the label may actually point at a sync
# instruction inserted by the assembler, and if we were using the label in an
# exception table the table would no longer contain the address of the ll
# instruction.
#
# Avoid this by explicitly disabling that assembler behaviour. If upstream
# binutils does not merge support for the flag then we can revisit & remove
# this later - for now it ensures vendor toolchains don't cause problems.
#
cflags-$(CONFIG_CPU_LOONGSON64) += $(call as-option,-Wa$(comma)-mno-fix-loongson3-llsc,)
#
# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
# as MIPS64 R2; older versions as just R1. This leaves the possibility open
......
......@@ -25,7 +25,6 @@ u32 node_id_offset;
static void __init mips_nmi_setup(void)
{
void *base;
extern char except_vec_nmi[];
base = (void *)(CAC_BASE + 0x380);
memcpy(base, except_vec_nmi, 0x80);
......@@ -47,6 +46,51 @@ void virtual_early_config(void)
node_id_offset = 44;
}
void __init szmem(unsigned int node)
{
u32 i, mem_type;
static unsigned long num_physpages;
u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
/* Parse memory information and activate */
for (i = 0; i < loongson_memmap->nr_map; i++) {
node_id = loongson_memmap->map[i].node_id;
if (node_id != node)
continue;
mem_type = loongson_memmap->map[i].mem_type;
mem_size = loongson_memmap->map[i].mem_size;
mem_start = loongson_memmap->map[i].mem_start;
switch (mem_type) {
case SYSTEM_RAM_LOW:
case SYSTEM_RAM_HIGH:
start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
node_psize = (mem_size << 20) >> PAGE_SHIFT;
end_pfn = start_pfn + node_psize;
num_physpages += node_psize;
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
start_pfn, end_pfn, num_physpages);
memblock_add_node(PFN_PHYS(start_pfn), PFN_PHYS(node_psize), node);
break;
case SYSTEM_RAM_RESERVED:
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
memblock_reserve(((node_id << 44) + mem_start), mem_size << 20);
break;
}
}
}
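To make the node-relative address arithmetic in szmem() above concrete, a worked example with a hypothetical map entry, assuming 16 KiB pages (PAGE_SHIFT == 14, the usual Loongson64 configuration):

/*
 * Hypothetical entry: node_id = 1, mem_start = 0x200000, mem_size = 1024 (MiB).
 *
 *   base      = (1ULL << 44) + 0x200000 = 0x100000200000
 *   start_pfn = base >> 14              = 0x40000080
 *   pages     = (1024 << 20) >> 14      = 0x10000  (65536 pages)
 *   end_pfn   = start_pfn + pages       = 0x40010080
 *
 * memblock_add_node() is then called with PFN_PHYS(start_pfn) and
 * PFN_PHYS(pages) for the owning node.
 */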
#ifndef CONFIG_NUMA
static void __init prom_init_memory(void)
{
szmem(0);
}
#endif
void __init prom_init(void)
{
fw_init_cmdline();
......@@ -57,7 +101,11 @@ void __init prom_init(void)
loongson_sysconf.early_config();
#ifdef CONFIG_NUMA
prom_init_numa_memory();
#else
prom_init_memory();
#endif
/* Hardcode to CPU UART 0 */
setup_8250_early_printk_port(TO_UNCAC(LOONGSON_REG_BASE + 0x1e0), 0, 1024);
......@@ -66,10 +114,6 @@ void __init prom_init(void)
board_nmi_handler_setup = mips_nmi_setup;
}
void __init prom_free_prom_memory(void)
{
}
static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, resource_size_t hw_start,
resource_size_t size)
{
......
......@@ -25,6 +25,7 @@
#include <asm/time.h>
#include <asm/wbflush.h>
#include <boot_param.h>
#include <loongson.h>
static struct pglist_data prealloc__node_data[MAX_NUMNODES];
unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
......@@ -81,57 +82,6 @@ static void __init init_topology_matrix(void)
}
}
static void __init szmem(unsigned int node)
{
u32 i, mem_type;
static unsigned long num_physpages;
u64 node_id, node_psize, start_pfn, end_pfn, mem_start, mem_size;
/* Parse memory information and activate */
for (i = 0; i < loongson_memmap->nr_map; i++) {
node_id = loongson_memmap->map[i].node_id;
if (node_id != node)
continue;
mem_type = loongson_memmap->map[i].mem_type;
mem_size = loongson_memmap->map[i].mem_size;
mem_start = loongson_memmap->map[i].mem_start;
switch (mem_type) {
case SYSTEM_RAM_LOW:
start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
node_psize = (mem_size << 20) >> PAGE_SHIFT;
end_pfn = start_pfn + node_psize;
num_physpages += node_psize;
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
start_pfn, end_pfn, num_physpages);
memblock_add_node(PFN_PHYS(start_pfn),
PFN_PHYS(node_psize), node);
break;
case SYSTEM_RAM_HIGH:
start_pfn = ((node_id << 44) + mem_start) >> PAGE_SHIFT;
node_psize = (mem_size << 20) >> PAGE_SHIFT;
end_pfn = start_pfn + node_psize;
num_physpages += node_psize;
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
start_pfn, end_pfn, num_physpages);
memblock_add_node(PFN_PHYS(start_pfn),
PFN_PHYS(node_psize), node);
break;
case SYSTEM_RAM_RESERVED:
pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx MB\n",
(u32)node_id, mem_type, mem_start, mem_size);
memblock_reserve(((node_id << 44) + mem_start),
mem_size << 20);
break;
}
}
}
static void __init node_mem_init(unsigned int node)
{
unsigned long node_addrspace_offset;
......
......@@ -483,7 +483,8 @@ static void __init loongson3_smp_setup(void)
init_cpu_possible(cpu_none_mask);
/* For a unified kernel, NR_CPUS is the maximum possible value,
* loongson_sysconf.nr_cpus is the number actually present */
* loongson_sysconf.nr_cpus is the number actually present
*/
while (i < loongson_sysconf.nr_cpus) {
if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
/* Reserved physical CPU cores */
......@@ -492,6 +493,8 @@ static void __init loongson3_smp_setup(void)
__cpu_number_map[i] = num;
__cpu_logical_map[num] = i;
set_cpu_possible(num, true);
/* Loongson processors are always grouped by 4 */
cpu_set_cluster(&cpu_data[num], i / 4);
num++;
}
i++;
......@@ -567,7 +570,8 @@ static void loongson3_cpu_die(unsigned int cpu)
/* To shutdown a core in Loongson 3, the target core should go to CKSEG1 and
* flush all L1 entries at first. Then, another core (usually Core 0) can
* safely disable the clock of the target core. loongson3_play_dead() is
* called via CKSEG1 (uncached and unmapped) */
* called via CKSEG1 (uncached and unmapped)
*/
static void loongson3_type1_play_dead(int *state_addr)
{
register int val;
......
......@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/bitops.h>
#include <linux/dma-map-ops.h> /* for dma_default_coherent */
#include <asm/bcache.h>
#include <asm/bootinfo.h>
......@@ -35,7 +36,6 @@
#include <asm/war.h>
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
#include <asm/mips-cps.h>
/*
......@@ -1164,6 +1164,7 @@ static void probe_pcache(void)
case CPU_R4400PC:
case CPU_R4400SC:
case CPU_R4400MC:
case CPU_R4300:
icache_size = 1 << (12 + ((config & CONF_IC) >> 9));
c->icache.linesz = 16 << ((config & CONF_IB) >> 5);
c->icache.ways = 1;
......@@ -1593,7 +1594,7 @@ static int probe_scache(void)
return 1;
}
static void __init loongson2_sc_init(void)
static void loongson2_sc_init(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
......@@ -1913,15 +1914,11 @@ void r4k_cache_init(void)
__local_flush_icache_user_range = local_r4k_flush_icache_user_range;
#ifdef CONFIG_DMA_NONCOHERENT
#ifdef CONFIG_DMA_MAYBE_COHERENT
if (coherentio == IO_COHERENCE_ENABLED ||
(coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) {
if (dma_default_coherent) {
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
} else
#endif /* CONFIG_DMA_MAYBE_COHERENT */
{
} else {
_dma_cache_wback_inv = r4k_dma_cache_wback_inv;
_dma_cache_wback = r4k_dma_cache_wback_inv;
_dma_cache_inv = r4k_dma_cache_inv;
......